Commit eede821dbfd58df89edb072da64e006321eaef58

Author:    Marc Zyngier
Committer: Christoffer Dall
Parent:    63f8344cb4

KVM: arm/arm64: vgic: move GICv2 registers to their own structure

In order to make way for the GICv3 registers, move the v2-specific
registers to their own structure.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

6 changed files with 81 additions and 75 deletions
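In short, the seven v2-only CPU interface fields move verbatim out of struct vgic_cpu into a new struct vgic_v2_cpu_if, reachable through an anonymous union, while nr_lr stays common. Condensed from the include/kvm/arm_vgic.h hunks below:

    struct vgic_v2_cpu_if {
            u32 vgic_hcr;
            u32 vgic_vmcr;
            u32 vgic_misr;          /* Saved only */
            u32 vgic_eisr[2];       /* Saved only */
            u32 vgic_elrsr[2];      /* Saved only */
            u32 vgic_apr;
            u32 vgic_lr[VGIC_MAX_LRS];
    };

    struct vgic_cpu {
            /* ... */
            int nr_lr;              /* stays common, not v2-specific */

            /* CPU vif control registers for world switch */
            union {
                    struct vgic_v2_cpu_if vgic_v2;
            };
            /* ... */
    };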

arch/arm/kernel/asm-offsets.c

@@ -182,13 +182,13 @@
   DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
 #ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
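Nothing in these constants changes shape: offsetof() accepts a nested member designator (vgic_v2.vgic_hcr) and still yields a single flat byte offset from the start of struct vgic_cpu, so the assembly below only needs the renamed symbols, not extra arithmetic. A minimal standalone sketch of that property, using hypothetical stand-in types rather than the kernel ones:

    #include <stddef.h>
    #include <stdio.h>

    struct v2_if { unsigned int hcr, vmcr; };
    struct cpu_if {
            int nr_lr;
            struct v2_if vgic_v2;   /* stand-in for the new sub-structure */
    };

    int main(void)
    {
            /* Equals offsetof(struct cpu_if, vgic_v2)
             * plus offsetof(struct v2_if, vmcr). */
            printf("%zu\n", offsetof(struct cpu_if, vgic_v2.vmcr));
            return 0;
    }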
arch/arm/kvm/interrupts_head.S

@@ -421,14 +421,14 @@
 	ldr	r9, [r2, #GICH_ELRSR1]
 	ldr	r10, [r2, #GICH_APR]
 
-	str	r3, [r11, #VGIC_CPU_HCR]
-	str	r4, [r11, #VGIC_CPU_VMCR]
-	str	r5, [r11, #VGIC_CPU_MISR]
-	str	r6, [r11, #VGIC_CPU_EISR]
-	str	r7, [r11, #(VGIC_CPU_EISR + 4)]
-	str	r8, [r11, #VGIC_CPU_ELRSR]
-	str	r9, [r11, #(VGIC_CPU_ELRSR + 4)]
-	str	r10, [r11, #VGIC_CPU_APR]
+	str	r3, [r11, #VGIC_V2_CPU_HCR]
+	str	r4, [r11, #VGIC_V2_CPU_VMCR]
+	str	r5, [r11, #VGIC_V2_CPU_MISR]
+	str	r6, [r11, #VGIC_V2_CPU_EISR]
+	str	r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
+	str	r8, [r11, #VGIC_V2_CPU_ELRSR]
+	str	r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+	str	r10, [r11, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
 	mov	r5, #0
@@ -436,7 +436,7 @@
 
 	/* Save list registers */
 	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_CPU_LR
+	add	r3, r11, #VGIC_V2_CPU_LR
 	ldr	r4, [r11, #VGIC_CPU_NR_LR]
 1:	ldr	r6, [r2], #4
 	str	r6, [r3], #4
@@ -463,9 +463,9 @@
 	add	r11, vcpu, #VCPU_VGIC_CPU
 
 	/* We only restore a minimal set of registers */
-	ldr	r3, [r11, #VGIC_CPU_HCR]
-	ldr	r4, [r11, #VGIC_CPU_VMCR]
-	ldr	r8, [r11, #VGIC_CPU_APR]
+	ldr	r3, [r11, #VGIC_V2_CPU_HCR]
+	ldr	r4, [r11, #VGIC_V2_CPU_VMCR]
+	ldr	r8, [r11, #VGIC_V2_CPU_APR]
 
 	str	r3, [r2, #GICH_HCR]
 	str	r4, [r2, #GICH_VMCR]
@@ -473,7 +473,7 @@
 
 	/* Restore list registers */
 	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_CPU_LR
+	add	r3, r11, #VGIC_V2_CPU_LR
 	ldr	r4, [r11, #VGIC_CPU_NR_LR]
 1:	ldr	r6, [r3], #4
 	str	r6, [r2], #4
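The paired stores at #VGIC_V2_CPU_EISR and #(VGIC_V2_CPU_EISR + 4) (likewise for ELRSR, cf. the GICH_ELRSR1 load above) reflect that vgic_eisr and vgic_elrsr are u32[2]: two hardware words that the C code in vgic.c later scans as a single bitmap. A self-contained illustration of that layout, not taken from the commit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Word 0 (offset EISR) covers LRs 0-31; word 1 (EISR + 4)
             * covers LRs 32-63, assuming the little-endian layout used
             * in this sketch. */
            uint32_t eisr[2] = { 0x00000005u, 0x00000001u }; /* bits 0, 2, 32 */
            int lr;

            for (lr = 0; lr < 64; lr++)
                    if (eisr[lr / 32] & (1u << (lr % 32)))
                            printf("LR%d was EOIed\n", lr);
            return 0;
    }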
arch/arm64/kernel/asm-offsets.c

@@ -129,13 +129,13 @@
   DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
   DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
   DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
-  DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
-  DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
-  DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
-  DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
-  DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
-  DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+  DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+  DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+  DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
   DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
   DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
arch/arm64/kvm/hyp.S

@@ -412,14 +412,14 @@
 CPU_BE(	rev	w10, w10 )
 CPU_BE(	rev	w11, w11 )
 
-	str	w4, [x3, #VGIC_CPU_HCR]
-	str	w5, [x3, #VGIC_CPU_VMCR]
-	str	w6, [x3, #VGIC_CPU_MISR]
-	str	w7, [x3, #VGIC_CPU_EISR]
-	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
-	str	w9, [x3, #VGIC_CPU_ELRSR]
-	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
-	str	w11, [x3, #VGIC_CPU_APR]
+	str	w4, [x3, #VGIC_V2_CPU_HCR]
+	str	w5, [x3, #VGIC_V2_CPU_VMCR]
+	str	w6, [x3, #VGIC_V2_CPU_MISR]
+	str	w7, [x3, #VGIC_V2_CPU_EISR]
+	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+	str	w9, [x3, #VGIC_V2_CPU_ELRSR]
+	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+	str	w11, [x3, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
 	str	wzr, [x2, #GICH_HCR]
@@ -427,7 +427,7 @@
 	/* Save list registers */
 	add	x2, x2, #GICH_LR0
 	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
+	add	x3, x3, #VGIC_V2_CPU_LR
 1:	ldr	w5, [x2], #4
 CPU_BE(	rev	w5, w5 )
 	str	w5, [x3], #4
@@ -452,9 +452,9 @@
 	add	x3, x0, #VCPU_VGIC_CPU
 
 	/* We only restore a minimal set of registers */
-	ldr	w4, [x3, #VGIC_CPU_HCR]
-	ldr	w5, [x3, #VGIC_CPU_VMCR]
-	ldr	w6, [x3, #VGIC_CPU_APR]
+	ldr	w4, [x3, #VGIC_V2_CPU_HCR]
+	ldr	w5, [x3, #VGIC_V2_CPU_VMCR]
+	ldr	w6, [x3, #VGIC_V2_CPU_APR]
 CPU_BE(	rev	w4, w4 )
 CPU_BE(	rev	w5, w5 )
 CPU_BE(	rev	w6, w6 )
@@ -466,7 +466,7 @@
 	/* Restore list registers */
 	add	x2, x2, #GICH_LR0
 	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
+	add	x3, x3, #VGIC_V2_CPU_LR
 1:	ldr	w5, [x3], #4
 CPU_BE(	rev	w5, w5 )
 	str	w5, [x2], #4
include/kvm/arm_vgic.h

@@ -110,6 +110,16 @@
 #endif
 };
 
+struct vgic_v2_cpu_if {
+	u32	vgic_hcr;
+	u32	vgic_vmcr;
+	u32	vgic_misr;	/* Saved only */
+	u32	vgic_eisr[2];	/* Saved only */
+	u32	vgic_elrsr[2];	/* Saved only */
+	u32	vgic_apr;
+	u32	vgic_lr[VGIC_MAX_LRS];
+};
+
 struct vgic_cpu {
 #ifdef CONFIG_KVM_ARM_VGIC
 	/* per IRQ to LR mapping */
@@ -126,13 +136,9 @@
 	int	nr_lr;
 
 	/* CPU vif control registers for world switch */
-	u32	vgic_hcr;
-	u32	vgic_vmcr;
-	u32	vgic_misr;	/* Saved only */
-	u32	vgic_eisr[2];	/* Saved only */
-	u32	vgic_elrsr[2];	/* Saved only */
-	u32	vgic_apr;
-	u32	vgic_lr[VGIC_MAX_LRS];
+	union {
+		struct vgic_v2_cpu_if	vgic_v2;
+	};
 #endif
 };
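A union with a single member looks odd on its own; the point, per the commit message, is to make room for GICv3 state beside it. A later patch would extend the union along these lines (the GICv3 structure and its contents here are purely hypothetical, not part of this commit):

    struct vgic_v3_cpu_if {
            /* hypothetical: GICv3 CPU interface state would live here */
    };

    struct vgic_cpu {
            /* ... */
            union {
                    struct vgic_v2_cpu_if vgic_v2;
                    struct vgic_v3_cpu_if vgic_v3;  /* hypothetical future member */
            };
            /* ... */
    };

Since a given host uses only one CPU interface flavour at a time, the union lets the variants share storage instead of growing struct vgic_cpu.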
virt/kvm/arm/vgic.c

@@ -601,7 +601,7 @@
 static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
 {
 	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
+	vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 }
 
@@ -626,7 +626,7 @@
 	u32 *lr;
 
 	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		lr = &vgic_cpu->vgic_lr[i];
+		lr = &vgic_cpu->vgic_v2.vgic_lr[i];
 		irq = LR_IRQID(*lr);
 		source_cpu = LR_CPUID(*lr);
 
@@ -1007,7 +1007,7 @@
 	int lr;
 
 	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 		if (!vgic_irq_is_enabled(vcpu, irq)) {
 			vgic_retire_lr(lr, irq, vgic_cpu);
@@ -1037,11 +1037,11 @@
 
 	/* Do we have an active interrupt for the same CPUID? */
 	if (lr != LR_EMPTY &&
-	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
+	    (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
 		kvm_debug("LR%d piggyback for IRQ%d %x\n",
-			  lr, irq, vgic_cpu->vgic_lr[lr]);
+			  lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
 		return true;
 	}
 
@@ -1052,12 +1052,12 @@
 		return false;
 
 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+	vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
 	if (!vgic_irq_is_edge(vcpu, irq))
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
 
 	return true;
 }
@@ -1155,9 +1155,9 @@
 
 epilog:
 	if (overflow) {
-		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
 	} else {
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 		/*
 		 * We're about to run this VCPU, and we've consumed
 		 * everything the distributor had in store for
@@ -1173,21 +1173,21 @@
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	bool level_pending = false;
 
-	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
 		 * active bit.
 		 */
 		int lr, irq;
 
-		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
+		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
 				 vgic_cpu->nr_lr) {
-			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+			irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 			vgic_irq_clear_active(vcpu, irq);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
 
 			/* Any additional pending interrupt? */
 			if (vgic_dist_irq_is_pending(vcpu, irq)) {
@@ -1201,13 +1201,13 @@
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
 			 */
-			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_U)
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 
 	return level_pending;
 }
@@ -1226,21 +1226,21 @@
 	level_pending = vgic_process_maintenance(vcpu);
 
 	/* Clear mappings for empty LRs */
-	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
+	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 			 vgic_cpu->nr_lr) {
 		int irq;
 
 		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
 			continue;
 
-		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 		BUG_ON(irq >= VGIC_NR_IRQS);
 		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 	}
 
 	/* Check if we still have something up our sleeve... */
-	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
+	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 				      vgic_cpu->nr_lr);
 	if (level_pending || pending < vgic_cpu->nr_lr)
 		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
@@ -1436,10 +1436,10 @@
 	 * points to their reset values. Anything else resets to zero
 	 * anyway.
 	 */
-	vgic_cpu->vgic_vmcr = 0;
+	vgic_cpu->vgic_v2.vgic_vmcr = 0;
 
 	vgic_cpu->nr_lr = vgic_nr_lr;
-	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+	vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
 
 	return 0;
 }
@@ -1746,15 +1746,15 @@
 	}
 
 	if (!mmio->is_write) {
-		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+		reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
 		mmio_data_write(mmio, ~0, reg);
 	} else {
 		reg = mmio_data_read(mmio, ~0);
 		reg = (reg << shift) & mask;
-		if (reg != (vgic_cpu->vgic_vmcr & mask))
+		if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
 			updated = true;
-		vgic_cpu->vgic_vmcr &= ~mask;
-		vgic_cpu->vgic_vmcr |= reg;
+		vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
+		vgic_cpu->vgic_v2.vgic_vmcr |= reg;
 	}
 	return updated;
 }
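The final hunk is untouched logic with renamed accesses: a read-modify-write of one field packed into vgic_vmcr. Lifted out of context, the pattern is just the following (a sketch with illustrative names and field positions, not kernel code):

    #include <stdio.h>

    /* Extract a packed field from a register word. */
    static unsigned int field_get(unsigned int vmcr, unsigned int mask,
                                  unsigned int shift)
    {
            return (vmcr & mask) >> shift;
    }

    /* Update a packed field, leaving the word's other bits untouched. */
    static unsigned int field_set(unsigned int vmcr, unsigned int val,
                                  unsigned int mask, unsigned int shift)
    {
            return (vmcr & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            unsigned int vmcr = 0xffffffffu;

            vmcr = field_set(vmcr, 0x0a, 0x3e0u, 5); /* 5-bit field at bits 9:5 */
            printf("vmcr = %#x, field = %#x\n", vmcr,
                   field_get(vmcr, 0x3e0u, 5));
            return 0;
    }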