virt/kvm/arm/vgic/vgic-mmio-v3.c
  /*
   * VGICv3 MMIO handling functions
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #include <linux/irqchip/arm-gic-v3.h>
  #include <linux/kvm.h>
  #include <linux/kvm_host.h>
  #include <kvm/iodev.h>
  #include <kvm/arm_vgic.h>
  
  #include <asm/kvm_emulate.h>
  #include <asm/kvm_arm.h>
  #include <asm/kvm_mmu.h>
  
  #include "vgic.h"
  #include "vgic-mmio.h"
  /* extract @num bytes at @offset bytes offset in data */
  unsigned long extract_bytes(u64 data, unsigned int offset,
  			    unsigned int num)
  {
  	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
  }
  /* allows updates of any half of a 64-bit register (or the whole thing) */
  u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
  		     unsigned long val)
  {
  	int lower = (offset & 4) * 8;
  	int upper = lower + 8 * len - 1;
  
  	reg &= ~GENMASK_ULL(upper, lower);
  	val &= GENMASK_ULL(len * 8 - 1, 0);
  
  	return reg | ((u64)val << lower);
  }
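
/*
 * Worked example (a sketch, not called anywhere): a 32-bit guest write of
 * 0xdeadbeef to the upper half of a 64-bit register currently holding
 * 0x0123456789abcdef would be folded in as
 *	update_64bit_reg(0x0123456789abcdefULL, 4, 4, 0xdeadbeef)
 * which returns 0xdeadbeef89abcdefULL, leaving the lower word untouched.
 */
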
  bool vgic_has_its(struct kvm *kvm)
  {
  	struct vgic_dist *dist = &kvm->arch.vgic;
  
  	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
  		return false;
  	return dist->has_its;
  }
  bool vgic_supports_direct_msis(struct kvm *kvm)
  {
  	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
  }
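
/*
 * Emulate reads of the distributor "miscellaneous" registers: GICD_CTLR
 * reflects the distributor enable bit (with ARE and DS always set),
 * GICD_TYPER advertises the number of SPIs and the supported interrupt
 * ID bits (more when the VM has an ITS), and GICD_IIDR reports a KVM
 * product ID with ARM as the implementer.
 */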
  static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
  					    gpa_t addr, unsigned int len)
  {
  	u32 value = 0;
  
  	switch (addr & 0x0c) {
  	case GICD_CTLR:
  		if (vcpu->kvm->arch.vgic.enabled)
  			value |= GICD_CTLR_ENABLE_SS_G1;
  		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
  		break;
  	case GICD_TYPER:
  		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
  		value = (value >> 5) - 1;
  		if (vgic_has_its(vcpu->kvm)) {
  			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
  			value |= GICD_TYPER_LPIS;
  		} else {
  			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
  		}
  		break;
  	case GICD_IIDR:
  		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
  		break;
  	default:
  		return 0;
  	}
  
  	return value;
  }
  
  static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
  				    gpa_t addr, unsigned int len,
  				    unsigned long val)
  {
  	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  	bool was_enabled = dist->enabled;
  
  	switch (addr & 0x0c) {
  	case GICD_CTLR:
  		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
  
  		if (!was_enabled && dist->enabled)
  			vgic_kick_vcpus(vcpu->kvm);
  		break;
  	case GICD_TYPER:
  	case GICD_IIDR:
  		return;
  	}
  }
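
/*
 * GICD_IROUTERn handling: only Aff0-Aff2 are implemented, so the upper
 * word of each 64-bit routing register reads as zero and ignores writes.
 */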
  static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
  					    gpa_t addr, unsigned int len)
  {
  	int intid = VGIC_ADDR_TO_INTID(addr, 64);
  	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
  	unsigned long ret = 0;
  
  	if (!irq)
  		return 0;
  
  	/* The upper word is RAZ for us. */
  	if (!(addr & 4))
  		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

  	vgic_put_irq(vcpu->kvm, irq);
  	return ret;
  }
  
  static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
  				    gpa_t addr, unsigned int len,
  				    unsigned long val)
  {
  	int intid = VGIC_ADDR_TO_INTID(addr, 64);
  	struct vgic_irq *irq;
  	unsigned long flags;
  
  	/* The upper word is WI for us since we don't implement Aff3. */
  	if (addr & 4)
  		return;
  	irq = vgic_get_irq(vcpu->kvm, NULL, intid);
  
  	if (!irq)
  		return;
  	spin_lock_irqsave(&irq->irq_lock, flags);
  
  	/* We only care about and preserve Aff0, Aff1 and Aff2. */
  	irq->mpidr = val & GENMASK(23, 0);
  	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
  	spin_unlock_irqrestore(&irq->irq_lock, flags);
  	vgic_put_irq(vcpu->kvm, irq);
  }
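
/*
 * GICR_CTLR only exposes the LPI enable bit; writes are ignored unless
 * the VM has an ITS, and a 0->1 transition of the enable bit calls
 * vgic_enable_lpis() for this VCPU.
 */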
  static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
  					     gpa_t addr, unsigned int len)
  {
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  
  	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
  }
  
  
  static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
  				     gpa_t addr, unsigned int len,
  				     unsigned long val)
  {
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  	bool was_enabled = vgic_cpu->lpis_enabled;
  
  	if (!vgic_has_its(vcpu->kvm))
  		return;
  
  	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
  	if (!was_enabled && vgic_cpu->lpis_enabled)
  		vgic_enable_lpis(vcpu);
  }
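
/*
 * GICR_TYPER: report this VCPU's affinity value and processor number,
 * flag the redistributor of the last VCPU as Last, and advertise
 * physical LPI support when the VM has an ITS.
 */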
  static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
  					      gpa_t addr, unsigned int len)
  {
  	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
  	int target_vcpu_id = vcpu->vcpu_id;
  	u64 value;
  	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
  	value |= ((target_vcpu_id & 0xffff) << 8);
  	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
  		value |= GICR_TYPER_LAST;
  	if (vgic_has_its(vcpu->kvm))
  		value |= GICR_TYPER_PLPIS;
  
  	return extract_bytes(value, addr & 7, len);
  }
  
  static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
  					     gpa_t addr, unsigned int len)
  {
  	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
  }
  static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
  					      gpa_t addr, unsigned int len)
  {
  	switch (addr & 0xffff) {
  	case GICD_PIDR2:
  		/* report a GICv3 compliant implementation */
  		return 0x3b;
  	}
  
  	return 0;
  }
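
/*
 * Userspace (KVM device attribute) accessors for the pending registers:
 * these operate purely on the latched pending state; see the comment in
 * the read handler below for why the line level is handled separately.
 */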
  static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
  						  gpa_t addr, unsigned int len)
  {
  	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
  	u32 value = 0;
  	int i;
  
  	/*
  	 * pending state of interrupt is latched in pending_latch variable.
  	 * Userspace will save and restore pending state and line_level
  	 * separately.
  	 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
  	 * for handling of ISPENDR and ICPENDR.
  	 */
  	for (i = 0; i < len * 8; i++) {
  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
  
  		if (irq->pending_latch)
  			value |= (1U << i);
  
  		vgic_put_irq(vcpu->kvm, irq);
  	}
  
  	return value;
  }
  
  static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
  					  gpa_t addr, unsigned int len,
  					  unsigned long val)
  {
  	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
  	int i;
  	unsigned long flags;
  
  	for (i = 0; i < len * 8; i++) {
  		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
  		spin_lock_irqsave(&irq->irq_lock, flags);
  		if (test_bit(i, &val)) {
  			/*
  			 * pending_latch is set irrespective of irq type
  			 * (level or edge) to avoid dependency that VM should
  			 * restore irq config before pending info.
  			 */
  			irq->pending_latch = true;
  			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
  		} else {
  			irq->pending_latch = false;
  			spin_unlock_irqrestore(&irq->irq_lock, flags);
  		}
  
  		vgic_put_irq(vcpu->kvm, irq);
  	}
  }
  /* We want to avoid outer shareable. */
  u64 vgic_sanitise_shareability(u64 field)
  {
  	switch (field) {
  	case GIC_BASER_OuterShareable:
  		return GIC_BASER_InnerShareable;
  	default:
  		return field;
  	}
  }
  
  /* Avoid any inner non-cacheable mapping. */
  u64 vgic_sanitise_inner_cacheability(u64 field)
  {
  	switch (field) {
  	case GIC_BASER_CACHE_nCnB:
  	case GIC_BASER_CACHE_nC:
  		return GIC_BASER_CACHE_RaWb;
  	default:
  		return field;
  	}
  }
  
  /* Non-cacheable or same-as-inner are OK. */
  u64 vgic_sanitise_outer_cacheability(u64 field)
  {
  	switch (field) {
  	case GIC_BASER_CACHE_SameAsInner:
  	case GIC_BASER_CACHE_nC:
  		return field;
  	default:
  		return GIC_BASER_CACHE_nC;
  	}
  }
  
  u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
  			u64 (*sanitise_fn)(u64))
  {
  	u64 field = (reg & field_mask) >> field_shift;
  
  	field = sanitise_fn(field) << field_shift;
  	return (reg & ~field_mask) | field;
  }
  
  #define PROPBASER_RES0_MASK						\
  	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
  #define PENDBASER_RES0_MASK						\
  	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
  	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
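
/*
 * Both BASER sanitisers below clamp the shareability and cacheability
 * attributes, clear the RES0 bits and clear bits [51:48], limiting the
 * physical address to 48 bits.
 */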
  
  static u64 vgic_sanitise_pendbaser(u64 reg)
  {
  	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
  				  GICR_PENDBASER_SHAREABILITY_SHIFT,
  				  vgic_sanitise_shareability);
  	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
  				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
  				  vgic_sanitise_inner_cacheability);
  	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
  				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
  				  vgic_sanitise_outer_cacheability);
  
  	reg &= ~PENDBASER_RES0_MASK;
  	reg &= ~GENMASK_ULL(51, 48);
  
  	return reg;
  }
  
  static u64 vgic_sanitise_propbaser(u64 reg)
  {
  	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
  				  GICR_PROPBASER_SHAREABILITY_SHIFT,
  				  vgic_sanitise_shareability);
  	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
  				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
  				  vgic_sanitise_inner_cacheability);
  	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
  				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
  				  vgic_sanitise_outer_cacheability);
  
  	reg &= ~PROPBASER_RES0_MASK;
  	reg &= ~GENMASK_ULL(51, 48);
  	return reg;
  }
  
  static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
  					     gpa_t addr, unsigned int len)
  {
  	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  
  	return extract_bytes(dist->propbaser, addr & 7, len);
  }
  
  static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
  				     gpa_t addr, unsigned int len,
  				     unsigned long val)
  {
  	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  	u64 old_propbaser, propbaser;
  
  	/* Storing a value with LPIs already enabled is undefined */
  	if (vgic_cpu->lpis_enabled)
  		return;
  	do {
  		old_propbaser = READ_ONCE(dist->propbaser);
  		propbaser = old_propbaser;
  		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
  		propbaser = vgic_sanitise_propbaser(propbaser);
  	} while (cmpxchg64(&dist->propbaser, old_propbaser,
  			   propbaser) != old_propbaser);
  }
  
  static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
  					     gpa_t addr, unsigned int len)
  {
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  
  	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
  }
  
  static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
  				     gpa_t addr, unsigned int len,
  				     unsigned long val)
  {
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  	u64 old_pendbaser, pendbaser;
  
  	/* Storing a value with LPIs already enabled is undefined */
  	if (vgic_cpu->lpis_enabled)
  		return;
  	do {
  		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
  		pendbaser = old_pendbaser;
  		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
  		pendbaser = vgic_sanitise_pendbaser(pendbaser);
  	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
  			   pendbaser) != old_pendbaser);
  }
  /*
   * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
   * redistributors, while SPIs are covered by registers in the distributor
   * block. Trying to set private IRQs in this block gets ignored.
   * We take some special care here to fix the calculation of the register
   * offset.
   */
  #define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
  	{								\
  		.reg_offset = off,					\
  		.bits_per_irq = bpi,					\
  		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
  		.access_flags = acc,					\
  		.read = vgic_mmio_read_raz,				\
  		.write = vgic_mmio_write_wi,				\
  	}, {								\
  		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
  		.bits_per_irq = bpi,					\
  		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
  		.access_flags = acc,					\
  		.read = rd,						\
  		.write = wr,						\
  		.uaccess_read = ur,					\
  		.uaccess_write = uw,					\
  	}
  
  static const struct vgic_register_region vgic_v3_dist_registers[] = {
  	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
  		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
  		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
  		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
  		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
  		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
  		vgic_mmio_read_pending, vgic_mmio_write_spending,
  		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
  		vgic_mmio_read_pending, vgic_mmio_write_cpending,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
  		vgic_mmio_read_active, vgic_mmio_write_sactive,
  		NULL, vgic_mmio_uaccess_write_sactive, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
  		vgic_mmio_read_active, vgic_mmio_write_cactive,
  		NULL, vgic_mmio_uaccess_write_cactive,
  		1, VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
  		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
  		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
  		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
  		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
  		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
  		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
  		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
  		VGIC_ACCESS_32bit),
  };
  
  static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
  	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
  		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
  		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
  		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
  		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
  		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
  		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
  		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
  		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
  		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
  		VGIC_ACCESS_32bit),
  };
  
  static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
  	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
  		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
  		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
  		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
  		vgic_mmio_read_pending, vgic_mmio_write_spending,
  		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
  		vgic_mmio_read_pending, vgic_mmio_write_cpending,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
  		vgic_mmio_read_active, vgic_mmio_write_sactive,
  		NULL, vgic_mmio_uaccess_write_sactive,
  		4, VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
  		vgic_mmio_read_active, vgic_mmio_write_cactive,
  		NULL, vgic_mmio_uaccess_write_cactive,
  		4, VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
  		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
  		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
  		vgic_mmio_read_config, vgic_mmio_write_config, 8,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
  		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
  		VGIC_ACCESS_32bit),
  };
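
/*
 * The three tables above describe the distributor frame and the two 64K
 * redistributor frames (RD_base and SGI_base); each entry wires an MMIO
 * offset to its read/write (and, where needed, userspace) handlers.
 */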
  
  unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
  {
  	dev->regions = vgic_v3_dist_registers;
  	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
  
  	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
  
  	return SZ_64K;
  }
  /**
   * vgic_register_redist_iodev - register a single redist iodev
   * @vcpu:    The VCPU to which the redistributor belongs
   *
   * Register a KVM iodev for this VCPU's redistributor using the address
   * provided.
   *
   * Return 0 on success, -ERRNO otherwise.
   */
  int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
  {
  	struct kvm *kvm = vcpu->kvm;
  	struct vgic_dist *vgic = &kvm->arch.vgic;
  	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
  	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
  	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
  	struct vgic_redist_region *rdreg;
  	gpa_t rd_base, sgi_base;
  	int ret;
  	/*
  	 * We may be creating VCPUs before having set the base address for the
  	 * redistributor region, in which case we will come back to this
  	 * function for all VCPUs when the base address is set.  Just return
  	 * without doing any work for now.
  	 */
  	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
  	if (!rdreg)
  		return 0;
  
  	if (!vgic_v3_check_base(kvm))
  		return -EINVAL;
  	vgic_cpu->rdreg = rdreg;
  
  	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
  	sgi_base = rd_base + SZ_64K;
  
  	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
  	rd_dev->base_addr = rd_base;
  	rd_dev->iodev_type = IODEV_REDIST;
  	rd_dev->regions = vgic_v3_rdbase_registers;
  	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
  	rd_dev->redist_vcpu = vcpu;
  
  	mutex_lock(&kvm->slots_lock);
  	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
  				      SZ_64K, &rd_dev->dev);
  	mutex_unlock(&kvm->slots_lock);
  
  	if (ret)
  		return ret;
  
  	kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
  	sgi_dev->base_addr = sgi_base;
  	sgi_dev->iodev_type = IODEV_REDIST;
  	sgi_dev->regions = vgic_v3_sgibase_registers;
  	sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
  	sgi_dev->redist_vcpu = vcpu;
  
  	mutex_lock(&kvm->slots_lock);
  	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
  				      SZ_64K, &sgi_dev->dev);
  	if (ret) {
  		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
  					  &rd_dev->dev);
  		goto out;
  	}

  	rdreg->free_index++;
  out:
  	mutex_unlock(&kvm->slots_lock);
  	return ret;
  }
  
  static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
  {
  	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
  	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
  
  	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
  	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
  }
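
/*
 * Register redistributor iodevs for every VCPU created so far; on
 * failure, unregister the ones that were already set up.
 */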
  static int vgic_register_all_redist_iodevs(struct kvm *kvm)
  {
  	struct kvm_vcpu *vcpu;
  	int c, ret = 0;
  	kvm_for_each_vcpu(c, vcpu, kvm) {
  		ret = vgic_register_redist_iodev(vcpu);
  		if (ret)
  			break;
  	}
  
  	if (ret) {
  		/* The current c failed, so we start with the previous one. */
  		mutex_lock(&kvm->slots_lock);
  		for (c--; c >= 0; c--) {
  			vcpu = kvm_get_vcpu(kvm, c);
  			vgic_unregister_redist_iodev(vcpu);
  		}
  		mutex_unlock(&kvm->slots_lock);
  	}
  
  	return ret;
  }

  int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
  {
  	struct vgic_dist *vgic = &kvm->arch.vgic;
  	struct vgic_redist_region *rdreg;
  	int ret;
  
  	/* vgic_check_ioaddr makes sure we don't do this twice */
  	if (!list_empty(&vgic->rd_regions))
  		return -EINVAL;
  
  	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
  	if (!rdreg)
  		return -ENOMEM;
  
  	rdreg->base = VGIC_ADDR_UNDEF;
  
  	ret = vgic_check_ioaddr(kvm, &rdreg->base, addr, SZ_64K);
  	if (ret)
  		goto out;

  	rdreg->base = addr;
  	if (!vgic_v3_check_base(kvm)) {
  		ret = -EINVAL;
  		goto out;
  	}
  	list_add(&rdreg->list, &vgic->rd_regions);
  	/*
  	 * Register iodevs for each existing VCPU.  Adding more VCPUs
  	 * afterwards will register the iodevs when needed.
  	 */
  	ret = vgic_register_all_redist_iodevs(kvm);
  	if (ret)
  		return ret;
  
  	return 0;
  
  out:
  	kfree(rdreg);
  	return ret;
  }
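
/*
 * Check whether a given KVM device attribute targets an implemented
 * distributor, redistributor or CPU system register, without actually
 * performing the access.
 */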
  int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
  {
  	const struct vgic_register_region *region;
  	struct vgic_io_device iodev;
  	struct vgic_reg_attr reg_attr;
  	struct kvm_vcpu *vcpu;
  	gpa_t addr;
  	int ret;
  
  	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
  	if (ret)
  		return ret;
  
  	vcpu = reg_attr.vcpu;
  	addr = reg_attr.addr;
  
  	switch (attr->group) {
  	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  		iodev.regions = vgic_v3_dist_registers;
  		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
  		iodev.base_addr = 0;
  		break;
  	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:{
  		iodev.regions = vgic_v3_rdbase_registers;
  		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
  		iodev.base_addr = 0;
  		break;
  	}
  	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
  		u64 reg, id;
  
  		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
  		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
  	}
  	default:
  		return -ENXIO;
  	}
  
  	/* We only support aligned 32-bit accesses. */
  	if (addr & 3)
  		return -ENXIO;
  
  	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
  	if (!region)
  		return -ENXIO;
  
  	return 0;
  }
  /*
   * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
   * generation register ICC_SGI1R_EL1) with a given VCPU.
   * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
   * return -1.
   */
  static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
  {
  	unsigned long affinity;
  	int level0;
  
  	/*
  	 * Split the current VCPU's MPIDR into affinity level 0 and the
  	 * rest as this is what we have to compare against.
  	 */
  	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
  	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
  	affinity &= ~MPIDR_LEVEL_MASK;
  
  	/* bail out if the upper three levels don't match */
  	if (sgi_aff != affinity)
  		return -1;
  
  	/* Is this VCPU's bit set in the mask ? */
  	if (!(sgi_cpu_mask & BIT(level0)))
  		return -1;
  
  	return level0;
  }
  
  /*
   * The ICC_SGI* registers encode the affinity differently from the MPIDR,
   * so provide a wrapper to use the existing defines to isolate a certain
   * affinity level.
   */
  #define SGI_AFFINITY_LEVEL(reg, level) \
  	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
  	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
  
  /**
   * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
   * @vcpu: The VCPU requesting a SGI
   * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
   *
   * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
   * This will trap in sys_regs.c and call this function.
   * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
   * target processors as well as a bitmask of 16 Aff0 CPUs.
   * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
   * check for matching ones. If this bit is set, we signal all, but not the
   * calling VCPU.
   */
  void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
  {
  	struct kvm *kvm = vcpu->kvm;
  	struct kvm_vcpu *c_vcpu;
  	u16 target_cpus;
  	u64 mpidr;
  	int sgi, c;
  	int vcpu_id = vcpu->vcpu_id;
  	bool broadcast;
  	unsigned long flags;
  
  	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
  	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
  	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
  	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
  	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
  	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
  
  	/*
  	 * We iterate over all VCPUs to find the MPIDRs matching the request.
  	 * If we have handled one CPU, we clear its bit to detect early
  	 * if we are already finished. This avoids iterating through all
  	 * VCPUs when most of the times we just signal a single VCPU.
  	 */
  	kvm_for_each_vcpu(c, c_vcpu, kvm) {
  		struct vgic_irq *irq;
  
  		/* Exit early if we have dealt with all requested CPUs */
  		if (!broadcast && target_cpus == 0)
  			break;
  
  		/* Don't signal the calling VCPU */
  		if (broadcast && c == vcpu_id)
  			continue;
  
  		if (!broadcast) {
  			int level0;
  
  			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
  			if (level0 == -1)
  				continue;
  
  			/* remove this matching VCPU from the mask */
  			target_cpus &= ~BIT(level0);
  		}
  
  		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
  		spin_lock_irqsave(&irq->irq_lock, flags);
  		irq->pending_latch = true;

  		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
  		vgic_put_irq(vcpu->kvm, irq);
  	}
  }
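
/*
 * The uaccess helpers below route userspace register accesses through
 * temporary iodevs built from the register description tables above.
 */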
  
  int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
  			 int offset, u32 *val)
  {
  	struct vgic_io_device dev = {
  		.regions = vgic_v3_dist_registers,
  		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
  	};
  
  	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
  }
  
  int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
  			   int offset, u32 *val)
  {
  	struct vgic_io_device rd_dev = {
  		.regions = vgic_v3_rdbase_registers,
  		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
  	};
  
  	struct vgic_io_device sgi_dev = {
  		.regions = vgic_v3_sgibase_registers,
  		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
  	};
  
  	/* SGI_base is the next 64K frame after RD_base */
  	if (offset >= SZ_64K)
  		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
  				    val);
  	else
  		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
  }
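
/*
 * Line-level info is transferred 32 interrupts at a time, so the access
 * must start at a multiple of 32 interrupt IDs.
 */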
  
  int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
  				    u32 intid, u64 *val)
  {
  	if (intid % 32)
  		return -EINVAL;
  
  	if (is_write)
  		vgic_write_irq_line_level_info(vcpu, intid, *val);
  	else
  		*val = vgic_read_irq_line_level_info(vcpu, intid);
  
  	return 0;
  }