Commit 46a47b1ed118cda1a08b7f6077b837a00fbc112b
1 parent
be835674b5
Exists in
master
and in
4 other branches
KVM: convert ioapic lock to spinlock
kvm_set_irq is used from non-sleepable contexts, so convert ioapic from mutex to spinlock. KVM-Stable-Tag. Tested-by: Ralf Bonenkamp <ralf.bonenkamp@swyx.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Showing 2 changed files with 16 additions and 16 deletions Inline Diff
virt/kvm/ioapic.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 MandrakeSoft S.A. | 2 | * Copyright (C) 2001 MandrakeSoft S.A. |
3 | * | 3 | * |
4 | * MandrakeSoft S.A. | 4 | * MandrakeSoft S.A. |
5 | * 43, rue d'Aboukir | 5 | * 43, rue d'Aboukir |
6 | * 75002 Paris - France | 6 | * 75002 Paris - France |
7 | * http://www.linux-mandrake.com/ | 7 | * http://www.linux-mandrake.com/ |
8 | * http://www.mandrakesoft.com/ | 8 | * http://www.mandrakesoft.com/ |
9 | * | 9 | * |
10 | * This library is free software; you can redistribute it and/or | 10 | * This library is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU Lesser General Public | 11 | * modify it under the terms of the GNU Lesser General Public |
12 | * License as published by the Free Software Foundation; either | 12 | * License as published by the Free Software Foundation; either |
13 | * version 2 of the License, or (at your option) any later version. | 13 | * version 2 of the License, or (at your option) any later version. |
14 | * | 14 | * |
15 | * This library is distributed in the hope that it will be useful, | 15 | * This library is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
18 | * Lesser General Public License for more details. | 18 | * Lesser General Public License for more details. |
19 | * | 19 | * |
20 | * You should have received a copy of the GNU Lesser General Public | 20 | * You should have received a copy of the GNU Lesser General Public |
21 | * License along with this library; if not, write to the Free Software | 21 | * License along with this library; if not, write to the Free Software |
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | * | 23 | * |
24 | * Yunhong Jiang <yunhong.jiang@intel.com> | 24 | * Yunhong Jiang <yunhong.jiang@intel.com> |
25 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> | 25 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> |
26 | * Based on Xen 3.1 code. | 26 | * Based on Xen 3.1 code. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/kvm_host.h> | 29 | #include <linux/kvm_host.h> |
30 | #include <linux/kvm.h> | 30 | #include <linux/kvm.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/highmem.h> | 32 | #include <linux/highmem.h> |
33 | #include <linux/smp.h> | 33 | #include <linux/smp.h> |
34 | #include <linux/hrtimer.h> | 34 | #include <linux/hrtimer.h> |
35 | #include <linux/io.h> | 35 | #include <linux/io.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
38 | #include <asm/page.h> | 38 | #include <asm/page.h> |
39 | #include <asm/current.h> | 39 | #include <asm/current.h> |
40 | #include <trace/events/kvm.h> | 40 | #include <trace/events/kvm.h> |
41 | 41 | ||
42 | #include "ioapic.h" | 42 | #include "ioapic.h" |
43 | #include "lapic.h" | 43 | #include "lapic.h" |
44 | #include "irq.h" | 44 | #include "irq.h" |
45 | 45 | ||
46 | #if 0 | 46 | #if 0 |
47 | #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) | 47 | #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) |
48 | #else | 48 | #else |
49 | #define ioapic_debug(fmt, arg...) | 49 | #define ioapic_debug(fmt, arg...) |
50 | #endif | 50 | #endif |
51 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); | 51 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); |
52 | 52 | ||
53 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | 53 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
54 | unsigned long addr, | 54 | unsigned long addr, |
55 | unsigned long length) | 55 | unsigned long length) |
56 | { | 56 | { |
57 | unsigned long result = 0; | 57 | unsigned long result = 0; |
58 | 58 | ||
59 | switch (ioapic->ioregsel) { | 59 | switch (ioapic->ioregsel) { |
60 | case IOAPIC_REG_VERSION: | 60 | case IOAPIC_REG_VERSION: |
61 | result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) | 61 | result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) |
62 | | (IOAPIC_VERSION_ID & 0xff)); | 62 | | (IOAPIC_VERSION_ID & 0xff)); |
63 | break; | 63 | break; |
64 | 64 | ||
65 | case IOAPIC_REG_APIC_ID: | 65 | case IOAPIC_REG_APIC_ID: |
66 | case IOAPIC_REG_ARB_ID: | 66 | case IOAPIC_REG_ARB_ID: |
67 | result = ((ioapic->id & 0xf) << 24); | 67 | result = ((ioapic->id & 0xf) << 24); |
68 | break; | 68 | break; |
69 | 69 | ||
70 | default: | 70 | default: |
71 | { | 71 | { |
72 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; | 72 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; |
73 | u64 redir_content; | 73 | u64 redir_content; |
74 | 74 | ||
75 | ASSERT(redir_index < IOAPIC_NUM_PINS); | 75 | ASSERT(redir_index < IOAPIC_NUM_PINS); |
76 | 76 | ||
77 | redir_content = ioapic->redirtbl[redir_index].bits; | 77 | redir_content = ioapic->redirtbl[redir_index].bits; |
78 | result = (ioapic->ioregsel & 0x1) ? | 78 | result = (ioapic->ioregsel & 0x1) ? |
79 | (redir_content >> 32) & 0xffffffff : | 79 | (redir_content >> 32) & 0xffffffff : |
80 | redir_content & 0xffffffff; | 80 | redir_content & 0xffffffff; |
81 | break; | 81 | break; |
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | return result; | 85 | return result; |
86 | } | 86 | } |
87 | 87 | ||
88 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | 88 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) |
89 | { | 89 | { |
90 | union kvm_ioapic_redirect_entry *pent; | 90 | union kvm_ioapic_redirect_entry *pent; |
91 | int injected = -1; | 91 | int injected = -1; |
92 | 92 | ||
93 | pent = &ioapic->redirtbl[idx]; | 93 | pent = &ioapic->redirtbl[idx]; |
94 | 94 | ||
95 | if (!pent->fields.mask) { | 95 | if (!pent->fields.mask) { |
96 | injected = ioapic_deliver(ioapic, idx); | 96 | injected = ioapic_deliver(ioapic, idx); |
97 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 97 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
98 | pent->fields.remote_irr = 1; | 98 | pent->fields.remote_irr = 1; |
99 | } | 99 | } |
100 | 100 | ||
101 | return injected; | 101 | return injected; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void update_handled_vectors(struct kvm_ioapic *ioapic) | 104 | static void update_handled_vectors(struct kvm_ioapic *ioapic) |
105 | { | 105 | { |
106 | DECLARE_BITMAP(handled_vectors, 256); | 106 | DECLARE_BITMAP(handled_vectors, 256); |
107 | int i; | 107 | int i; |
108 | 108 | ||
109 | memset(handled_vectors, 0, sizeof(handled_vectors)); | 109 | memset(handled_vectors, 0, sizeof(handled_vectors)); |
110 | for (i = 0; i < IOAPIC_NUM_PINS; ++i) | 110 | for (i = 0; i < IOAPIC_NUM_PINS; ++i) |
111 | __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors); | 111 | __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors); |
112 | memcpy(ioapic->handled_vectors, handled_vectors, | 112 | memcpy(ioapic->handled_vectors, handled_vectors, |
113 | sizeof(handled_vectors)); | 113 | sizeof(handled_vectors)); |
114 | smp_wmb(); | 114 | smp_wmb(); |
115 | } | 115 | } |
116 | 116 | ||
117 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | 117 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
118 | { | 118 | { |
119 | unsigned index; | 119 | unsigned index; |
120 | bool mask_before, mask_after; | 120 | bool mask_before, mask_after; |
121 | union kvm_ioapic_redirect_entry *e; | 121 | union kvm_ioapic_redirect_entry *e; |
122 | 122 | ||
123 | switch (ioapic->ioregsel) { | 123 | switch (ioapic->ioregsel) { |
124 | case IOAPIC_REG_VERSION: | 124 | case IOAPIC_REG_VERSION: |
125 | /* Writes are ignored. */ | 125 | /* Writes are ignored. */ |
126 | break; | 126 | break; |
127 | 127 | ||
128 | case IOAPIC_REG_APIC_ID: | 128 | case IOAPIC_REG_APIC_ID: |
129 | ioapic->id = (val >> 24) & 0xf; | 129 | ioapic->id = (val >> 24) & 0xf; |
130 | break; | 130 | break; |
131 | 131 | ||
132 | case IOAPIC_REG_ARB_ID: | 132 | case IOAPIC_REG_ARB_ID: |
133 | break; | 133 | break; |
134 | 134 | ||
135 | default: | 135 | default: |
136 | index = (ioapic->ioregsel - 0x10) >> 1; | 136 | index = (ioapic->ioregsel - 0x10) >> 1; |
137 | 137 | ||
138 | ioapic_debug("change redir index %x val %x\n", index, val); | 138 | ioapic_debug("change redir index %x val %x\n", index, val); |
139 | if (index >= IOAPIC_NUM_PINS) | 139 | if (index >= IOAPIC_NUM_PINS) |
140 | return; | 140 | return; |
141 | e = &ioapic->redirtbl[index]; | 141 | e = &ioapic->redirtbl[index]; |
142 | mask_before = e->fields.mask; | 142 | mask_before = e->fields.mask; |
143 | if (ioapic->ioregsel & 1) { | 143 | if (ioapic->ioregsel & 1) { |
144 | e->bits &= 0xffffffff; | 144 | e->bits &= 0xffffffff; |
145 | e->bits |= (u64) val << 32; | 145 | e->bits |= (u64) val << 32; |
146 | } else { | 146 | } else { |
147 | e->bits &= ~0xffffffffULL; | 147 | e->bits &= ~0xffffffffULL; |
148 | e->bits |= (u32) val; | 148 | e->bits |= (u32) val; |
149 | e->fields.remote_irr = 0; | 149 | e->fields.remote_irr = 0; |
150 | } | 150 | } |
151 | update_handled_vectors(ioapic); | 151 | update_handled_vectors(ioapic); |
152 | mask_after = e->fields.mask; | 152 | mask_after = e->fields.mask; |
153 | if (mask_before != mask_after) | 153 | if (mask_before != mask_after) |
154 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); | 154 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); |
155 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG | 155 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG |
156 | && ioapic->irr & (1 << index)) | 156 | && ioapic->irr & (1 << index)) |
157 | ioapic_service(ioapic, index); | 157 | ioapic_service(ioapic, index); |
158 | break; | 158 | break; |
159 | } | 159 | } |
160 | } | 160 | } |
161 | 161 | ||
162 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | 162 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) |
163 | { | 163 | { |
164 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; | 164 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; |
165 | struct kvm_lapic_irq irqe; | 165 | struct kvm_lapic_irq irqe; |
166 | 166 | ||
167 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " | 167 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " |
168 | "vector=%x trig_mode=%x\n", | 168 | "vector=%x trig_mode=%x\n", |
169 | entry->fields.dest, entry->fields.dest_mode, | 169 | entry->fields.dest, entry->fields.dest_mode, |
170 | entry->fields.delivery_mode, entry->fields.vector, | 170 | entry->fields.delivery_mode, entry->fields.vector, |
171 | entry->fields.trig_mode); | 171 | entry->fields.trig_mode); |
172 | 172 | ||
173 | irqe.dest_id = entry->fields.dest_id; | 173 | irqe.dest_id = entry->fields.dest_id; |
174 | irqe.vector = entry->fields.vector; | 174 | irqe.vector = entry->fields.vector; |
175 | irqe.dest_mode = entry->fields.dest_mode; | 175 | irqe.dest_mode = entry->fields.dest_mode; |
176 | irqe.trig_mode = entry->fields.trig_mode; | 176 | irqe.trig_mode = entry->fields.trig_mode; |
177 | irqe.delivery_mode = entry->fields.delivery_mode << 8; | 177 | irqe.delivery_mode = entry->fields.delivery_mode << 8; |
178 | irqe.level = 1; | 178 | irqe.level = 1; |
179 | irqe.shorthand = 0; | 179 | irqe.shorthand = 0; |
180 | 180 | ||
181 | #ifdef CONFIG_X86 | 181 | #ifdef CONFIG_X86 |
182 | /* Always deliver PIT interrupt to vcpu 0 */ | 182 | /* Always deliver PIT interrupt to vcpu 0 */ |
183 | if (irq == 0) { | 183 | if (irq == 0) { |
184 | irqe.dest_mode = 0; /* Physical mode. */ | 184 | irqe.dest_mode = 0; /* Physical mode. */ |
185 | /* need to read apic_id from apic register since | 185 | /* need to read apic_id from apic register since |
186 | * it can be rewritten */ | 186 | * it can be rewritten */ |
187 | irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; | 187 | irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; |
188 | } | 188 | } |
189 | #endif | 189 | #endif |
190 | return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); | 190 | return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); |
191 | } | 191 | } |
192 | 192 | ||
193 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | 193 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) |
194 | { | 194 | { |
195 | u32 old_irr = ioapic->irr; | 195 | u32 old_irr = ioapic->irr; |
196 | u32 mask = 1 << irq; | 196 | u32 mask = 1 << irq; |
197 | union kvm_ioapic_redirect_entry entry; | 197 | union kvm_ioapic_redirect_entry entry; |
198 | int ret = 1; | 198 | int ret = 1; |
199 | 199 | ||
200 | mutex_lock(&ioapic->lock); | 200 | spin_lock(&ioapic->lock); |
201 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { | 201 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { |
202 | entry = ioapic->redirtbl[irq]; | 202 | entry = ioapic->redirtbl[irq]; |
203 | level ^= entry.fields.polarity; | 203 | level ^= entry.fields.polarity; |
204 | if (!level) | 204 | if (!level) |
205 | ioapic->irr &= ~mask; | 205 | ioapic->irr &= ~mask; |
206 | else { | 206 | else { |
207 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); | 207 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); |
208 | ioapic->irr |= mask; | 208 | ioapic->irr |= mask; |
209 | if ((edge && old_irr != ioapic->irr) || | 209 | if ((edge && old_irr != ioapic->irr) || |
210 | (!edge && !entry.fields.remote_irr)) | 210 | (!edge && !entry.fields.remote_irr)) |
211 | ret = ioapic_service(ioapic, irq); | 211 | ret = ioapic_service(ioapic, irq); |
212 | else | 212 | else |
213 | ret = 0; /* report coalesced interrupt */ | 213 | ret = 0; /* report coalesced interrupt */ |
214 | } | 214 | } |
215 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); | 215 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); |
216 | } | 216 | } |
217 | mutex_unlock(&ioapic->lock); | 217 | spin_unlock(&ioapic->lock); |
218 | 218 | ||
219 | return ret; | 219 | return ret; |
220 | } | 220 | } |
221 | 221 | ||
222 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | 222 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, |
223 | int trigger_mode) | 223 | int trigger_mode) |
224 | { | 224 | { |
225 | int i; | 225 | int i; |
226 | 226 | ||
227 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | 227 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
228 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | 228 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; |
229 | 229 | ||
230 | if (ent->fields.vector != vector) | 230 | if (ent->fields.vector != vector) |
231 | continue; | 231 | continue; |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * We are dropping lock while calling ack notifiers because ack | 234 | * We are dropping lock while calling ack notifiers because ack |
235 | * notifier callbacks for assigned devices call into IOAPIC | 235 | * notifier callbacks for assigned devices call into IOAPIC |
236 | * recursively. Since remote_irr is cleared only after call | 236 | * recursively. Since remote_irr is cleared only after call |
237 | * to notifiers if the same vector will be delivered while lock | 237 | * to notifiers if the same vector will be delivered while lock |
238 | * is dropped it will be put into irr and will be delivered | 238 | * is dropped it will be put into irr and will be delivered |
239 | * after ack notifier returns. | 239 | * after ack notifier returns. |
240 | */ | 240 | */ |
241 | mutex_unlock(&ioapic->lock); | 241 | spin_unlock(&ioapic->lock); |
242 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 242 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
243 | mutex_lock(&ioapic->lock); | 243 | spin_lock(&ioapic->lock); |
244 | 244 | ||
245 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 245 | if (trigger_mode != IOAPIC_LEVEL_TRIG) |
246 | continue; | 246 | continue; |
247 | 247 | ||
248 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 248 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
249 | ent->fields.remote_irr = 0; | 249 | ent->fields.remote_irr = 0; |
250 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) | 250 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) |
251 | ioapic_service(ioapic, i); | 251 | ioapic_service(ioapic, i); |
252 | } | 252 | } |
253 | } | 253 | } |
254 | 254 | ||
255 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) | 255 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) |
256 | { | 256 | { |
257 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 257 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
258 | 258 | ||
259 | smp_rmb(); | 259 | smp_rmb(); |
260 | if (!test_bit(vector, ioapic->handled_vectors)) | 260 | if (!test_bit(vector, ioapic->handled_vectors)) |
261 | return; | 261 | return; |
262 | mutex_lock(&ioapic->lock); | 262 | spin_lock(&ioapic->lock); |
263 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); | 263 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); |
264 | mutex_unlock(&ioapic->lock); | 264 | spin_unlock(&ioapic->lock); |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) | 267 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) |
268 | { | 268 | { |
269 | return container_of(dev, struct kvm_ioapic, dev); | 269 | return container_of(dev, struct kvm_ioapic, dev); |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) | 272 | static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) |
273 | { | 273 | { |
274 | return ((addr >= ioapic->base_address && | 274 | return ((addr >= ioapic->base_address && |
275 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); | 275 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); |
276 | } | 276 | } |
277 | 277 | ||
278 | static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | 278 | static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, |
279 | void *val) | 279 | void *val) |
280 | { | 280 | { |
281 | struct kvm_ioapic *ioapic = to_ioapic(this); | 281 | struct kvm_ioapic *ioapic = to_ioapic(this); |
282 | u32 result; | 282 | u32 result; |
283 | if (!ioapic_in_range(ioapic, addr)) | 283 | if (!ioapic_in_range(ioapic, addr)) |
284 | return -EOPNOTSUPP; | 284 | return -EOPNOTSUPP; |
285 | 285 | ||
286 | ioapic_debug("addr %lx\n", (unsigned long)addr); | 286 | ioapic_debug("addr %lx\n", (unsigned long)addr); |
287 | ASSERT(!(addr & 0xf)); /* check alignment */ | 287 | ASSERT(!(addr & 0xf)); /* check alignment */ |
288 | 288 | ||
289 | addr &= 0xff; | 289 | addr &= 0xff; |
290 | mutex_lock(&ioapic->lock); | 290 | spin_lock(&ioapic->lock); |
291 | switch (addr) { | 291 | switch (addr) { |
292 | case IOAPIC_REG_SELECT: | 292 | case IOAPIC_REG_SELECT: |
293 | result = ioapic->ioregsel; | 293 | result = ioapic->ioregsel; |
294 | break; | 294 | break; |
295 | 295 | ||
296 | case IOAPIC_REG_WINDOW: | 296 | case IOAPIC_REG_WINDOW: |
297 | result = ioapic_read_indirect(ioapic, addr, len); | 297 | result = ioapic_read_indirect(ioapic, addr, len); |
298 | break; | 298 | break; |
299 | 299 | ||
300 | default: | 300 | default: |
301 | result = 0; | 301 | result = 0; |
302 | break; | 302 | break; |
303 | } | 303 | } |
304 | mutex_unlock(&ioapic->lock); | 304 | spin_unlock(&ioapic->lock); |
305 | 305 | ||
306 | switch (len) { | 306 | switch (len) { |
307 | case 8: | 307 | case 8: |
308 | *(u64 *) val = result; | 308 | *(u64 *) val = result; |
309 | break; | 309 | break; |
310 | case 1: | 310 | case 1: |
311 | case 2: | 311 | case 2: |
312 | case 4: | 312 | case 4: |
313 | memcpy(val, (char *)&result, len); | 313 | memcpy(val, (char *)&result, len); |
314 | break; | 314 | break; |
315 | default: | 315 | default: |
316 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); | 316 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); |
317 | } | 317 | } |
318 | return 0; | 318 | return 0; |
319 | } | 319 | } |
320 | 320 | ||
321 | static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | 321 | static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, |
322 | const void *val) | 322 | const void *val) |
323 | { | 323 | { |
324 | struct kvm_ioapic *ioapic = to_ioapic(this); | 324 | struct kvm_ioapic *ioapic = to_ioapic(this); |
325 | u32 data; | 325 | u32 data; |
326 | if (!ioapic_in_range(ioapic, addr)) | 326 | if (!ioapic_in_range(ioapic, addr)) |
327 | return -EOPNOTSUPP; | 327 | return -EOPNOTSUPP; |
328 | 328 | ||
329 | ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", | 329 | ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", |
330 | (void*)addr, len, val); | 330 | (void*)addr, len, val); |
331 | ASSERT(!(addr & 0xf)); /* check alignment */ | 331 | ASSERT(!(addr & 0xf)); /* check alignment */ |
332 | 332 | ||
333 | if (len == 4 || len == 8) | 333 | if (len == 4 || len == 8) |
334 | data = *(u32 *) val; | 334 | data = *(u32 *) val; |
335 | else { | 335 | else { |
336 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); | 336 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); |
337 | return 0; | 337 | return 0; |
338 | } | 338 | } |
339 | 339 | ||
340 | addr &= 0xff; | 340 | addr &= 0xff; |
341 | mutex_lock(&ioapic->lock); | 341 | spin_lock(&ioapic->lock); |
342 | switch (addr) { | 342 | switch (addr) { |
343 | case IOAPIC_REG_SELECT: | 343 | case IOAPIC_REG_SELECT: |
344 | ioapic->ioregsel = data; | 344 | ioapic->ioregsel = data; |
345 | break; | 345 | break; |
346 | 346 | ||
347 | case IOAPIC_REG_WINDOW: | 347 | case IOAPIC_REG_WINDOW: |
348 | ioapic_write_indirect(ioapic, data); | 348 | ioapic_write_indirect(ioapic, data); |
349 | break; | 349 | break; |
350 | #ifdef CONFIG_IA64 | 350 | #ifdef CONFIG_IA64 |
351 | case IOAPIC_REG_EOI: | 351 | case IOAPIC_REG_EOI: |
352 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); | 352 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); |
353 | break; | 353 | break; |
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | default: | 356 | default: |
357 | break; | 357 | break; |
358 | } | 358 | } |
359 | mutex_unlock(&ioapic->lock); | 359 | spin_unlock(&ioapic->lock); |
360 | return 0; | 360 | return 0; |
361 | } | 361 | } |
362 | 362 | ||
363 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic) | 363 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic) |
364 | { | 364 | { |
365 | int i; | 365 | int i; |
366 | 366 | ||
367 | for (i = 0; i < IOAPIC_NUM_PINS; i++) | 367 | for (i = 0; i < IOAPIC_NUM_PINS; i++) |
368 | ioapic->redirtbl[i].fields.mask = 1; | 368 | ioapic->redirtbl[i].fields.mask = 1; |
369 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; | 369 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; |
370 | ioapic->ioregsel = 0; | 370 | ioapic->ioregsel = 0; |
371 | ioapic->irr = 0; | 371 | ioapic->irr = 0; |
372 | ioapic->id = 0; | 372 | ioapic->id = 0; |
373 | update_handled_vectors(ioapic); | 373 | update_handled_vectors(ioapic); |
374 | } | 374 | } |
375 | 375 | ||
376 | static const struct kvm_io_device_ops ioapic_mmio_ops = { | 376 | static const struct kvm_io_device_ops ioapic_mmio_ops = { |
377 | .read = ioapic_mmio_read, | 377 | .read = ioapic_mmio_read, |
378 | .write = ioapic_mmio_write, | 378 | .write = ioapic_mmio_write, |
379 | }; | 379 | }; |
380 | 380 | ||
381 | int kvm_ioapic_init(struct kvm *kvm) | 381 | int kvm_ioapic_init(struct kvm *kvm) |
382 | { | 382 | { |
383 | struct kvm_ioapic *ioapic; | 383 | struct kvm_ioapic *ioapic; |
384 | int ret; | 384 | int ret; |
385 | 385 | ||
386 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); | 386 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); |
387 | if (!ioapic) | 387 | if (!ioapic) |
388 | return -ENOMEM; | 388 | return -ENOMEM; |
389 | mutex_init(&ioapic->lock); | 389 | spin_lock_init(&ioapic->lock); |
390 | kvm->arch.vioapic = ioapic; | 390 | kvm->arch.vioapic = ioapic; |
391 | kvm_ioapic_reset(ioapic); | 391 | kvm_ioapic_reset(ioapic); |
392 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); | 392 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); |
393 | ioapic->kvm = kvm; | 393 | ioapic->kvm = kvm; |
394 | mutex_lock(&kvm->slots_lock); | 394 | mutex_lock(&kvm->slots_lock); |
395 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); | 395 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); |
396 | mutex_unlock(&kvm->slots_lock); | 396 | mutex_unlock(&kvm->slots_lock); |
397 | if (ret < 0) { | 397 | if (ret < 0) { |
398 | kvm->arch.vioapic = NULL; | 398 | kvm->arch.vioapic = NULL; |
399 | kfree(ioapic); | 399 | kfree(ioapic); |
400 | } | 400 | } |
401 | 401 | ||
402 | return ret; | 402 | return ret; |
403 | } | 403 | } |
404 | 404 | ||
405 | void kvm_ioapic_destroy(struct kvm *kvm) | 405 | void kvm_ioapic_destroy(struct kvm *kvm) |
406 | { | 406 | { |
407 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 407 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
408 | 408 | ||
409 | if (ioapic) { | 409 | if (ioapic) { |
410 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); | 410 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); |
411 | kvm->arch.vioapic = NULL; | 411 | kvm->arch.vioapic = NULL; |
412 | kfree(ioapic); | 412 | kfree(ioapic); |
413 | } | 413 | } |
414 | } | 414 | } |
415 | 415 | ||
416 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | 416 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) |
417 | { | 417 | { |
418 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | 418 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); |
419 | if (!ioapic) | 419 | if (!ioapic) |
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | 421 | ||
422 | mutex_lock(&ioapic->lock); | 422 | spin_lock(&ioapic->lock); |
423 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); | 423 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); |
424 | mutex_unlock(&ioapic->lock); | 424 | spin_unlock(&ioapic->lock); |
425 | return 0; | 425 | return 0; |
426 | } | 426 | } |
427 | 427 | ||
428 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | 428 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) |
429 | { | 429 | { |
430 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | 430 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); |
431 | if (!ioapic) | 431 | if (!ioapic) |
432 | return -EINVAL; | 432 | return -EINVAL; |
433 | 433 | ||
434 | mutex_lock(&ioapic->lock); | 434 | spin_lock(&ioapic->lock); |
435 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | 435 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); |
436 | update_handled_vectors(ioapic); | 436 | update_handled_vectors(ioapic); |
437 | mutex_unlock(&ioapic->lock); | 437 | spin_unlock(&ioapic->lock); |
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
440 | 440 |
virt/kvm/ioapic.h
1 | #ifndef __KVM_IO_APIC_H | 1 | #ifndef __KVM_IO_APIC_H |
2 | #define __KVM_IO_APIC_H | 2 | #define __KVM_IO_APIC_H |
3 | 3 | ||
4 | #include <linux/kvm_host.h> | 4 | #include <linux/kvm_host.h> |
5 | 5 | ||
6 | #include "iodev.h" | 6 | #include "iodev.h" |
7 | 7 | ||
8 | struct kvm; | 8 | struct kvm; |
9 | struct kvm_vcpu; | 9 | struct kvm_vcpu; |
10 | 10 | ||
11 | #define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS | 11 | #define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS |
12 | #define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */ | 12 | #define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */ |
13 | #define IOAPIC_EDGE_TRIG 0 | 13 | #define IOAPIC_EDGE_TRIG 0 |
14 | #define IOAPIC_LEVEL_TRIG 1 | 14 | #define IOAPIC_LEVEL_TRIG 1 |
15 | 15 | ||
16 | #define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000 | 16 | #define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000 |
17 | #define IOAPIC_MEM_LENGTH 0x100 | 17 | #define IOAPIC_MEM_LENGTH 0x100 |
18 | 18 | ||
19 | /* Direct registers. */ | 19 | /* Direct registers. */ |
20 | #define IOAPIC_REG_SELECT 0x00 | 20 | #define IOAPIC_REG_SELECT 0x00 |
21 | #define IOAPIC_REG_WINDOW 0x10 | 21 | #define IOAPIC_REG_WINDOW 0x10 |
22 | #define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */ | 22 | #define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */ |
23 | 23 | ||
24 | /* Indirect registers. */ | 24 | /* Indirect registers. */ |
25 | #define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ | 25 | #define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ |
26 | #define IOAPIC_REG_VERSION 0x01 | 26 | #define IOAPIC_REG_VERSION 0x01 |
27 | #define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ | 27 | #define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ |
28 | 28 | ||
29 | /*ioapic delivery mode*/ | 29 | /*ioapic delivery mode*/ |
30 | #define IOAPIC_FIXED 0x0 | 30 | #define IOAPIC_FIXED 0x0 |
31 | #define IOAPIC_LOWEST_PRIORITY 0x1 | 31 | #define IOAPIC_LOWEST_PRIORITY 0x1 |
32 | #define IOAPIC_PMI 0x2 | 32 | #define IOAPIC_PMI 0x2 |
33 | #define IOAPIC_NMI 0x4 | 33 | #define IOAPIC_NMI 0x4 |
34 | #define IOAPIC_INIT 0x5 | 34 | #define IOAPIC_INIT 0x5 |
35 | #define IOAPIC_EXTINT 0x7 | 35 | #define IOAPIC_EXTINT 0x7 |
36 | 36 | ||
37 | struct kvm_ioapic { | 37 | struct kvm_ioapic { |
38 | u64 base_address; | 38 | u64 base_address; |
39 | u32 ioregsel; | 39 | u32 ioregsel; |
40 | u32 id; | 40 | u32 id; |
41 | u32 irr; | 41 | u32 irr; |
42 | u32 pad; | 42 | u32 pad; |
43 | union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS]; | 43 | union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS]; |
44 | unsigned long irq_states[IOAPIC_NUM_PINS]; | 44 | unsigned long irq_states[IOAPIC_NUM_PINS]; |
45 | struct kvm_io_device dev; | 45 | struct kvm_io_device dev; |
46 | struct kvm *kvm; | 46 | struct kvm *kvm; |
47 | void (*ack_notifier)(void *opaque, int irq); | 47 | void (*ack_notifier)(void *opaque, int irq); |
48 | struct mutex lock; | 48 | spinlock_t lock; |
49 | DECLARE_BITMAP(handled_vectors, 256); | 49 | DECLARE_BITMAP(handled_vectors, 256); |
50 | }; | 50 | }; |
51 | 51 | ||
52 | #ifdef DEBUG | 52 | #ifdef DEBUG |
53 | #define ASSERT(x) \ | 53 | #define ASSERT(x) \ |
54 | do { \ | 54 | do { \ |
55 | if (!(x)) { \ | 55 | if (!(x)) { \ |
56 | printk(KERN_EMERG "assertion failed %s: %d: %s\n", \ | 56 | printk(KERN_EMERG "assertion failed %s: %d: %s\n", \ |
57 | __FILE__, __LINE__, #x); \ | 57 | __FILE__, __LINE__, #x); \ |
58 | BUG(); \ | 58 | BUG(); \ |
59 | } \ | 59 | } \ |
60 | } while (0) | 60 | } while (0) |
61 | #else | 61 | #else |
62 | #define ASSERT(x) do { } while (0) | 62 | #define ASSERT(x) do { } while (0) |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm) | 65 | static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm) |
66 | { | 66 | { |
67 | return kvm->arch.vioapic; | 67 | return kvm->arch.vioapic; |
68 | } | 68 | } |
69 | 69 | ||
70 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | 70 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
71 | int short_hand, int dest, int dest_mode); | 71 | int short_hand, int dest, int dest_mode); |
72 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); | 72 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); |
73 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); | 73 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); |
74 | int kvm_ioapic_init(struct kvm *kvm); | 74 | int kvm_ioapic_init(struct kvm *kvm); |
75 | void kvm_ioapic_destroy(struct kvm *kvm); | 75 | void kvm_ioapic_destroy(struct kvm *kvm); |
76 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); | 76 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); |
77 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); | 77 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); |
78 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | 78 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, |
79 | struct kvm_lapic_irq *irq); | 79 | struct kvm_lapic_irq *irq); |
80 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | 80 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); |
81 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | 81 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); |
82 | 82 | ||
83 | #endif | 83 | #endif |
84 | 84 |