Commit a38f84ca8c6991925cb8bb6371ade8df9d3cc1e6
Committed by
Avi Kivity
1 parent
a78484c60e
Exists in
master
and in
4 other branches
KVM: ioapic: Fix an error field reference
Function ioapic_debug() in ioapic_deliver() misnames one field by reference. This patch corrects it. Signed-off-by: Liu Yuan <tailai.ly@taobao.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Showing 1 changed file with 1 addition and 1 deletion Inline Diff
virt/kvm/ioapic.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 MandrakeSoft S.A. | 2 | * Copyright (C) 2001 MandrakeSoft S.A. |
3 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. | 3 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
4 | * | 4 | * |
5 | * MandrakeSoft S.A. | 5 | * MandrakeSoft S.A. |
6 | * 43, rue d'Aboukir | 6 | * 43, rue d'Aboukir |
7 | * 75002 Paris - France | 7 | * 75002 Paris - France |
8 | * http://www.linux-mandrake.com/ | 8 | * http://www.linux-mandrake.com/ |
9 | * http://www.mandrakesoft.com/ | 9 | * http://www.mandrakesoft.com/ |
10 | * | 10 | * |
11 | * This library is free software; you can redistribute it and/or | 11 | * This library is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU Lesser General Public | 12 | * modify it under the terms of the GNU Lesser General Public |
13 | * License as published by the Free Software Foundation; either | 13 | * License as published by the Free Software Foundation; either |
14 | * version 2 of the License, or (at your option) any later version. | 14 | * version 2 of the License, or (at your option) any later version. |
15 | * | 15 | * |
16 | * This library is distributed in the hope that it will be useful, | 16 | * This library is distributed in the hope that it will be useful, |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 | * Lesser General Public License for more details. | 19 | * Lesser General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU Lesser General Public | 21 | * You should have received a copy of the GNU Lesser General Public |
22 | * License along with this library; if not, write to the Free Software | 22 | * License along with this library; if not, write to the Free Software |
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | * | 24 | * |
25 | * Yunhong Jiang <yunhong.jiang@intel.com> | 25 | * Yunhong Jiang <yunhong.jiang@intel.com> |
26 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> | 26 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> |
27 | * Based on Xen 3.1 code. | 27 | * Based on Xen 3.1 code. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/kvm_host.h> | 30 | #include <linux/kvm_host.h> |
31 | #include <linux/kvm.h> | 31 | #include <linux/kvm.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/highmem.h> | 33 | #include <linux/highmem.h> |
34 | #include <linux/smp.h> | 34 | #include <linux/smp.h> |
35 | #include <linux/hrtimer.h> | 35 | #include <linux/hrtimer.h> |
36 | #include <linux/io.h> | 36 | #include <linux/io.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/page.h> | 39 | #include <asm/page.h> |
40 | #include <asm/current.h> | 40 | #include <asm/current.h> |
41 | #include <trace/events/kvm.h> | 41 | #include <trace/events/kvm.h> |
42 | 42 | ||
43 | #include "ioapic.h" | 43 | #include "ioapic.h" |
44 | #include "lapic.h" | 44 | #include "lapic.h" |
45 | #include "irq.h" | 45 | #include "irq.h" |
46 | 46 | ||
47 | #if 0 | 47 | #if 0 |
48 | #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) | 48 | #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) |
49 | #else | 49 | #else |
50 | #define ioapic_debug(fmt, arg...) | 50 | #define ioapic_debug(fmt, arg...) |
51 | #endif | 51 | #endif |
52 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); | 52 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); |
53 | 53 | ||
54 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | 54 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
55 | unsigned long addr, | 55 | unsigned long addr, |
56 | unsigned long length) | 56 | unsigned long length) |
57 | { | 57 | { |
58 | unsigned long result = 0; | 58 | unsigned long result = 0; |
59 | 59 | ||
60 | switch (ioapic->ioregsel) { | 60 | switch (ioapic->ioregsel) { |
61 | case IOAPIC_REG_VERSION: | 61 | case IOAPIC_REG_VERSION: |
62 | result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) | 62 | result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) |
63 | | (IOAPIC_VERSION_ID & 0xff)); | 63 | | (IOAPIC_VERSION_ID & 0xff)); |
64 | break; | 64 | break; |
65 | 65 | ||
66 | case IOAPIC_REG_APIC_ID: | 66 | case IOAPIC_REG_APIC_ID: |
67 | case IOAPIC_REG_ARB_ID: | 67 | case IOAPIC_REG_ARB_ID: |
68 | result = ((ioapic->id & 0xf) << 24); | 68 | result = ((ioapic->id & 0xf) << 24); |
69 | break; | 69 | break; |
70 | 70 | ||
71 | default: | 71 | default: |
72 | { | 72 | { |
73 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; | 73 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; |
74 | u64 redir_content; | 74 | u64 redir_content; |
75 | 75 | ||
76 | ASSERT(redir_index < IOAPIC_NUM_PINS); | 76 | ASSERT(redir_index < IOAPIC_NUM_PINS); |
77 | 77 | ||
78 | redir_content = ioapic->redirtbl[redir_index].bits; | 78 | redir_content = ioapic->redirtbl[redir_index].bits; |
79 | result = (ioapic->ioregsel & 0x1) ? | 79 | result = (ioapic->ioregsel & 0x1) ? |
80 | (redir_content >> 32) & 0xffffffff : | 80 | (redir_content >> 32) & 0xffffffff : |
81 | redir_content & 0xffffffff; | 81 | redir_content & 0xffffffff; |
82 | break; | 82 | break; |
83 | } | 83 | } |
84 | } | 84 | } |
85 | 85 | ||
86 | return result; | 86 | return result; |
87 | } | 87 | } |
88 | 88 | ||
89 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | 89 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) |
90 | { | 90 | { |
91 | union kvm_ioapic_redirect_entry *pent; | 91 | union kvm_ioapic_redirect_entry *pent; |
92 | int injected = -1; | 92 | int injected = -1; |
93 | 93 | ||
94 | pent = &ioapic->redirtbl[idx]; | 94 | pent = &ioapic->redirtbl[idx]; |
95 | 95 | ||
96 | if (!pent->fields.mask) { | 96 | if (!pent->fields.mask) { |
97 | injected = ioapic_deliver(ioapic, idx); | 97 | injected = ioapic_deliver(ioapic, idx); |
98 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 98 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
99 | pent->fields.remote_irr = 1; | 99 | pent->fields.remote_irr = 1; |
100 | } | 100 | } |
101 | 101 | ||
102 | return injected; | 102 | return injected; |
103 | } | 103 | } |
104 | 104 | ||
105 | static void update_handled_vectors(struct kvm_ioapic *ioapic) | 105 | static void update_handled_vectors(struct kvm_ioapic *ioapic) |
106 | { | 106 | { |
107 | DECLARE_BITMAP(handled_vectors, 256); | 107 | DECLARE_BITMAP(handled_vectors, 256); |
108 | int i; | 108 | int i; |
109 | 109 | ||
110 | memset(handled_vectors, 0, sizeof(handled_vectors)); | 110 | memset(handled_vectors, 0, sizeof(handled_vectors)); |
111 | for (i = 0; i < IOAPIC_NUM_PINS; ++i) | 111 | for (i = 0; i < IOAPIC_NUM_PINS; ++i) |
112 | __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors); | 112 | __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors); |
113 | memcpy(ioapic->handled_vectors, handled_vectors, | 113 | memcpy(ioapic->handled_vectors, handled_vectors, |
114 | sizeof(handled_vectors)); | 114 | sizeof(handled_vectors)); |
115 | smp_wmb(); | 115 | smp_wmb(); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | 118 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
119 | { | 119 | { |
120 | unsigned index; | 120 | unsigned index; |
121 | bool mask_before, mask_after; | 121 | bool mask_before, mask_after; |
122 | union kvm_ioapic_redirect_entry *e; | 122 | union kvm_ioapic_redirect_entry *e; |
123 | 123 | ||
124 | switch (ioapic->ioregsel) { | 124 | switch (ioapic->ioregsel) { |
125 | case IOAPIC_REG_VERSION: | 125 | case IOAPIC_REG_VERSION: |
126 | /* Writes are ignored. */ | 126 | /* Writes are ignored. */ |
127 | break; | 127 | break; |
128 | 128 | ||
129 | case IOAPIC_REG_APIC_ID: | 129 | case IOAPIC_REG_APIC_ID: |
130 | ioapic->id = (val >> 24) & 0xf; | 130 | ioapic->id = (val >> 24) & 0xf; |
131 | break; | 131 | break; |
132 | 132 | ||
133 | case IOAPIC_REG_ARB_ID: | 133 | case IOAPIC_REG_ARB_ID: |
134 | break; | 134 | break; |
135 | 135 | ||
136 | default: | 136 | default: |
137 | index = (ioapic->ioregsel - 0x10) >> 1; | 137 | index = (ioapic->ioregsel - 0x10) >> 1; |
138 | 138 | ||
139 | ioapic_debug("change redir index %x val %x\n", index, val); | 139 | ioapic_debug("change redir index %x val %x\n", index, val); |
140 | if (index >= IOAPIC_NUM_PINS) | 140 | if (index >= IOAPIC_NUM_PINS) |
141 | return; | 141 | return; |
142 | e = &ioapic->redirtbl[index]; | 142 | e = &ioapic->redirtbl[index]; |
143 | mask_before = e->fields.mask; | 143 | mask_before = e->fields.mask; |
144 | if (ioapic->ioregsel & 1) { | 144 | if (ioapic->ioregsel & 1) { |
145 | e->bits &= 0xffffffff; | 145 | e->bits &= 0xffffffff; |
146 | e->bits |= (u64) val << 32; | 146 | e->bits |= (u64) val << 32; |
147 | } else { | 147 | } else { |
148 | e->bits &= ~0xffffffffULL; | 148 | e->bits &= ~0xffffffffULL; |
149 | e->bits |= (u32) val; | 149 | e->bits |= (u32) val; |
150 | e->fields.remote_irr = 0; | 150 | e->fields.remote_irr = 0; |
151 | } | 151 | } |
152 | update_handled_vectors(ioapic); | 152 | update_handled_vectors(ioapic); |
153 | mask_after = e->fields.mask; | 153 | mask_after = e->fields.mask; |
154 | if (mask_before != mask_after) | 154 | if (mask_before != mask_after) |
155 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); | 155 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); |
156 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG | 156 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG |
157 | && ioapic->irr & (1 << index)) | 157 | && ioapic->irr & (1 << index)) |
158 | ioapic_service(ioapic, index); | 158 | ioapic_service(ioapic, index); |
159 | break; | 159 | break; |
160 | } | 160 | } |
161 | } | 161 | } |
162 | 162 | ||
163 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | 163 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) |
164 | { | 164 | { |
165 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; | 165 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; |
166 | struct kvm_lapic_irq irqe; | 166 | struct kvm_lapic_irq irqe; |
167 | 167 | ||
168 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " | 168 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " |
169 | "vector=%x trig_mode=%x\n", | 169 | "vector=%x trig_mode=%x\n", |
170 | entry->fields.dest, entry->fields.dest_mode, | 170 | entry->fields.dest_id, entry->fields.dest_mode, |
171 | entry->fields.delivery_mode, entry->fields.vector, | 171 | entry->fields.delivery_mode, entry->fields.vector, |
172 | entry->fields.trig_mode); | 172 | entry->fields.trig_mode); |
173 | 173 | ||
174 | irqe.dest_id = entry->fields.dest_id; | 174 | irqe.dest_id = entry->fields.dest_id; |
175 | irqe.vector = entry->fields.vector; | 175 | irqe.vector = entry->fields.vector; |
176 | irqe.dest_mode = entry->fields.dest_mode; | 176 | irqe.dest_mode = entry->fields.dest_mode; |
177 | irqe.trig_mode = entry->fields.trig_mode; | 177 | irqe.trig_mode = entry->fields.trig_mode; |
178 | irqe.delivery_mode = entry->fields.delivery_mode << 8; | 178 | irqe.delivery_mode = entry->fields.delivery_mode << 8; |
179 | irqe.level = 1; | 179 | irqe.level = 1; |
180 | irqe.shorthand = 0; | 180 | irqe.shorthand = 0; |
181 | 181 | ||
182 | #ifdef CONFIG_X86 | 182 | #ifdef CONFIG_X86 |
183 | /* Always delivery PIT interrupt to vcpu 0 */ | 183 | /* Always delivery PIT interrupt to vcpu 0 */ |
184 | if (irq == 0) { | 184 | if (irq == 0) { |
185 | irqe.dest_mode = 0; /* Physical mode. */ | 185 | irqe.dest_mode = 0; /* Physical mode. */ |
186 | /* need to read apic_id from apic regiest since | 186 | /* need to read apic_id from apic regiest since |
187 | * it can be rewritten */ | 187 | * it can be rewritten */ |
188 | irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; | 188 | irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; |
189 | } | 189 | } |
190 | #endif | 190 | #endif |
191 | return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); | 191 | return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); |
192 | } | 192 | } |
193 | 193 | ||
194 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | 194 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) |
195 | { | 195 | { |
196 | u32 old_irr; | 196 | u32 old_irr; |
197 | u32 mask = 1 << irq; | 197 | u32 mask = 1 << irq; |
198 | union kvm_ioapic_redirect_entry entry; | 198 | union kvm_ioapic_redirect_entry entry; |
199 | int ret = 1; | 199 | int ret = 1; |
200 | 200 | ||
201 | spin_lock(&ioapic->lock); | 201 | spin_lock(&ioapic->lock); |
202 | old_irr = ioapic->irr; | 202 | old_irr = ioapic->irr; |
203 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { | 203 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { |
204 | entry = ioapic->redirtbl[irq]; | 204 | entry = ioapic->redirtbl[irq]; |
205 | level ^= entry.fields.polarity; | 205 | level ^= entry.fields.polarity; |
206 | if (!level) | 206 | if (!level) |
207 | ioapic->irr &= ~mask; | 207 | ioapic->irr &= ~mask; |
208 | else { | 208 | else { |
209 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); | 209 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); |
210 | ioapic->irr |= mask; | 210 | ioapic->irr |= mask; |
211 | if ((edge && old_irr != ioapic->irr) || | 211 | if ((edge && old_irr != ioapic->irr) || |
212 | (!edge && !entry.fields.remote_irr)) | 212 | (!edge && !entry.fields.remote_irr)) |
213 | ret = ioapic_service(ioapic, irq); | 213 | ret = ioapic_service(ioapic, irq); |
214 | else | 214 | else |
215 | ret = 0; /* report coalesced interrupt */ | 215 | ret = 0; /* report coalesced interrupt */ |
216 | } | 216 | } |
217 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); | 217 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); |
218 | } | 218 | } |
219 | spin_unlock(&ioapic->lock); | 219 | spin_unlock(&ioapic->lock); |
220 | 220 | ||
221 | return ret; | 221 | return ret; |
222 | } | 222 | } |
223 | 223 | ||
224 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | 224 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, |
225 | int trigger_mode) | 225 | int trigger_mode) |
226 | { | 226 | { |
227 | int i; | 227 | int i; |
228 | 228 | ||
229 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | 229 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
230 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | 230 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; |
231 | 231 | ||
232 | if (ent->fields.vector != vector) | 232 | if (ent->fields.vector != vector) |
233 | continue; | 233 | continue; |
234 | 234 | ||
235 | /* | 235 | /* |
236 | * We are dropping lock while calling ack notifiers because ack | 236 | * We are dropping lock while calling ack notifiers because ack |
237 | * notifier callbacks for assigned devices call into IOAPIC | 237 | * notifier callbacks for assigned devices call into IOAPIC |
238 | * recursively. Since remote_irr is cleared only after call | 238 | * recursively. Since remote_irr is cleared only after call |
239 | * to notifiers if the same vector will be delivered while lock | 239 | * to notifiers if the same vector will be delivered while lock |
240 | * is dropped it will be put into irr and will be delivered | 240 | * is dropped it will be put into irr and will be delivered |
241 | * after ack notifier returns. | 241 | * after ack notifier returns. |
242 | */ | 242 | */ |
243 | spin_unlock(&ioapic->lock); | 243 | spin_unlock(&ioapic->lock); |
244 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 244 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
245 | spin_lock(&ioapic->lock); | 245 | spin_lock(&ioapic->lock); |
246 | 246 | ||
247 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 247 | if (trigger_mode != IOAPIC_LEVEL_TRIG) |
248 | continue; | 248 | continue; |
249 | 249 | ||
250 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 250 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
251 | ent->fields.remote_irr = 0; | 251 | ent->fields.remote_irr = 0; |
252 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) | 252 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) |
253 | ioapic_service(ioapic, i); | 253 | ioapic_service(ioapic, i); |
254 | } | 254 | } |
255 | } | 255 | } |
256 | 256 | ||
257 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) | 257 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) |
258 | { | 258 | { |
259 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 259 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
260 | 260 | ||
261 | smp_rmb(); | 261 | smp_rmb(); |
262 | if (!test_bit(vector, ioapic->handled_vectors)) | 262 | if (!test_bit(vector, ioapic->handled_vectors)) |
263 | return; | 263 | return; |
264 | spin_lock(&ioapic->lock); | 264 | spin_lock(&ioapic->lock); |
265 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); | 265 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); |
266 | spin_unlock(&ioapic->lock); | 266 | spin_unlock(&ioapic->lock); |
267 | } | 267 | } |
268 | 268 | ||
269 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) | 269 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) |
270 | { | 270 | { |
271 | return container_of(dev, struct kvm_ioapic, dev); | 271 | return container_of(dev, struct kvm_ioapic, dev); |
272 | } | 272 | } |
273 | 273 | ||
274 | static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) | 274 | static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) |
275 | { | 275 | { |
276 | return ((addr >= ioapic->base_address && | 276 | return ((addr >= ioapic->base_address && |
277 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); | 277 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); |
278 | } | 278 | } |
279 | 279 | ||
280 | static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | 280 | static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, |
281 | void *val) | 281 | void *val) |
282 | { | 282 | { |
283 | struct kvm_ioapic *ioapic = to_ioapic(this); | 283 | struct kvm_ioapic *ioapic = to_ioapic(this); |
284 | u32 result; | 284 | u32 result; |
285 | if (!ioapic_in_range(ioapic, addr)) | 285 | if (!ioapic_in_range(ioapic, addr)) |
286 | return -EOPNOTSUPP; | 286 | return -EOPNOTSUPP; |
287 | 287 | ||
288 | ioapic_debug("addr %lx\n", (unsigned long)addr); | 288 | ioapic_debug("addr %lx\n", (unsigned long)addr); |
289 | ASSERT(!(addr & 0xf)); /* check alignment */ | 289 | ASSERT(!(addr & 0xf)); /* check alignment */ |
290 | 290 | ||
291 | addr &= 0xff; | 291 | addr &= 0xff; |
292 | spin_lock(&ioapic->lock); | 292 | spin_lock(&ioapic->lock); |
293 | switch (addr) { | 293 | switch (addr) { |
294 | case IOAPIC_REG_SELECT: | 294 | case IOAPIC_REG_SELECT: |
295 | result = ioapic->ioregsel; | 295 | result = ioapic->ioregsel; |
296 | break; | 296 | break; |
297 | 297 | ||
298 | case IOAPIC_REG_WINDOW: | 298 | case IOAPIC_REG_WINDOW: |
299 | result = ioapic_read_indirect(ioapic, addr, len); | 299 | result = ioapic_read_indirect(ioapic, addr, len); |
300 | break; | 300 | break; |
301 | 301 | ||
302 | default: | 302 | default: |
303 | result = 0; | 303 | result = 0; |
304 | break; | 304 | break; |
305 | } | 305 | } |
306 | spin_unlock(&ioapic->lock); | 306 | spin_unlock(&ioapic->lock); |
307 | 307 | ||
308 | switch (len) { | 308 | switch (len) { |
309 | case 8: | 309 | case 8: |
310 | *(u64 *) val = result; | 310 | *(u64 *) val = result; |
311 | break; | 311 | break; |
312 | case 1: | 312 | case 1: |
313 | case 2: | 313 | case 2: |
314 | case 4: | 314 | case 4: |
315 | memcpy(val, (char *)&result, len); | 315 | memcpy(val, (char *)&result, len); |
316 | break; | 316 | break; |
317 | default: | 317 | default: |
318 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); | 318 | printk(KERN_WARNING "ioapic: wrong length %d\n", len); |
319 | } | 319 | } |
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | 323 | static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, |
324 | const void *val) | 324 | const void *val) |
325 | { | 325 | { |
326 | struct kvm_ioapic *ioapic = to_ioapic(this); | 326 | struct kvm_ioapic *ioapic = to_ioapic(this); |
327 | u32 data; | 327 | u32 data; |
328 | if (!ioapic_in_range(ioapic, addr)) | 328 | if (!ioapic_in_range(ioapic, addr)) |
329 | return -EOPNOTSUPP; | 329 | return -EOPNOTSUPP; |
330 | 330 | ||
331 | ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", | 331 | ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", |
332 | (void*)addr, len, val); | 332 | (void*)addr, len, val); |
333 | ASSERT(!(addr & 0xf)); /* check alignment */ | 333 | ASSERT(!(addr & 0xf)); /* check alignment */ |
334 | 334 | ||
335 | if (len == 4 || len == 8) | 335 | if (len == 4 || len == 8) |
336 | data = *(u32 *) val; | 336 | data = *(u32 *) val; |
337 | else { | 337 | else { |
338 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); | 338 | printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); |
339 | return 0; | 339 | return 0; |
340 | } | 340 | } |
341 | 341 | ||
342 | addr &= 0xff; | 342 | addr &= 0xff; |
343 | spin_lock(&ioapic->lock); | 343 | spin_lock(&ioapic->lock); |
344 | switch (addr) { | 344 | switch (addr) { |
345 | case IOAPIC_REG_SELECT: | 345 | case IOAPIC_REG_SELECT: |
346 | ioapic->ioregsel = data; | 346 | ioapic->ioregsel = data; |
347 | break; | 347 | break; |
348 | 348 | ||
349 | case IOAPIC_REG_WINDOW: | 349 | case IOAPIC_REG_WINDOW: |
350 | ioapic_write_indirect(ioapic, data); | 350 | ioapic_write_indirect(ioapic, data); |
351 | break; | 351 | break; |
352 | #ifdef CONFIG_IA64 | 352 | #ifdef CONFIG_IA64 |
353 | case IOAPIC_REG_EOI: | 353 | case IOAPIC_REG_EOI: |
354 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); | 354 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); |
355 | break; | 355 | break; |
356 | #endif | 356 | #endif |
357 | 357 | ||
358 | default: | 358 | default: |
359 | break; | 359 | break; |
360 | } | 360 | } |
361 | spin_unlock(&ioapic->lock); | 361 | spin_unlock(&ioapic->lock); |
362 | return 0; | 362 | return 0; |
363 | } | 363 | } |
364 | 364 | ||
365 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic) | 365 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic) |
366 | { | 366 | { |
367 | int i; | 367 | int i; |
368 | 368 | ||
369 | for (i = 0; i < IOAPIC_NUM_PINS; i++) | 369 | for (i = 0; i < IOAPIC_NUM_PINS; i++) |
370 | ioapic->redirtbl[i].fields.mask = 1; | 370 | ioapic->redirtbl[i].fields.mask = 1; |
371 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; | 371 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; |
372 | ioapic->ioregsel = 0; | 372 | ioapic->ioregsel = 0; |
373 | ioapic->irr = 0; | 373 | ioapic->irr = 0; |
374 | ioapic->id = 0; | 374 | ioapic->id = 0; |
375 | update_handled_vectors(ioapic); | 375 | update_handled_vectors(ioapic); |
376 | } | 376 | } |
377 | 377 | ||
378 | static const struct kvm_io_device_ops ioapic_mmio_ops = { | 378 | static const struct kvm_io_device_ops ioapic_mmio_ops = { |
379 | .read = ioapic_mmio_read, | 379 | .read = ioapic_mmio_read, |
380 | .write = ioapic_mmio_write, | 380 | .write = ioapic_mmio_write, |
381 | }; | 381 | }; |
382 | 382 | ||
383 | int kvm_ioapic_init(struct kvm *kvm) | 383 | int kvm_ioapic_init(struct kvm *kvm) |
384 | { | 384 | { |
385 | struct kvm_ioapic *ioapic; | 385 | struct kvm_ioapic *ioapic; |
386 | int ret; | 386 | int ret; |
387 | 387 | ||
388 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); | 388 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); |
389 | if (!ioapic) | 389 | if (!ioapic) |
390 | return -ENOMEM; | 390 | return -ENOMEM; |
391 | spin_lock_init(&ioapic->lock); | 391 | spin_lock_init(&ioapic->lock); |
392 | kvm->arch.vioapic = ioapic; | 392 | kvm->arch.vioapic = ioapic; |
393 | kvm_ioapic_reset(ioapic); | 393 | kvm_ioapic_reset(ioapic); |
394 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); | 394 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); |
395 | ioapic->kvm = kvm; | 395 | ioapic->kvm = kvm; |
396 | mutex_lock(&kvm->slots_lock); | 396 | mutex_lock(&kvm->slots_lock); |
397 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); | 397 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); |
398 | mutex_unlock(&kvm->slots_lock); | 398 | mutex_unlock(&kvm->slots_lock); |
399 | if (ret < 0) { | 399 | if (ret < 0) { |
400 | kvm->arch.vioapic = NULL; | 400 | kvm->arch.vioapic = NULL; |
401 | kfree(ioapic); | 401 | kfree(ioapic); |
402 | } | 402 | } |
403 | 403 | ||
404 | return ret; | 404 | return ret; |
405 | } | 405 | } |
406 | 406 | ||
407 | void kvm_ioapic_destroy(struct kvm *kvm) | 407 | void kvm_ioapic_destroy(struct kvm *kvm) |
408 | { | 408 | { |
409 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 409 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
410 | 410 | ||
411 | if (ioapic) { | 411 | if (ioapic) { |
412 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); | 412 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); |
413 | kvm->arch.vioapic = NULL; | 413 | kvm->arch.vioapic = NULL; |
414 | kfree(ioapic); | 414 | kfree(ioapic); |
415 | } | 415 | } |
416 | } | 416 | } |
417 | 417 | ||
418 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | 418 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) |
419 | { | 419 | { |
420 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | 420 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); |
421 | if (!ioapic) | 421 | if (!ioapic) |
422 | return -EINVAL; | 422 | return -EINVAL; |
423 | 423 | ||
424 | spin_lock(&ioapic->lock); | 424 | spin_lock(&ioapic->lock); |
425 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); | 425 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); |
426 | spin_unlock(&ioapic->lock); | 426 | spin_unlock(&ioapic->lock); |
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
429 | 429 | ||
430 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | 430 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) |
431 | { | 431 | { |
432 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); | 432 | struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); |
433 | if (!ioapic) | 433 | if (!ioapic) |
434 | return -EINVAL; | 434 | return -EINVAL; |
435 | 435 | ||
436 | spin_lock(&ioapic->lock); | 436 | spin_lock(&ioapic->lock); |
437 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | 437 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); |
438 | update_handled_vectors(ioapic); | 438 | update_handled_vectors(ioapic); |
439 | spin_unlock(&ioapic->lock); | 439 | spin_unlock(&ioapic->lock); |
440 | return 0; | 440 | return 0; |
441 | } | 441 | } |
442 | 442 |