Commit fd10cde9294f73eeccbc16f3fec1ae6cde7b800c
Committed by
Avi Kivity
1 parent
344d9588a9
Exists in
master
and in
4 other branches
KVM paravirt: Add async PF initialization to PV guest.
Enable async PF in a guest if async PF capability is discovered.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Showing 3 changed files with 101 additions and 0 deletions (side-by-side diff).
Documentation/kernel-parameters.txt
... | ... | @@ -1707,6 +1707,9 @@ |
1707 | 1707 | |
1708 | 1708 | no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver |
1709 | 1709 | |
1710 | + no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page | |
1711 | + fault handling. | |
1712 | + | |
1710 | 1713 | nolapic [X86-32,APIC] Do not enable or use the local APIC. |
1711 | 1714 | |
1712 | 1715 | nolapic_timer [X86-32,APIC] Do not use the local APIC timer. |
arch/x86/include/asm/kvm_para.h
arch/x86/kernel/kvm.c
... | ... | @@ -27,16 +27,30 @@ |
27 | 27 | #include <linux/mm.h> |
28 | 28 | #include <linux/highmem.h> |
29 | 29 | #include <linux/hardirq.h> |
30 | +#include <linux/notifier.h> | |
31 | +#include <linux/reboot.h> | |
30 | 32 | #include <asm/timer.h> |
33 | +#include <asm/cpu.h> | |
31 | 34 | |
32 | 35 | #define MMU_QUEUE_SIZE 1024 |
33 | 36 | |
37 | +static int kvmapf = 1; | |
38 | + | |
39 | +static int parse_no_kvmapf(char *arg) | |
40 | +{ | |
41 | + kvmapf = 0; | |
42 | + return 0; | |
43 | +} | |
44 | + | |
45 | +early_param("no-kvmapf", parse_no_kvmapf); | |
46 | + | |
34 | 47 | struct kvm_para_state { |
35 | 48 | u8 mmu_queue[MMU_QUEUE_SIZE]; |
36 | 49 | int mmu_queue_len; |
37 | 50 | }; |
38 | 51 | |
39 | 52 | static DEFINE_PER_CPU(struct kvm_para_state, para_state); |
53 | +static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); | |
40 | 54 | |
41 | 55 | static struct kvm_para_state *kvm_para_state(void) |
42 | 56 | { |
43 | 57 | |
44 | 58 | |
... | ... | @@ -231,12 +245,86 @@ |
231 | 245 | #endif |
232 | 246 | } |
233 | 247 | |
248 | +void __cpuinit kvm_guest_cpu_init(void) | |
249 | +{ | |
250 | + if (!kvm_para_available()) | |
251 | + return; | |
252 | + | |
253 | + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { | |
254 | + u64 pa = __pa(&__get_cpu_var(apf_reason)); | |
255 | + | |
256 | + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); | |
257 | + __get_cpu_var(apf_reason).enabled = 1; | |
258 | + printk(KERN_INFO"KVM setup async PF for cpu %d\n", | |
259 | + smp_processor_id()); | |
260 | + } | |
261 | +} | |
262 | + | |
263 | +static void kvm_pv_disable_apf(void *unused) | |
264 | +{ | |
265 | + if (!__get_cpu_var(apf_reason).enabled) | |
266 | + return; | |
267 | + | |
268 | + wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); | |
269 | + __get_cpu_var(apf_reason).enabled = 0; | |
270 | + | |
271 | + printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", | |
272 | + smp_processor_id()); | |
273 | +} | |
274 | + | |
275 | +static int kvm_pv_reboot_notify(struct notifier_block *nb, | |
276 | + unsigned long code, void *unused) | |
277 | +{ | |
278 | + if (code == SYS_RESTART) | |
279 | + on_each_cpu(kvm_pv_disable_apf, NULL, 1); | |
280 | + return NOTIFY_DONE; | |
281 | +} | |
282 | + | |
283 | +static struct notifier_block kvm_pv_reboot_nb = { | |
284 | + .notifier_call = kvm_pv_reboot_notify, | |
285 | +}; | |
286 | + | |
234 | 287 | #ifdef CONFIG_SMP |
235 | 288 | static void __init kvm_smp_prepare_boot_cpu(void) |
236 | 289 | { |
237 | 290 | WARN_ON(kvm_register_clock("primary cpu clock")); |
291 | + kvm_guest_cpu_init(); | |
238 | 292 | native_smp_prepare_boot_cpu(); |
239 | 293 | } |
294 | + | |
295 | +static void kvm_guest_cpu_online(void *dummy) | |
296 | +{ | |
297 | + kvm_guest_cpu_init(); | |
298 | +} | |
299 | + | |
300 | +static void kvm_guest_cpu_offline(void *dummy) | |
301 | +{ | |
302 | + kvm_pv_disable_apf(NULL); | |
303 | +} | |
304 | + | |
305 | +static int __cpuinit kvm_cpu_notify(struct notifier_block *self, | |
306 | + unsigned long action, void *hcpu) | |
307 | +{ | |
308 | + int cpu = (unsigned long)hcpu; | |
309 | + switch (action) { | |
310 | + case CPU_ONLINE: | |
311 | + case CPU_DOWN_FAILED: | |
312 | + case CPU_ONLINE_FROZEN: | |
313 | + smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0); | |
314 | + break; | |
315 | + case CPU_DOWN_PREPARE: | |
316 | + case CPU_DOWN_PREPARE_FROZEN: | |
317 | + smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1); | |
318 | + break; | |
319 | + default: | |
320 | + break; | |
321 | + } | |
322 | + return NOTIFY_OK; | |
323 | +} | |
324 | + | |
325 | +static struct notifier_block __cpuinitdata kvm_cpu_notifier = { | |
326 | + .notifier_call = kvm_cpu_notify, | |
327 | +}; | |
240 | 328 | #endif |
241 | 329 | |
242 | 330 | void __init kvm_guest_init(void) |
243 | 331 | |
... | ... | @@ -245,8 +333,12 @@ |
245 | 333 | return; |
246 | 334 | |
247 | 335 | paravirt_ops_setup(); |
336 | + register_reboot_notifier(&kvm_pv_reboot_nb); | |
248 | 337 | #ifdef CONFIG_SMP |
249 | 338 | smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; |
339 | + register_cpu_notifier(&kvm_cpu_notifier); | |
340 | +#else | |
341 | + kvm_guest_cpu_init(); | |
250 | 342 | #endif |
251 | 343 | } |