Commit a1f4d39500ad8ed61825eff061debff42386ab5b

Authored by Avi Kivity
1 parent fc34531db3

KVM: Remove memory alias support

As advertised in feature-removal-schedule.txt.  Equivalent support is provided
by overlapping memory regions.

Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 13 changed files with 11 additions and 225 deletions Side-by-side Diff

Documentation/feature-removal-schedule.txt
... ... @@ -538,17 +538,6 @@
538 538  
539 539 ----------------------------
540 540  
541   -What: KVM memory aliases support
542   -When: July 2010
543   -Why: Memory aliasing support is used for speeding up guest vga access
544   - through the vga windows.
545   -
546   - Modern userspace no longer uses this feature, so it's just bitrotted
547   - code and can be removed with no impact.
548   -Who: Avi Kivity <avi@redhat.com>
549   -
550   -----------------------------
551   -
552 541 What: xtime, wall_to_monotonic
553 542 When: 2.6.36+
554 543 Files: kernel/time/timekeeping.c include/linux/time.h
Documentation/kvm/api.txt
... ... @@ -226,17 +226,7 @@
226 226 Parameters: struct kvm_memory_alias (in)
227 227 Returns: 0 (success), -1 (error)
228 228  
229   -struct kvm_memory_alias {
230   - __u32 slot; /* this has a different namespace than memory slots */
231   - __u32 flags;
232   - __u64 guest_phys_addr;
233   - __u64 memory_size;
234   - __u64 target_phys_addr;
235   -};
236   -
237   -Defines a guest physical address space region as an alias to another
238   -region. Useful for aliased address, for example the VGA low memory
239   -window. Should not be used with userspace memory.
  229 +This ioctl is obsolete and has been removed.
240 230  
241 231 4.9 KVM_RUN
242 232  
arch/ia64/kvm/kvm-ia64.c
... ... @@ -1946,11 +1946,6 @@
1946 1946 return vcpu->arch.timer_fired;
1947 1947 }
1948 1948  
1949   -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1950   -{
1951   - return gfn;
1952   -}
1953   -
1954 1949 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1955 1950 {
1956 1951 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
arch/powerpc/kvm/powerpc.c
... ... @@ -36,11 +36,6 @@
36 36 #define CREATE_TRACE_POINTS
37 37 #include "trace.h"
38 38  
39   -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
40   -{
41   - return gfn;
42   -}
43   -
44 39 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
45 40 {
46 41 return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
arch/s390/kvm/kvm-s390.c
... ... @@ -723,11 +723,6 @@
723 723 {
724 724 }
725 725  
726   -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
727   -{
728   - return gfn;
729   -}
730   -
731 726 static int __init kvm_s390_init(void)
732 727 {
733 728 int ret;
arch/x86/include/asm/kvm_host.h
... ... @@ -69,8 +69,6 @@
69 69  
70 70 #define IOPL_SHIFT 12
71 71  
72   -#define KVM_ALIAS_SLOTS 4
73   -
74 72 #define KVM_PERMILLE_MMU_PAGES 20
75 73 #define KVM_MIN_ALLOC_MMU_PAGES 64
76 74 #define KVM_MMU_HASH_SHIFT 10
77 75  
... ... @@ -362,24 +360,7 @@
362 360 u64 hv_vapic;
363 361 };
364 362  
365   -struct kvm_mem_alias {
366   - gfn_t base_gfn;
367   - unsigned long npages;
368   - gfn_t target_gfn;
369   -#define KVM_ALIAS_INVALID 1UL
370   - unsigned long flags;
371   -};
372   -
373   -#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
374   -
375   -struct kvm_mem_aliases {
376   - struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
377   - int naliases;
378   -};
379   -
380 363 struct kvm_arch {
381   - struct kvm_mem_aliases *aliases;
382   -
383 364 unsigned int n_free_mmu_pages;
384 365 unsigned int n_requested_mmu_pages;
385 366 unsigned int n_alloc_mmu_pages;
... ... @@ -654,8 +635,6 @@
654 635  
655 636 int complete_pio(struct kvm_vcpu *vcpu);
656 637 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
657   -
658   -struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
659 638  
660 639 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
661 640 {
arch/x86/kvm/mmu.c
... ... @@ -434,9 +434,7 @@
434 434 int *write_count;
435 435 int i;
436 436  
437   - gfn = unalias_gfn(kvm, gfn);
438   -
439   - slot = gfn_to_memslot_unaliased(kvm, gfn);
  437 + slot = gfn_to_memslot(kvm, gfn);
440 438 for (i = PT_DIRECTORY_LEVEL;
441 439 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
442 440 write_count = slot_largepage_idx(gfn, slot, i);
... ... @@ -450,8 +448,7 @@
450 448 int *write_count;
451 449 int i;
452 450  
453   - gfn = unalias_gfn(kvm, gfn);
454   - slot = gfn_to_memslot_unaliased(kvm, gfn);
  451 + slot = gfn_to_memslot(kvm, gfn);
455 452 for (i = PT_DIRECTORY_LEVEL;
456 453 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
457 454 write_count = slot_largepage_idx(gfn, slot, i);
... ... @@ -467,8 +464,7 @@
467 464 struct kvm_memory_slot *slot;
468 465 int *largepage_idx;
469 466  
470   - gfn = unalias_gfn(kvm, gfn);
471   - slot = gfn_to_memslot_unaliased(kvm, gfn);
  467 + slot = gfn_to_memslot(kvm, gfn);
472 468 if (slot) {
473 469 largepage_idx = slot_largepage_idx(gfn, slot, level);
474 470 return *largepage_idx;
... ... @@ -521,7 +517,6 @@
521 517  
522 518 /*
523 519 * Take gfn and return the reverse mapping to it.
524   - * Note: gfn must be unaliased before this function get called
525 520 */
526 521  
527 522 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
... ... @@ -561,7 +556,6 @@
561 556  
562 557 if (!is_rmap_spte(*spte))
563 558 return count;
564   - gfn = unalias_gfn(vcpu->kvm, gfn);
565 559 sp = page_header(__pa(spte));
566 560 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
567 561 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
... ... @@ -698,7 +692,6 @@
698 692 u64 *spte;
699 693 int i, write_protected = 0;
700 694  
701   - gfn = unalias_gfn(kvm, gfn);
702 695 rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
703 696  
704 697 spte = rmap_next(kvm, rmapp, NULL);
... ... @@ -885,7 +878,6 @@
885 878  
886 879 sp = page_header(__pa(spte));
887 880  
888   - gfn = unalias_gfn(vcpu->kvm, gfn);
889 881 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
890 882  
891 883 kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
... ... @@ -3510,8 +3502,7 @@
3510 3502 if (sp->unsync)
3511 3503 continue;
3512 3504  
3513   - gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3514   - slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
  3505 + slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
3515 3506 rmapp = &slot->rmap[gfn - slot->base_gfn];
3516 3507  
3517 3508 spte = rmap_next(vcpu->kvm, rmapp, NULL);
arch/x86/kvm/paging_tmpl.h
... ... @@ -576,7 +576,6 @@
576 576 * Using the cached information from sp->gfns is safe because:
577 577 * - The spte has a reference to the struct page, so the pfn for a given gfn
578 578 * can't change unless all sptes pointing to it are nuked first.
579   - * - Alias changes zap the entire shadow cache.
580 579 */
581 580 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
582 581 bool clear_unsync)
... ... @@ -611,7 +610,7 @@
611 610 return -EINVAL;
612 611  
613 612 gfn = gpte_to_gfn(gpte);
614   - if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] ||
  613 + if (gfn != sp->gfns[i] ||
615 614 !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
616 615 u64 nonpresent;
617 616  
arch/x86/kvm/x86.c
... ... @@ -2740,115 +2740,6 @@
2740 2740 return kvm->arch.n_alloc_mmu_pages;
2741 2741 }
2742 2742  
2743   -gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
2744   -{
2745   - int i;
2746   - struct kvm_mem_alias *alias;
2747   - struct kvm_mem_aliases *aliases;
2748   -
2749   - aliases = kvm_aliases(kvm);
2750   -
2751   - for (i = 0; i < aliases->naliases; ++i) {
2752   - alias = &aliases->aliases[i];
2753   - if (alias->flags & KVM_ALIAS_INVALID)
2754   - continue;
2755   - if (gfn >= alias->base_gfn
2756   - && gfn < alias->base_gfn + alias->npages)
2757   - return alias->target_gfn + gfn - alias->base_gfn;
2758   - }
2759   - return gfn;
2760   -}
2761   -
2762   -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2763   -{
2764   - int i;
2765   - struct kvm_mem_alias *alias;
2766   - struct kvm_mem_aliases *aliases;
2767   -
2768   - aliases = kvm_aliases(kvm);
2769   -
2770   - for (i = 0; i < aliases->naliases; ++i) {
2771   - alias = &aliases->aliases[i];
2772   - if (gfn >= alias->base_gfn
2773   - && gfn < alias->base_gfn + alias->npages)
2774   - return alias->target_gfn + gfn - alias->base_gfn;
2775   - }
2776   - return gfn;
2777   -}
2778   -
2779   -/*
2780   - * Set a new alias region. Aliases map a portion of physical memory into
2781   - * another portion. This is useful for memory windows, for example the PC
2782   - * VGA region.
2783   - */
2784   -static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2785   - struct kvm_memory_alias *alias)
2786   -{
2787   - int r, n;
2788   - struct kvm_mem_alias *p;
2789   - struct kvm_mem_aliases *aliases, *old_aliases;
2790   -
2791   - r = -EINVAL;
2792   - /* General sanity checks */
2793   - if (alias->memory_size & (PAGE_SIZE - 1))
2794   - goto out;
2795   - if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2796   - goto out;
2797   - if (alias->slot >= KVM_ALIAS_SLOTS)
2798   - goto out;
2799   - if (alias->guest_phys_addr + alias->memory_size
2800   - < alias->guest_phys_addr)
2801   - goto out;
2802   - if (alias->target_phys_addr + alias->memory_size
2803   - < alias->target_phys_addr)
2804   - goto out;
2805   -
2806   - r = -ENOMEM;
2807   - aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2808   - if (!aliases)
2809   - goto out;
2810   -
2811   - mutex_lock(&kvm->slots_lock);
2812   -
2813   - /* invalidate any gfn reference in case of deletion/shrinking */
2814   - memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2815   - aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
2816   - old_aliases = kvm->arch.aliases;
2817   - rcu_assign_pointer(kvm->arch.aliases, aliases);
2818   - synchronize_srcu_expedited(&kvm->srcu);
2819   - kvm_mmu_zap_all(kvm);
2820   - kfree(old_aliases);
2821   -
2822   - r = -ENOMEM;
2823   - aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2824   - if (!aliases)
2825   - goto out_unlock;
2826   -
2827   - memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2828   -
2829   - p = &aliases->aliases[alias->slot];
2830   - p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2831   - p->npages = alias->memory_size >> PAGE_SHIFT;
2832   - p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
2833   - p->flags &= ~(KVM_ALIAS_INVALID);
2834   -
2835   - for (n = KVM_ALIAS_SLOTS; n > 0; --n)
2836   - if (aliases->aliases[n - 1].npages)
2837   - break;
2838   - aliases->naliases = n;
2839   -
2840   - old_aliases = kvm->arch.aliases;
2841   - rcu_assign_pointer(kvm->arch.aliases, aliases);
2842   - synchronize_srcu_expedited(&kvm->srcu);
2843   - kfree(old_aliases);
2844   - r = 0;
2845   -
2846   -out_unlock:
2847   - mutex_unlock(&kvm->slots_lock);
2848   -out:
2849   - return r;
2850   -}
2851   -
2852 2743 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2853 2744 {
2854 2745 int r;
... ... @@ -3056,7 +2947,6 @@
3056 2947 union {
3057 2948 struct kvm_pit_state ps;
3058 2949 struct kvm_pit_state2 ps2;
3059   - struct kvm_memory_alias alias;
3060 2950 struct kvm_pit_config pit_config;
3061 2951 } u;
3062 2952  
... ... @@ -3101,14 +2991,6 @@
3101 2991 case KVM_GET_NR_MMU_PAGES:
3102 2992 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3103 2993 break;
3104   - case KVM_SET_MEMORY_ALIAS:
3105   - r = -EFAULT;
3106   - if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
3107   - goto out;
3108   - r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
3109   - if (r)
3110   - goto out;
3111   - break;
3112 2994 case KVM_CREATE_IRQCHIP: {
3113 2995 struct kvm_pic *vpic;
3114 2996  
... ... @@ -5559,12 +5441,6 @@
5559 5441 if (!kvm)
5560 5442 return ERR_PTR(-ENOMEM);
5561 5443  
5562   - kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
5563   - if (!kvm->arch.aliases) {
5564   - kfree(kvm);
5565   - return ERR_PTR(-ENOMEM);
5566   - }
5567   -
5568 5444 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5569 5445 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5570 5446  
... ... @@ -5622,7 +5498,6 @@
5622 5498 if (kvm->arch.ept_identity_pagetable)
5623 5499 put_page(kvm->arch.ept_identity_pagetable);
5624 5500 cleanup_srcu_struct(&kvm->srcu);
5625   - kfree(kvm->arch.aliases);
5626 5501 kfree(kvm);
5627 5502 }
5628 5503  
arch/x86/kvm/x86.h
... ... @@ -65,13 +65,6 @@
65 65 return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
66 66 }
67 67  
68   -static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
69   -{
70   - return rcu_dereference_check(kvm->arch.aliases,
71   - srcu_read_lock_held(&kvm->srcu)
72   - || lockdep_is_held(&kvm->slots_lock));
73   -}
74   -
75 68 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
76 69 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
77 70  
include/linux/kvm.h
... ... @@ -619,6 +619,7 @@
619 619 */
620 620 #define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
621 621 #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
  622 +/* KVM_SET_MEMORY_ALIAS is obsolete: */
622 623 #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
623 624 #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
624 625 #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
include/linux/kvm_host.h
... ... @@ -286,8 +286,6 @@
286 286 int user_alloc);
287 287 void kvm_disable_largepages(void);
288 288 void kvm_arch_flush_shadow(struct kvm *kvm);
289   -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
290   -gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
291 289  
292 290 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
293 291 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
... ... @@ -562,10 +560,6 @@
562 560 return 1;
563 561 return 0;
564 562 }
565   -#endif
566   -
567   -#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
568   -#define unalias_gfn_instantiation unalias_gfn
569 563 #endif
570 564  
571 565 #ifdef CONFIG_HAVE_KVM_IRQCHIP
virt/kvm/kvm_main.c
... ... @@ -841,7 +841,7 @@
841 841 }
842 842 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
843 843  
844   -struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
  844 +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
845 845 {
846 846 int i;
847 847 struct kvm_memslots *slots = kvm_memslots(kvm);
848 848  
849 849  
... ... @@ -855,20 +855,13 @@
855 855 }
856 856 return NULL;
857 857 }
858   -EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
  858 +EXPORT_SYMBOL_GPL(gfn_to_memslot);
859 859  
860   -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
861   -{
862   - gfn = unalias_gfn(kvm, gfn);
863   - return gfn_to_memslot_unaliased(kvm, gfn);
864   -}
865   -
866 860 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
867 861 {
868 862 int i;
869 863 struct kvm_memslots *slots = kvm_memslots(kvm);
870 864  
871   - gfn = unalias_gfn_instantiation(kvm, gfn);
872 865 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
873 866 struct kvm_memory_slot *memslot = &slots->memslots[i];
874 867  
... ... @@ -913,7 +906,6 @@
913 906 struct kvm_memslots *slots = kvm_memslots(kvm);
914 907 struct kvm_memory_slot *memslot = NULL;
915 908  
916   - gfn = unalias_gfn(kvm, gfn);
917 909 for (i = 0; i < slots->nmemslots; ++i) {
918 910 memslot = &slots->memslots[i];
919 911  
... ... @@ -934,8 +926,7 @@
934 926 {
935 927 struct kvm_memory_slot *slot;
936 928  
937   - gfn = unalias_gfn_instantiation(kvm, gfn);
938   - slot = gfn_to_memslot_unaliased(kvm, gfn);
  929 + slot = gfn_to_memslot(kvm, gfn);
939 930 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
940 931 return bad_hva();
941 932 return gfn_to_hva_memslot(slot, gfn);
... ... @@ -1202,8 +1193,7 @@
1202 1193 {
1203 1194 struct kvm_memory_slot *memslot;
1204 1195  
1205   - gfn = unalias_gfn(kvm, gfn);
1206   - memslot = gfn_to_memslot_unaliased(kvm, gfn);
  1196 + memslot = gfn_to_memslot(kvm, gfn);
1207 1197 if (memslot && memslot->dirty_bitmap) {
1208 1198 unsigned long rel_gfn = gfn - memslot->base_gfn;
1209 1199