Commit bf3e05bc1e2781d5d8d3ddb2d8bf2d6ec207e5cb

Authored by Xiao Guangrong
Committed by Avi Kivity
1 parent 28a37544fb

KVM: sort memslots by size and use linear search

Sort memslots based on their size and use a linear search to find them, so
that the larger memslots get a better fit

The idea is from Avi

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 2 changed files with 72 additions and 25 deletions Side-by-side Diff

include/linux/kvm_host.h
... ... @@ -231,8 +231,12 @@
231 231 #define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
232 232 #endif
233 233  
  234 +/*
  235 + * Note:
  236 + * memslots are not sorted by id anymore, please use id_to_memslot()
  237 + * to get the memslot by its id.
  238 + */
234 239 struct kvm_memslots {
235   - int nmemslots;
236 240 u64 generation;
237 241 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
238 242 };
... ... @@ -310,7 +314,8 @@
310 314  
311 315 #define kvm_for_each_memslot(memslot, slots) \
312 316 for (memslot = &slots->memslots[0]; \
313   - memslot < slots->memslots + (slots)->nmemslots; memslot++)
  317 + memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
  318 + memslot++)
314 319  
315 320 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
316 321 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
... ... @@ -336,7 +341,14 @@
336 341 static inline struct kvm_memory_slot *
337 342 id_to_memslot(struct kvm_memslots *slots, int id)
338 343 {
339   - return &slots->memslots[id];
  344 + int i;
  345 +
  346 + for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
  347 + if (slots->memslots[i].id == id)
  348 + return &slots->memslots[i];
  349 +
  350 + WARN_ON(1);
  351 + return NULL;
340 352 }
341 353  
342 354 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
... ... @@ -440,6 +440,15 @@
440 440  
441 441 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
442 442  
  443 +static void kvm_init_memslots_id(struct kvm *kvm)
  444 +{
  445 + int i;
  446 + struct kvm_memslots *slots = kvm->memslots;
  447 +
  448 + for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
  449 + slots->memslots[i].id = i;
  450 +}
  451 +
443 452 static struct kvm *kvm_create_vm(void)
444 453 {
445 454 int r, i;
... ... @@ -465,6 +474,7 @@
465 474 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
466 475 if (!kvm->memslots)
467 476 goto out_err_nosrcu;
  477 + kvm_init_memslots_id(kvm);
468 478 if (init_srcu_struct(&kvm->srcu))
469 479 goto out_err_nosrcu;
470 480 for (i = 0; i < KVM_NR_BUSES; i++) {
471 481  
472 482  
... ... @@ -630,15 +640,54 @@
630 640 }
631 641 #endif /* !CONFIG_S390 */
632 642  
  643 +static struct kvm_memory_slot *
  644 +search_memslots(struct kvm_memslots *slots, gfn_t gfn)
  645 +{
  646 + struct kvm_memory_slot *memslot;
  647 +
  648 + kvm_for_each_memslot(memslot, slots)
  649 + if (gfn >= memslot->base_gfn &&
  650 + gfn < memslot->base_gfn + memslot->npages)
  651 + return memslot;
  652 +
  653 + return NULL;
  654 +}
  655 +
  656 +static int cmp_memslot(const void *slot1, const void *slot2)
  657 +{
  658 + struct kvm_memory_slot *s1, *s2;
  659 +
  660 + s1 = (struct kvm_memory_slot *)slot1;
  661 + s2 = (struct kvm_memory_slot *)slot2;
  662 +
  663 + if (s1->npages < s2->npages)
  664 + return 1;
  665 + if (s1->npages > s2->npages)
  666 + return -1;
  667 +
  668 + return 0;
  669 +}
  670 +
  671 +/*
  672 + * Sort the memslots based on size, so the larger slots
  673 + * will get a better fit.
  674 + */
  675 +static void sort_memslots(struct kvm_memslots *slots)
  676 +{
  677 + sort(slots->memslots, KVM_MEM_SLOTS_NUM,
  678 + sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
  679 +}
  680 +
633 681 void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
634 682 {
635 683 if (new) {
636 684 int id = new->id;
637 685 struct kvm_memory_slot *old = id_to_memslot(slots, id);
  686 + unsigned long npages = old->npages;
638 687  
639 688 *old = *new;
640   - if (id >= slots->nmemslots)
641   - slots->nmemslots = id + 1;
  689 + if (new->npages != npages)
  690 + sort_memslots(slots);
642 691 }
643 692  
644 693 slots->generation++;
... ... @@ -980,14 +1029,7 @@
980 1029 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
981 1030 gfn_t gfn)
982 1031 {
983   - struct kvm_memory_slot *memslot;
984   -
985   - kvm_for_each_memslot(memslot, slots)
986   - if (gfn >= memslot->base_gfn
987   - && gfn < memslot->base_gfn + memslot->npages)
988   - return memslot;
989   -
990   - return NULL;
  1032 + return search_memslots(slots, gfn);
991 1033 }
992 1034  
993 1035 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
994 1036  
995 1037  
... ... @@ -998,20 +1040,13 @@
998 1040  
999 1041 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1000 1042 {
1001   - int i;
1002   - struct kvm_memslots *slots = kvm_memslots(kvm);
  1043 + struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1003 1044  
1004   - for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1005   - struct kvm_memory_slot *memslot = &slots->memslots[i];
  1045 + if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
  1046 + memslot->flags & KVM_MEMSLOT_INVALID)
  1047 + return 0;
1006 1048  
1007   - if (memslot->flags & KVM_MEMSLOT_INVALID)
1008   - continue;
1009   -
1010   - if (gfn >= memslot->base_gfn
1011   - && gfn < memslot->base_gfn + memslot->npages)
1012   - return 1;
1013   - }
1014   - return 0;
  1049 + return 1;
1015 1050 }
1016 1051 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1017 1052