Commit 828554136bbacae6e39fc31b9cd7e7c660ad7530

Authored by Joerg Roedel
Committed by Avi Kivity
1 parent 95c87e2b44

KVM: Remove unnecessary divide operations

This patch converts unnecessary divide and modulo operations
in the KVM large page related code into logical operations.
This allows gfn_t to be converted to u64 without breaking
32-bit builds.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Showing 6 changed files with 15 additions and 11 deletions Side-by-side Diff

arch/ia64/include/asm/kvm_host.h
... ... @@ -235,6 +235,7 @@
235 235 #define KVM_REQ_PTC_G 32
236 236 #define KVM_REQ_RESUME 33
237 237  
  238 +#define KVM_HPAGE_GFN_SHIFT(x) 0
238 239 #define KVM_NR_PAGE_SIZES 1
239 240 #define KVM_PAGES_PER_HPAGE(x) 1
240 241  
arch/powerpc/include/asm/kvm_host.h
... ... @@ -35,6 +35,7 @@
35 35 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
36 36  
37 37 /* We don't currently support large pages. */
  38 +#define KVM_HPAGE_GFN_SHIFT(x) 0
38 39 #define KVM_NR_PAGE_SIZES 1
39 40 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
40 41  
arch/s390/include/asm/kvm_host.h
... ... @@ -41,7 +41,8 @@
41 41 } __attribute__((packed));
42 42  
43 43 #define KVM_NR_PAGE_SIZES 2
44   -#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
  44 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
  45 +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
45 46 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
46 47 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
47 48 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
arch/x86/include/asm/kvm_host.h
... ... @@ -44,7 +44,8 @@
44 44  
45 45 /* KVM Hugepage definitions for x86 */
46 46 #define KVM_NR_PAGE_SIZES 3
47   -#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
  47 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
  48 +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
48 49 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
49 50 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
50 51 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
arch/x86/kvm/mmu.c
... ... @@ -423,8 +423,8 @@
423 423 {
424 424 unsigned long idx;
425 425  
426   - idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
427   - (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
  426 + idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
  427 + (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
428 428 return &slot->lpage_info[level - 2][idx].write_count;
429 429 }
430 430  
... ... @@ -528,8 +528,8 @@
528 528 if (likely(level == PT_PAGE_TABLE_LEVEL))
529 529 return &slot->rmap[gfn - slot->base_gfn];
530 530  
531   - idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
532   - (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
  531 + idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
  532 + (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
533 533  
534 534 return &slot->lpage_info[level - 2][idx].rmap_pde;
535 535 }
virt/kvm/kvm_main.c
... ... @@ -626,9 +626,9 @@
626 626 if (new.lpage_info[i])
627 627 continue;
628 628  
629   - lpages = 1 + (base_gfn + npages - 1) /
630   - KVM_PAGES_PER_HPAGE(level);
631   - lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
  629 + lpages = 1 + ((base_gfn + npages - 1)
  630 + >> KVM_HPAGE_GFN_SHIFT(level));
  631 + lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
632 632  
633 633 new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
634 634  
635 635  
... ... @@ -638,9 +638,9 @@
638 638 memset(new.lpage_info[i], 0,
639 639 lpages * sizeof(*new.lpage_info[i]));
640 640  
641   - if (base_gfn % KVM_PAGES_PER_HPAGE(level))
  641 + if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
642 642 new.lpage_info[i][0].write_count = 1;
643   - if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
  643 + if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
644 644 new.lpage_info[i][lpages - 1].write_count = 1;
645 645 ugfn = new.userspace_addr >> PAGE_SHIFT;
646 646 /*