Commit 4c13629f816b1aeff92971a40819b4c25b0622f5

Author: Jeremy Fitzhardinge
Parent: ef691947d8

xen: make a pile of mmu pvop functions static

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

Showing 2 changed files with 23 additions and 60 deletions
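The functions touched below are invoked only through Xen's paravirt-ops function-pointer tables, which are filled in within the same source file, so no other translation unit needs their symbols; giving them internal linkage documents that and lets the now-unused prototypes be dropped from the header. A minimal, self-contained C sketch of the general pattern follows (the demo_mmu_ops struct and demo_* names are invented for illustration and are not the kernel's actual pv_mmu_ops definitions):

#include <stdio.h>

/* A tiny stand-in for a paravirt-ops table: a struct of function pointers. */
struct demo_mmu_ops {
	void (*set_entry)(unsigned long *slot, unsigned long val);
};

/*
 * The callback can be 'static' because external code never calls it by
 * name; it is only reachable through the ops table defined below in the
 * same translation unit.
 */
static void demo_set_entry(unsigned long *slot, unsigned long val)
{
	*slot = val;
}

static const struct demo_mmu_ops demo_ops = {
	.set_entry = demo_set_entry,
};

int main(void)
{
	unsigned long slot = 0;

	demo_ops.set_entry(&slot, 42);	/* indirect call via the ops table */
	printf("slot = %lu\n", slot);
	return 0;
}

In the real file, the pv_mmu_ops setup and the PV_CALLEE_SAVE_REGS_THUNK() wrappers visible in the context lines are what keep these static functions reachable.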

--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -283,7 +283,7 @@
 	*u = *update;
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct mmu_update u;
 
@@ -303,7 +303,7 @@
 	preempt_enable();
 }
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
 	ADD_STATS(pmd_update, 1);
 
@@ -346,7 +346,7 @@
 	return true;
 }
 
-void xen_set_pte(pte_t *ptep, pte_t pteval)
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	ADD_STATS(pte_update, 1);
 //	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
@@ -355,7 +355,7 @@
 	native_set_pte(ptep, pteval);
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
 	xen_set_pte(ptep, pteval);
@@ -449,7 +449,7 @@
 	return val;
 }
 
-pteval_t xen_pte_val(pte_t pte)
+static pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
 
@@ -466,7 +466,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-pgdval_t xen_pgd_val(pgd_t pgd)
+static pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -497,7 +497,7 @@
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-pte_t xen_make_pte(pteval_t pte)
+static pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 
@@ -567,20 +567,20 @@
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
 #endif
 
-pgd_t xen_make_pgd(pgdval_t pgd)
+static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-pmdval_t xen_pmd_val(pmd_t pmd)
+static pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
-void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
 	struct mmu_update u;
 
@@ -600,7 +600,7 @@
 	preempt_enable();
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+static void xen_set_pud(pud_t *ptr, pud_t val)
 {
 	ADD_STATS(pud_update, 1);
 
@@ -617,24 +617,24 @@
 }
 
 #ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
-void xen_pmd_clear(pmd_t *pmdp)
+static void xen_pmd_clear(pmd_t *pmdp)
 {
 	set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
 
-pmd_t xen_make_pmd(pmdval_t pmd)
+static pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -642,13 +642,13 @@
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud)
+static pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-pud_t xen_make_pud(pudval_t pud)
+static pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
 
@@ -656,7 +656,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
-pgd_t *xen_get_user_pgd(pgd_t *pgd)
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
 	unsigned offset = pgd - pgd_page;
@@ -688,7 +688,7 @@
  * 2. It is always pinned
  * 3. It has no user pagetable attached to it
  */
-void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 {
 	preempt_disable();
 
@@ -701,7 +701,7 @@
 	preempt_enable();
 }
 
-void xen_set_pgd(pgd_t *ptr, pgd_t val)
+static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
@@ -1122,14 +1122,14 @@
 	spin_unlock(&pgd_lock);
 }
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	spin_lock(&next->page_table_lock);
 	xen_pgd_pin(next);
 	spin_unlock(&next->page_table_lock);
 }
 
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	spin_lock(&mm->page_table_lock);
 	xen_pgd_pin(mm);
@@ -1216,7 +1216,7 @@
  * pagetable because of lazy tlb flushing. This means we need need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
-void xen_exit_mmap(struct mm_struct *mm)
+static void xen_exit_mmap(struct mm_struct *mm)
 {
 	get_cpu(); /* make sure we don't move around */
 	xen_drop_mm_ref(mm);
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-void xen_exit_mmap(struct mm_struct *mm);
-
-pteval_t xen_pte_val(pte_t);
-pmdval_t xen_pmd_val(pmd_t);
-pgdval_t xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(pteval_t);
-pmd_t xen_make_pmd(pmdval_t);
-pgd_t xen_make_pgd(pgdval_t);
-
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-		    pte_t *ptep, pte_t pteval);
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
-#endif /* CONFIG_X86_PAE */
-
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud(pud_t *ptr, pud_t val);
-void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-
-#if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud);
-pud_t xen_make_pud(pudval_t pudval);
-void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
-void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
-#endif
-
-pgd_t *xen_get_user_pgd(pgd_t *pgd);
-
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep, pte_t pte);