Commit 642ea3ed9c652bb9e105e55afcb9ad15b040f71f

Authored by Sam Ravnborg
Committed by David S. Miller
1 parent 5471fa6265

sparc32: drop btfixup in pgalloc_32.h

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 63 additions and 81 deletions (side-by-side diff)

arch/sparc/include/asm/pgalloc_32.h
... ... @@ -4,8 +4,11 @@
4 4 #include <linux/kernel.h>
5 5 #include <linux/sched.h>
6 6  
7   -#include <asm/page.h>
  7 +#include <asm/pgtsrmmu.h>
8 8 #include <asm/btfixup.h>
  9 +#include <asm/pgtable.h>
  10 +#include <asm/vaddrs.h>
  11 +#include <asm/page.h>
9 12  
10 13 struct page;
11 14  
... ... @@ -15,6 +18,10 @@
15 18 unsigned long pgtable_cache_sz;
16 19 unsigned long pgd_cache_sz;
17 20 } pgt_quicklists;
  21 +
  22 +unsigned long srmmu_get_nocache(int size, int align);
  23 +void srmmu_free_nocache(unsigned long vaddr, int size);
  24 +
18 25 #define pgd_quicklist (pgt_quicklists.pgd_cache)
19 26 #define pmd_quicklist ((unsigned long *)0)
20 27 #define pte_quicklist (pgt_quicklists.pte_cache)
21 28  
22 29  
23 30  
24 31  
25 32  
26 33  
27 34  
28 35  
29 36  
... ... @@ -23,44 +30,62 @@
23 30  
24 31 #define check_pgt_cache() do { } while (0)
25 32  
26   -BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
27   -#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
  33 +pgd_t *get_pgd_fast(void);
  34 +static inline void free_pgd_fast(pgd_t *pgd)
  35 +{
  36 + srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
  37 +}
28 38  
29   -BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
30   -#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
31   -
32 39 #define pgd_free(mm, pgd) free_pgd_fast(pgd)
33 40 #define pgd_alloc(mm) get_pgd_fast()
34 41  
35   -BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
36   -#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
  42 +static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
  43 +{
  44 + unsigned long pa = __nocache_pa((unsigned long)pmdp);
  45 +
  46 + set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
  47 +}
  48 +
37 49 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
38 50  
39   -BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
40   -#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
  51 +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
  52 + unsigned long address)
  53 +{
  54 + return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
  55 + SRMMU_PMD_TABLE_SIZE);
  56 +}
41 57  
42   -BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
43   -#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
  58 +static inline void free_pmd_fast(pmd_t * pmd)
  59 +{
  60 + srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
  61 +}
44 62  
45 63 #define pmd_free(mm, pmd) free_pmd_fast(pmd)
46 64 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
47 65  
48   -BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
49   -#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
  66 +void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
50 67 #define pmd_pgtable(pmd) pmd_page(pmd)
51   -BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
52   -#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
53 68  
54   -BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
55   -#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
56   -BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
57   -#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
  69 +void pmd_set(pmd_t *pmdp, pte_t *ptep);
  70 +#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
58 71  
59   -BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
60   -#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
  72 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
61 73  
62   -BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
63   -#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
  74 +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  75 + unsigned long address)
  76 +{
  77 + return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
  78 +}
  79 +
  80 +
  81 +static inline void free_pte_fast(pte_t *pte)
  82 +{
  83 + srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
  84 +}
  85 +
  86 +#define pte_free_kernel(mm, pte) free_pte_fast(pte)
  87 +
  88 +void pte_free(struct mm_struct * mm, pgtable_t pte);
64 89 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
65 90  
66 91 #endif /* _SPARC_PGALLOC_H */
arch/sparc/mm/srmmu.c
... ... @@ -132,10 +132,7 @@
132 132 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
133 133 { set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
134 134  
135   -static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
136   -{ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
137   -
138   -static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
  135 +void pmd_set(pmd_t *pmdp, pte_t *ptep)
139 136 {
140 137 unsigned long ptp; /* Physical address, shifted right by 4 */
141 138 int i;
... ... @@ -147,7 +144,7 @@
147 144 }
148 145 }
149 146  
150   -static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
  147 +void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
151 148 {
152 149 unsigned long ptp; /* Physical address, shifted right by 4 */
153 150 int i;
... ... @@ -232,7 +229,7 @@
232 229 return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
233 230 }
234 231  
235   -static unsigned long srmmu_get_nocache(int size, int align)
  232 +unsigned long srmmu_get_nocache(int size, int align)
236 233 {
237 234 unsigned long tmp;
238 235  
... ... @@ -244,7 +241,7 @@
244 241 return tmp;
245 242 }
246 243  
247   -static void srmmu_free_nocache(unsigned long vaddr, int size)
  244 +void srmmu_free_nocache(unsigned long vaddr, int size)
248 245 {
249 246 int offset;
250 247  
... ... @@ -354,7 +351,7 @@
354 351 flush_tlb_all();
355 352 }
356 353  
357   -static inline pgd_t *srmmu_get_pgd_fast(void)
  354 +pgd_t *get_pgd_fast(void)
358 355 {
359 356 pgd_t *pgd = NULL;
360 357  
... ... @@ -369,21 +366,6 @@
369 366 return pgd;
370 367 }
371 368  
372   -static void srmmu_free_pgd_fast(pgd_t *pgd)
373   -{
374   - srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
375   -}
376   -
377   -static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
378   -{
379   - return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
380   -}
381   -
382   -static void srmmu_pmd_free(pmd_t * pmd)
383   -{
384   - srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
385   -}
386   -
387 369 /*
388 370 * Hardware needs alignment to 256 only, but we align to whole page size
389 371 * to reduce fragmentation problems due to the buddy principle.
390 372  
391 373  
392 374  
393 375  
... ... @@ -392,32 +374,20 @@
392 374 * Alignments up to the page size are the same for physical and virtual
393 375 * addresses of the nocache area.
394 376 */
395   -static pte_t *
396   -srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
  377 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
397 378 {
398   - return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
399   -}
400   -
401   -static pgtable_t
402   -srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
403   -{
404 379 unsigned long pte;
405 380 struct page *page;
406 381  
407   - if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
  382 + if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
408 383 return NULL;
409 384 page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
410 385 pgtable_page_ctor(page);
411 386 return page;
412 387 }
413 388  
414   -static void srmmu_free_pte_fast(pte_t *pte)
  389 +void pte_free(struct mm_struct *mm, pgtable_t pte)
415 390 {
416   - srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
417   -}
418   -
419   -static void srmmu_pte_free(pgtable_t pte)
420   -{
421 391 unsigned long p;
422 392  
423 393 pgtable_page_dtor(pte);
... ... @@ -977,7 +947,7 @@
977 947 if (pmdp == NULL)
978 948 early_pgtable_allocfail("pmd");
979 949 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
980   - srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
  950 + pgd_set(__nocache_fix(pgdp), pmdp);
981 951 }
982 952 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
983 953 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
... ... @@ -985,7 +955,7 @@
985 955 if (ptep == NULL)
986 956 early_pgtable_allocfail("pte");
987 957 memset(__nocache_fix(ptep), 0, PTE_SIZE);
988   - srmmu_pmd_set(__nocache_fix(pmdp), ptep);
  958 + pmd_set(__nocache_fix(pmdp), ptep);
989 959 }
990 960 if (start > (0xffffffffUL - PMD_SIZE))
991 961 break;
... ... @@ -1007,7 +977,7 @@
1007 977 if (pmdp == NULL)
1008 978 early_pgtable_allocfail("pmd");
1009 979 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
1010   - srmmu_pgd_set(pgdp, pmdp);
  980 + pgd_set(pgdp, pmdp);
1011 981 }
1012 982 pmdp = srmmu_pmd_offset(pgdp, start);
1013 983 if(srmmu_pmd_none(*pmdp)) {
... ... @@ -1016,7 +986,7 @@
1016 986 if (ptep == NULL)
1017 987 early_pgtable_allocfail("pte");
1018 988 memset(ptep, 0, PTE_SIZE);
1019   - srmmu_pmd_set(pmdp, ptep);
  989 + pmd_set(pmdp, ptep);
1020 990 }
1021 991 if (start > (0xffffffffUL - PMD_SIZE))
1022 992 break;
... ... @@ -1073,7 +1043,7 @@
1073 1043 if (pmdp == NULL)
1074 1044 early_pgtable_allocfail("pmd");
1075 1045 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1076   - srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
  1046 + pgd_set(__nocache_fix(pgdp), pmdp);
1077 1047 }
1078 1048 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
1079 1049 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
... ... @@ -1082,7 +1052,7 @@
1082 1052 if (ptep == NULL)
1083 1053 early_pgtable_allocfail("pte");
1084 1054 memset(__nocache_fix(ptep), 0, PTE_SIZE);
1085   - srmmu_pmd_set(__nocache_fix(pmdp), ptep);
  1055 + pmd_set(__nocache_fix(pmdp), ptep);
1086 1056 }
1087 1057 if(what == 1) {
1088 1058 /*
1089 1059  
... ... @@ -2047,22 +2017,9 @@
2047 2017  
2048 2018 BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
2049 2019  
2050   - BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
2051   - BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
2052   - BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
2053   -
2054 2020 BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
2055 2021 BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
2056 2022 BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
2057   -
2058   - BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
2059   - BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
2060   - BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
2061   - BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
2062   - BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
2063   - BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
2064   - BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
2065   - BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
2066 2023  
2067 2024 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
2068 2025 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);