Commit 2f569afd9ced9ebec9a6eb3dbf6f83429be0a7b4

Authored by Martin Schwidefsky
Committed by Linus Torvalds
1 parent 13214adf73

CONFIG_HIGHPTE vs. sub-page page tables.

Background: I've implemented 1K/2K page tables for s390.  These sub-page
page tables are required to properly support the s390 virtualization
instruction with KVM.  The SIE instruction requires that the page tables
have 256 page table entries (pte) followed by 256 page status table entries
(pgste).  The pgstes are only required if the process is using the SIE
instruction.  The pgstes are updated by the hardware and by the hypervisor
for a number of reasons, one of them being dirty and reference bit tracking.
To avoid wasting memory, the standard pte table allocation should return
1K/2K (31/64 bit) tables, and 2K/4K tables only if the process is using SIE.
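
To picture the layout the SIE instruction expects, here is an illustrative
struct (hypothetical name, 64 bit entry sizes; halve them for 31 bit):

	/*
	 * Illustration only, not an s390 kernel type: under SIE a 4K page
	 * table page holds 256 ptes followed by 256 pgstes.  Without SIE
	 * only the pte half is needed, hence the 1K/2K standard allocation
	 * mentioned above.
	 */
	struct sie_page_table {
		unsigned long pte[256];		/* 256 * 8 bytes = 2K */
		unsigned long pgste[256];	/* 256 * 8 bytes = 2K */
	};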

Problem: Page size on s390 is 4K, page table size is 1K or 2K.  That means
the s390 version of pte_alloc_one cannot return a pointer to a struct
page.  The trouble is that with the CONFIG_HIGHPTE feature on x86
pte_alloc_one cannot return a pointer to a pte either, since that would
require more than 32 bits for the return value of pte_alloc_one (and the
pte * would not be accessible anyway since it is not kmapped).
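
To illustrate the HIGHPTE constraint: a pte page allocated with
__GFP_HIGHMEM may have no kernel mapping at all, so only its struct page
can be passed around and every access has to go through a temporary
mapping.  A rough sketch along the lines of the 2.6-era kmap_atomic API
(the helper name is made up):

	/* read one entry of a possibly-highmem pte page */
	static pte_t read_pte_entry(struct page *pte_page, unsigned int idx)
	{
		pte_t *ptep = kmap_atomic(pte_page, KM_PTE0);
		pte_t entry = ptep[idx];

		kunmap_atomic(ptep, KM_PTE0);
		return entry;
	}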

Solution: The only solution I found to this dilemma is a new typedef: a
pgtable_t.  For s390 pgtable_t will be a (pte *) - to be introduced with a
later patch.  For everybody else it will be a (struct page *).  The
additional problem with the initialization of the ptl lock and the
NR_PAGETABLE accounting is solved with a constructor pgtable_page_ctor and
a destructor pgtable_page_dtor.  The page table allocation and free
functions need to call these two whenever a page table page is allocated or
freed.  pmd_populate will get a pgtable_t instead of a struct page pointer.
 To get the pgtable_t back from a pmd entry that has been installed with
pmd_populate, a new function pmd_pgtable is added.  It replaces the pmd_page
call in free_pte_range and apply_to_pte_range.
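
The per-architecture pattern that the diff below repeats can be summed up
by this sketch (generic struct-page-based pgtable_t; allocation details
differ per architecture and the real code lives in each pgalloc header):

	typedef struct page *pgtable_t;		/* becomes (pte *) on s390 later */

	static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
					      unsigned long addr)
	{
		struct page *page;

		page = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);	/* ptl init + NR_PAGETABLE accounting */
		return page;
	}

	static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
	{
		pgtable_page_dtor(pte);		/* ptl deinit + NR_PAGETABLE accounting */
		__free_page(pte);
	}

	#define pmd_pgtable(pmd) pmd_page(pmd)	/* pgtable_t back from a pmd entry */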

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 53 changed files with 326 additions and 132 deletions

arch/frv/mm/pgalloc.c
... ... @@ -28,7 +28,7 @@
28 28 return pte;
29 29 }
30 30  
31   -struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  31 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
32 32 {
33 33 struct page *page;
34 34  
35 35  
... ... @@ -37,9 +37,11 @@
37 37 #else
38 38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
39 39 #endif
40   - if (page)
  40 + if (page) {
41 41 clear_highpage(page);
42   - flush_dcache_page(page);
  42 + pgtable_page_ctor(page);
  43 + flush_dcache_page(page);
  44 + }
43 45 return page;
44 46 }
45 47  
arch/powerpc/mm/pgtable_32.c
... ... @@ -107,19 +107,20 @@
107 107 return pte;
108 108 }
109 109  
110   -struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  110 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
111 111 {
112 112 struct page *ptepage;
113 113  
114 114 #ifdef CONFIG_HIGHPTE
115   - gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
  115 + gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
116 116 #else
117   - gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
  117 + gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
118 118 #endif
119 119  
120 120 ptepage = alloc_pages(flags, 0);
121   - if (ptepage)
122   - clear_highpage(ptepage);
  121 + if (!ptepage)
  122 + return NULL;
  123 + pgtable_page_ctor(ptepage);
123 124 return ptepage;
124 125 }
125 126  
126 127  
... ... @@ -131,11 +132,12 @@
131 132 free_page((unsigned long)pte);
132 133 }
133 134  
134   -void pte_free(struct mm_struct *mm, struct page *ptepage)
  135 +void pte_free(struct mm_struct *mm, pgtable_t ptepage)
135 136 {
136 137 #ifdef CONFIG_SMP
137 138 hash_page_sync();
138 139 #endif
  140 + pgtable_page_dtor(ptepage);
139 141 __free_page(ptepage);
140 142 }
141 143  
arch/ppc/mm/pgtable.c
... ... @@ -95,7 +95,7 @@
95 95 return pte;
96 96 }
97 97  
98   -struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  98 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
99 99 {
100 100 struct page *ptepage;
101 101  
102 102  
... ... @@ -106,8 +106,10 @@
106 106 #endif
107 107  
108 108 ptepage = alloc_pages(flags, 0);
109   - if (ptepage)
  109 + if (ptepage) {
110 110 clear_highpage(ptepage);
  111 + pgtable_page_ctor(ptepage);
  112 + }
111 113 return ptepage;
112 114 }
113 115  
114 116  
... ... @@ -119,11 +121,12 @@
119 121 free_page((unsigned long)pte);
120 122 }
121 123  
122   -void pte_free(struct mm_struct *mm, struct page *ptepage)
  124 +void pte_free(struct mm_struct *mm, pgtable_t ptepage)
123 125 {
124 126 #ifdef CONFIG_SMP
125 127 hash_page_sync();
126 128 #endif
  129 + pgtable_page_dtor(ptepage);
127 130 __free_page(ptepage);
128 131 }
129 132  
arch/s390/mm/pgtable.c
... ... @@ -78,6 +78,7 @@
78 78 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
79 79 page->index = (addr_t) table;
80 80 }
  81 + pgtable_page_ctor(page);
81 82 table = (unsigned long *) page_to_phys(page);
82 83 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
83 84 return table;
... ... @@ -87,6 +88,7 @@
87 88 {
88 89 unsigned long *shadow = get_shadow_pte(table);
89 90  
  91 + pgtable_page_dtor(virt_to_page(table));
90 92 if (shadow)
91 93 free_page((unsigned long) shadow);
92 94 free_page((unsigned long) table);
arch/sparc/mm/srmmu.c
... ... @@ -489,14 +489,17 @@
489 489 return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
490 490 }
491 491  
492   -static struct page *
  492 +static pgtable_t
493 493 srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
494 494 {
495 495 unsigned long pte;
  496 + struct page *page;
496 497  
497 498 if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
498 499 return NULL;
499   - return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
  500 + page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
  501 + pgtable_page_ctor(page);
  502 + return page;
500 503 }
501 504  
502 505 static void srmmu_free_pte_fast(pte_t *pte)
503 506  
... ... @@ -504,10 +507,11 @@
504 507 srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
505 508 }
506 509  
507   -static void srmmu_pte_free(struct page *pte)
  510 +static void srmmu_pte_free(pgtable_t pte)
508 511 {
509 512 unsigned long p;
510 513  
  514 + pgtable_page_dtor(pte);
511 515 p = (unsigned long)page_address(pte); /* Cached address (for test) */
512 516 if (p == 0)
513 517 BUG();
arch/sparc/mm/sun4c.c
... ... @@ -1947,12 +1947,17 @@
1947 1947 return pte;
1948 1948 }
1949 1949  
1950   -static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
  1950 +static pgtable_t sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
1951 1951 {
1952   - pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
  1952 + pte_t *pte;
  1953 + struct page *page;
  1954 +
  1955 + pte = sun4c_pte_alloc_one_kernel(mm, address);
1953 1956 if (pte == NULL)
1954 1957 return NULL;
1955   - return virt_to_page(pte);
  1958 + page = virt_to_page(pte);
  1959 + pgtable_page_ctor(page);
  1960 + return page;
1956 1961 }
1957 1962  
1958 1963 static inline void sun4c_free_pte_fast(pte_t *pte)
1959 1964  
... ... @@ -1962,8 +1967,9 @@
1962 1967 pgtable_cache_size++;
1963 1968 }
1964 1969  
1965   -static void sun4c_pte_free(struct page *pte)
  1970 +static void sun4c_pte_free(pgtable_t pte)
1966 1971 {
  1972 + pgtable_page_dtor(pte);
1967 1973 sun4c_free_pte_fast(page_address(pte));
1968 1974 }
1969 1975  
arch/um/kernel/mem.c
... ... @@ -354,11 +354,13 @@
354 354 return pte;
355 355 }
356 356  
357   -struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  357 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
358 358 {
359 359 struct page *pte;
360 360  
361 361 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  362 + if (pte)
  363 + pgtable_page_ctor(pte);
362 364 return pte;
363 365 }
364 366  
arch/x86/mm/pgtable_32.c
... ... @@ -183,7 +183,7 @@
183 183 return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
184 184 }
185 185  
186   -struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  186 +pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
187 187 {
188 188 struct page *pte;
189 189  
... ... @@ -192,6 +192,8 @@
192 192 #else
193 193 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
194 194 #endif
  195 + if (pte)
  196 + pgtable_page_ctor(pte);
195 197 return pte;
196 198 }
197 199  
... ... @@ -365,6 +367,7 @@
365 367  
366 368 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
367 369 {
  370 + pgtable_page_dtor(pte);
368 371 paravirt_release_pt(page_to_pfn(pte));
369 372 tlb_remove_page(tlb, pte);
370 373 }
include/asm-alpha/page.h
... ... @@ -62,6 +62,8 @@
62 62  
63 63 #endif /* STRICT_MM_TYPECHECKS */
64 64  
  65 +typedef struct page *pgtable_t;
  66 +
65 67 #ifdef USE_48_BIT_KSEG
66 68 #define PAGE_OFFSET 0xffff800000000000UL
67 69 #else
include/asm-alpha/pgalloc.h
... ... @@ -11,10 +11,11 @@
11 11 */
12 12  
13 13 static inline void
14   -pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
  14 +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
15 15 {
16 16 pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
17 17 }
  18 +#define pmd_pgtable(pmd) pmd_page(pmd)
18 19  
19 20 static inline void
20 21 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
21 22  
22 23  
23 24  
... ... @@ -57,18 +58,23 @@
57 58 free_page((unsigned long)pte);
58 59 }
59 60  
60   -static inline struct page *
61   -pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  61 +static inline pgtable_t
  62 +pte_alloc_one(struct mm_struct *mm, unsigned long address)
62 63 {
63   - pte_t *pte = pte_alloc_one_kernel(mm, addr);
64   - if (pte)
65   - return virt_to_page(pte);
66   - return NULL;
  64 + pte_t *pte = pte_alloc_one_kernel(mm, address);
  65 + struct page *page;
  66 +
  67 + if (!pte)
  68 + return NULL;
  69 + page = virt_to_page(pte);
  70 + pgtable_page_ctor(page);
  71 + return page;
67 72 }
68 73  
69 74 static inline void
70   -pte_free(struct mm_struct *mm, struct page *page)
  75 +pte_free(struct mm_struct *mm, pgtable_t page)
71 76 {
  77 + pgtable_page_dtor(page);
72 78 __free_page(page);
73 79 }
74 80  
include/asm-arm/page.h
... ... @@ -171,6 +171,8 @@
171 171  
172 172 #endif /* STRICT_MM_TYPECHECKS */
173 173  
  174 +typedef struct page *pgtable_t;
  175 +
174 176 #endif /* CONFIG_MMU */
175 177  
176 178 #include <asm/memory.h>
include/asm-arm/pgalloc.h
... ... @@ -66,7 +66,7 @@
66 66 return pte;
67 67 }
68 68  
69   -static inline struct page *
  69 +static inline pgtable_t
70 70 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
71 71 {
72 72 struct page *pte;
... ... @@ -75,6 +75,7 @@
75 75 if (pte) {
76 76 void *page = page_address(pte);
77 77 clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
  78 + pgtable_page_ctor(pte);
78 79 }
79 80  
80 81 return pte;
81 82  
... ... @@ -91,8 +92,9 @@
91 92 }
92 93 }
93 94  
94   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  95 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
95 96 {
  97 + pgtable_page_dtor(pte);
96 98 __free_page(pte);
97 99 }
98 100  
99 101  
... ... @@ -123,10 +125,11 @@
123 125 }
124 126  
125 127 static inline void
126   -pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
  128 +pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
127 129 {
128 130 __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
129 131 }
  132 +#define pmd_pgtable(pmd) pmd_page(pmd)
130 133  
131 134 #endif /* CONFIG_MMU */
132 135  
include/asm-avr32/page.h
... ... @@ -34,6 +34,7 @@
34 34 typedef struct { unsigned long pte; } pte_t;
35 35 typedef struct { unsigned long pgd; } pgd_t;
36 36 typedef struct { unsigned long pgprot; } pgprot_t;
  37 +typedef struct page *pgtable_t;
37 38  
38 39 #define pte_val(x) ((x).pte)
39 40 #define pgd_val(x) ((x).pgd)
include/asm-avr32/pgalloc.h
... ... @@ -17,10 +17,11 @@
17 17 set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
18 18  
19 19 static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
20   - struct page *pte)
  20 + pgtable_t pte)
21 21 {
22 22 set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
23 23 }
  24 +#define pmd_pgtable(pmd) pmd_page(pmd)
24 25  
25 26 /*
26 27 * Allocate and free page tables
... ... @@ -51,7 +52,9 @@
51 52 struct page *pte;
52 53  
53 54 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
54   -
  55 + if (!pte)
  56 + return NULL;
  57 + pgtable_page_ctor(pte);
55 58 return pte;
56 59 }
57 60  
58 61  
59 62  
... ... @@ -60,12 +63,17 @@
60 63 free_page((unsigned long)pte);
61 64 }
62 65  
63   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  66 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
64 67 {
  68 + pgtable_page_dtor(pte);
65 69 __free_page(pte);
66 70 }
67 71  
68   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  72 +#define __pte_free_tlb(tlb,pte) \
  73 +do { \
  74 + pgtable_page_dtor(pte); \
  75 + tlb_remove_page((tlb), pte); \
  76 +} while (0)
69 77  
70 78 #define check_pgt_cache() do { } while(0)
71 79  
include/asm-cris/page.h
... ... @@ -26,6 +26,7 @@
26 26 typedef struct { unsigned long pte; } pte_t;
27 27 typedef struct { unsigned long pgd; } pgd_t;
28 28 typedef struct { unsigned long pgprot; } pgprot_t;
  29 +typedef struct page *pgtable_t;
29 30 #endif
30 31  
31 32 #define pte_val(x) ((x).pte)
include/asm-cris/pgalloc.h
... ... @@ -6,6 +6,7 @@
6 6  
7 7 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
8 8 #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte))
  9 +#define pmd_pgtable(pmd) pmd_page(pmd)
9 10  
10 11 /*
11 12 * Allocate and free page tables.
12 13  
... ... @@ -27,10 +28,11 @@
27 28 return pte;
28 29 }
29 30  
30   -static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  31 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
31 32 {
32 33 struct page *pte;
33 34 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
  35 + pgtable_page_ctor(pte);
34 36 return pte;
35 37 }
36 38  
37 39  
38 40  
... ... @@ -39,13 +41,17 @@
39 41 free_page((unsigned long)pte);
40 42 }
41 43  
42   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  44 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
43 45 {
  46 + pgtable_page_dtor(pte);
44 47 __free_page(pte);
45 48 }
46 49  
47   -
48   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  50 +#define __pte_free_tlb(tlb,pte) \
  51 +do { \
  52 + pgtable_page_dtor(pte); \
  53 + tlb_remove_page((tlb), pte); \
  54 +} while (0)
49 55  
50 56 #define check_pgt_cache() do { } while (0)
51 57  
include/asm-frv/page.h
... ... @@ -25,6 +25,7 @@
25 25 typedef struct { pmd_t pue[1]; } pud_t;
26 26 typedef struct { pud_t pge[1]; } pgd_t;
27 27 typedef struct { unsigned long pgprot; } pgprot_t;
  28 +typedef struct page *pgtable_t;
28 29  
29 30 #define pte_val(x) ((x).pte)
30 31 #define pmd_val(x) ((x).ste[0])
include/asm-frv/pgalloc.h
... ... @@ -25,6 +25,7 @@
25 25 do { \
26 26 __set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE); \
27 27 } while(0)
  28 +#define pmd_pgtable(pmd) pmd_page(pmd)
28 29  
29 30 /*
30 31 * Allocate and free page tables.
31 32  
32 33  
33 34  
... ... @@ -35,19 +36,24 @@
35 36  
36 37 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
37 38  
38   -extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
  39 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
39 40  
40 41 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
41 42 {
42 43 free_page((unsigned long)pte);
43 44 }
44 45  
45   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  46 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
46 47 {
  48 + pgtable_page_dtor(pte);
47 49 __free_page(pte);
48 50 }
49 51  
50   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  52 +#define __pte_free_tlb(tlb,pte) \
  53 +do { \
  54 + pgtable_page_dtor(pte); \
  55 + tlb_remove_page((tlb),(pte)); \
  56 +} while (0)
51 57  
52 58 /*
53 59 * allocating and freeing a pmd is trivial: the 1-entry pmd is
include/asm-ia64/page.h
... ... @@ -185,6 +185,7 @@
185 185 #endif
186 186 typedef struct { unsigned long pgd; } pgd_t;
187 187 typedef struct { unsigned long pgprot; } pgprot_t;
  188 + typedef struct page *pgtable_t;
188 189  
189 190 # define pte_val(x) ((x).pte)
190 191 # define pmd_val(x) ((x).pmd)
... ... @@ -206,6 +207,7 @@
206 207 typedef unsigned long pmd_t;
207 208 typedef unsigned long pgd_t;
208 209 typedef unsigned long pgprot_t;
  210 + typedef struct page *pgtable_t;
209 211 # endif
210 212  
211 213 # define pte_val(x) (x)
include/asm-ia64/pgalloc.h
... ... @@ -70,10 +70,11 @@
70 70 #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
71 71  
72 72 static inline void
73   -pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
  73 +pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
74 74 {
75 75 pmd_val(*pmd_entry) = page_to_phys(pte);
76 76 }
  77 +#define pmd_pgtable(pmd) pmd_page(pmd)
77 78  
78 79 static inline void
79 80 pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
80 81  
... ... @@ -81,11 +82,17 @@
81 82 pmd_val(*pmd_entry) = __pa(pte);
82 83 }
83 84  
84   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
85   - unsigned long addr)
  85 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
86 86 {
87   - void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
88   - return pg ? virt_to_page(pg) : NULL;
  87 + struct page *page;
  88 + void *pg;
  89 +
  90 + pg = quicklist_alloc(0, GFP_KERNEL, NULL);
  91 + if (!pg)
  92 + return NULL;
  93 + page = virt_to_page(pg);
  94 + pgtable_page_ctor(page);
  95 + return page;
89 96 }
90 97  
91 98 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
92 99  
... ... @@ -94,8 +101,9 @@
94 101 return quicklist_alloc(0, GFP_KERNEL, NULL);
95 102 }
96 103  
97   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  104 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
98 105 {
  106 + pgtable_page_dtor(pte);
99 107 quicklist_free_page(0, NULL, pte);
100 108 }
101 109  
include/asm-m32r/page.h
... ... @@ -28,6 +28,7 @@
28 28 #define PTE_MASK PAGE_MASK
29 29  
30 30 typedef struct { unsigned long pgprot; } pgprot_t;
  31 +typedef struct page *pgtable_t;
31 32  
32 33 #define pmd_val(x) ((x).pmd)
33 34 #define pgd_val(x) ((x).pgd)
include/asm-m32r/pgalloc.h
... ... @@ -9,10 +9,11 @@
9 9 set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
10 10  
11 11 static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
12   - struct page *pte)
  12 + pgtable_t pte)
13 13 {
14 14 set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
15 15 }
  16 +#define pmd_pgtable(pmd) pmd_page(pmd)
16 17  
17 18 /*
18 19 * Allocate and free page tables.
19 20  
... ... @@ -37,12 +38,12 @@
37 38 return pte;
38 39 }
39 40  
40   -static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
  41 +static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
41 42 unsigned long address)
42 43 {
43 44 struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
44 45  
45   -
  46 + pgtable_page_ctor(pte);
46 47 return pte;
47 48 }
48 49  
49 50  
... ... @@ -51,8 +52,9 @@
51 52 free_page((unsigned long)pte);
52 53 }
53 54  
54   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  55 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
55 56 {
  57 + pgtable_page_dtor(pte);
56 58 __free_page(pte);
57 59 }
58 60  
include/asm-m68k/motorola_pgalloc.h
... ... @@ -7,7 +7,6 @@
7 7 extern pmd_t *get_pointer_table(void);
8 8 extern int free_pointer_table(pmd_t *);
9 9  
10   -
11 10 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
12 11 {
13 12 pte_t *pte;
... ... @@ -28,7 +27,7 @@
28 27 free_page((unsigned long) pte);
29 28 }
30 29  
31   -static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  30 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
32 31 {
33 32 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
34 33 pte_t *pte;
35 34  
36 35  
37 36  
38 37  
... ... @@ -43,19 +42,21 @@
43 42 nocache_page(pte);
44 43 }
45 44 kunmap(pte);
46   -
  45 + pgtable_page_ctor(page);
47 46 return page;
48 47 }
49 48  
50   -static inline void pte_free(struct mm_struct *mm, struct page *page)
  49 +static inline void pte_free(struct mm_struct *mm, pgtable_t page)
51 50 {
  51 + pgtable_page_dtor(page);
52 52 cache_page(kmap(page));
53 53 kunmap(page);
54 54 __free_page(page);
55 55 }
56 56  
57   -static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
  57 +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
58 58 {
  59 + pgtable_page_dtor(page);
59 60 cache_page(kmap(page));
60 61 kunmap(page);
61 62 __free_page(page);
62 63  
... ... @@ -94,10 +95,11 @@
94 95 pmd_set(pmd, pte);
95 96 }
96 97  
97   -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
  98 +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
98 99 {
99 100 pmd_set(pmd, page_address(page));
100 101 }
  102 +#define pmd_pgtable(pmd) pmd_page(pmd)
101 103  
102 104 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
103 105 {
include/asm-m68k/page.h
... ... @@ -91,6 +91,7 @@
91 91 typedef struct { unsigned long pmd[16]; } pmd_t;
92 92 typedef struct { unsigned long pgd; } pgd_t;
93 93 typedef struct { unsigned long pgprot; } pgprot_t;
  94 +typedef struct page *pgtable_t;
94 95  
95 96 #define pte_val(x) ((x).pte)
96 97 #define pmd_val(x) ((&x)->pmd[0])
include/asm-m68k/sun3_pgalloc.h
... ... @@ -26,12 +26,17 @@
26 26 free_page((unsigned long) pte);
27 27 }
28 28  
29   -static inline void pte_free(struct mm_struct *mm, struct page *page)
  29 +static inline void pte_free(struct mm_struct *mm, pgtable_t page)
30 30 {
  31 + pgtable_page_dtor(page);
31 32 __free_page(page);
32 33 }
33 34  
34   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  35 +#define __pte_free_tlb(tlb,pte) \
  36 +do { \
  37 + pgtable_page_dtor(pte); \
  38 + tlb_remove_page((tlb), pte); \
  39 +} while (0)
35 40  
36 41 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
37 42 unsigned long address)
... ... @@ -45,8 +50,8 @@
45 50 return (pte_t *) (page);
46 51 }
47 52  
48   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
49   - unsigned long address)
  53 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  54 + unsigned long address)
50 55 {
51 56 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
52 57  
... ... @@ -54,6 +59,7 @@
54 59 return NULL;
55 60  
56 61 clear_highpage(page);
  62 + pgtable_page_ctor(page);
57 63 return page;
58 64  
59 65 }
60 66  
... ... @@ -63,10 +69,11 @@
63 69 pmd_val(*pmd) = __pa((unsigned long)pte);
64 70 }
65 71  
66   -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
  72 +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
67 73 {
68 74 pmd_val(*pmd) = __pa((unsigned long)page_address(page));
69 75 }
  76 +#define pmd_pgtable(pmd) pmd_page(pmd)
70 77  
71 78 /*
72 79 * allocating and freeing a pmd is trivial: the 1-entry pmd is
include/asm-mips/page.h
... ... @@ -90,6 +90,7 @@
90 90 #define pte_val(x) ((x).pte)
91 91 #define __pte(x) ((pte_t) { (x) } )
92 92 #endif
  93 +typedef struct page *pgtable_t;
93 94  
94 95 /*
95 96 * For 3-level pagetables we defines these ourselves, for 2-level the
include/asm-mips/pgalloc.h
... ... @@ -20,10 +20,11 @@
20 20 }
21 21  
22 22 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
23   - struct page *pte)
  23 + pgtable_t pte)
24 24 {
25 25 set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
26 26 }
  27 +#define pmd_pgtable(pmd) pmd_page(pmd)
27 28  
28 29 /*
29 30 * Initialize a new pmd table with invalid pointers.
30 31  
... ... @@ -79,9 +80,10 @@
79 80 struct page *pte;
80 81  
81 82 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
82   - if (pte)
  83 + if (pte) {
83 84 clear_highpage(pte);
84   -
  85 + pgtable_page_ctor(pte);
  86 + }
85 87 return pte;
86 88 }
87 89  
88 90  
89 91  
... ... @@ -90,12 +92,17 @@
90 92 free_pages((unsigned long)pte, PTE_ORDER);
91 93 }
92 94  
93   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  95 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
94 96 {
  97 + pgtable_page_dtor(pte);
95 98 __free_pages(pte, PTE_ORDER);
96 99 }
97 100  
98   -#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
  101 +#define __pte_free_tlb(tlb,pte) \
  102 +do { \
  103 + pgtable_page_dtor(pte); \
  104 + tlb_remove_page((tlb), pte); \
  105 +} while (0)
99 106  
100 107 #ifdef CONFIG_32BIT
101 108  
include/asm-parisc/page.h
... ... @@ -91,6 +91,7 @@
91 91  
92 92 #endif /* STRICT_MM_TYPECHECKS */
93 93  
  94 +typedef struct page *pgtable_t;
94 95  
95 96 typedef struct __physmem_range {
96 97 unsigned long start_pfn;
include/asm-parisc/pgalloc.h
... ... @@ -115,11 +115,14 @@
115 115  
116 116 #define pmd_populate(mm, pmd, pte_page) \
117 117 pmd_populate_kernel(mm, pmd, page_address(pte_page))
  118 +#define pmd_pgtable(pmd) pmd_page(pmd)
118 119  
119   -static inline struct page *
  120 +static inline pgtable_t
120 121 pte_alloc_one(struct mm_struct *mm, unsigned long address)
121 122 {
122 123 struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  124 + if (page)
  125 + pgtable_page_ctor(page);
123 126 return page;
124 127 }
125 128  
... ... @@ -135,7 +138,11 @@
135 138 free_page((unsigned long)pte);
136 139 }
137 140  
138   -#define pte_free(mm, page) pte_free_kernel(page_address(page))
  141 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
  142 +{
  143 + pgtable_page_dtor(pte);
  144 + pte_free_kernel(mm, page_address(pte));
  145 +}
139 146  
140 147 #define check_pgt_cache() do { } while (0)
141 148  
include/asm-powerpc/page.h
... ... @@ -190,6 +190,8 @@
190 190  
191 191 struct vm_area_struct;
192 192  
  193 +typedef struct page *pgtable_t;
  194 +
193 195 #include <asm-generic/memory_model.h>
194 196 #endif /* __ASSEMBLY__ */
195 197  
include/asm-powerpc/pgalloc-32.h
... ... @@ -22,17 +22,19 @@
22 22 (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
23 23 #define pmd_populate(mm, pmd, pte) \
24 24 (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
  25 +#define pmd_pgtable(pmd) pmd_page(pmd)
25 26 #else
26 27 #define pmd_populate_kernel(mm, pmd, pte) \
27 28 (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
28 29 #define pmd_populate(mm, pmd, pte) \
29 30 (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
  31 +#define pmd_pgtable(pmd) pmd_page(pmd)
30 32 #endif
31 33  
32 34 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
33   -extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
  35 +extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
34 36 extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
35   -extern void pte_free(struct mm_struct *mm, struct page *pte);
  37 +extern void pte_free(struct mm_struct *mm, pgtable_t pte);
36 38  
37 39 #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
38 40  
include/asm-powerpc/pgalloc-64.h
... ... @@ -58,6 +58,7 @@
58 58 #define pmd_populate(mm, pmd, pte_page) \
59 59 pmd_populate_kernel(mm, pmd, page_address(pte_page))
60 60 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
  61 +#define pmd_pgtable(pmd) pmd_page(pmd)
61 62  
62 63  
63 64 #else /* CONFIG_PPC_64K_PAGES */
... ... @@ -72,6 +73,7 @@
72 73  
73 74 #define pmd_populate(mm, pmd, pte_page) \
74 75 pmd_populate_kernel(mm, pmd, page_address(pte_page))
  76 +#define pmd_pgtable(pmd) pmd_page(pmd)
75 77  
76 78 #endif /* CONFIG_PPC_64K_PAGES */
77 79  
78 80  
... ... @@ -92,11 +94,18 @@
92 94 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
93 95 }
94 96  
95   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
96   - unsigned long address)
  97 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  98 + unsigned long address)
97 99 {
98   - pte_t *pte = pte_alloc_one_kernel(mm, address);
99   - return pte ? virt_to_page(pte) : NULL;
  100 + struct page *page;
  101 + pte_t *pte;
  102 +
  103 + pte = pte_alloc_one_kernel(mm, address);
  104 + if (!pte)
  105 + return NULL;
  106 + page = virt_to_page(pte);
  107 + pgtable_page_ctor(page);
  108 + return page;
100 109 }
101 110  
102 111 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
103 112  
... ... @@ -104,8 +113,9 @@
104 113 free_page((unsigned long)pte);
105 114 }
106 115  
107   -static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
  116 +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
108 117 {
  118 + pgtable_page_dtor(ptepage);
109 119 __free_page(ptepage);
110 120 }
111 121  
112 122  
... ... @@ -136,9 +146,12 @@
136 146  
137 147 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
138 148  
139   -#define __pte_free_tlb(tlb, ptepage) \
  149 +#define __pte_free_tlb(tlb,ptepage) \
  150 +do { \
  151 + pgtable_page_dtor(ptepage); \
140 152 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
141   - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
  153 + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
  154 +} while (0)
142 155 #define __pmd_free_tlb(tlb, pmd) \
143 156 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
144 157 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
include/asm-ppc/pgalloc.h
... ... @@ -23,17 +23,19 @@
23 23 (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
24 24 #define pmd_populate(mm, pmd, pte) \
25 25 (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
  26 +#define pmd_pgtable(pmd) pmd_page(pmd)
26 27 #else
27 28 #define pmd_populate_kernel(mm, pmd, pte) \
28 29 (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
29 30 #define pmd_populate(mm, pmd, pte) \
30 31 (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
  32 +#define pmd_pgtable(pmd) pmd_page(pmd)
31 33 #endif
32 34  
33 35 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
34   -extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
  36 +extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
35 37 extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
36   -extern void pte_free(struct mm_struct *mm, struct page *pte);
  38 +extern void pte_free(struct mm_struct *mm, pgtable_t pte);
37 39  
38 40 #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
39 41  
include/asm-s390/page.h
... ... @@ -109,6 +109,8 @@
109 109  
110 110 #endif /* __s390x__ */
111 111  
  112 +typedef struct page *pgtable_t;
  113 +
112 114 #define __pte(x) ((pte_t) { (x) } )
113 115 #define __pmd(x) ((pmd_t) { (x) } )
114 116 #define __pgd(x) ((pgd_t) { (x) } )
include/asm-s390/pgalloc.h
... ... @@ -132,7 +132,7 @@
132 132 }
133 133  
134 134 static inline void
135   -pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
  135 +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
136 136 {
137 137 pte_t *pte = (pte_t *)page_to_phys(page);
138 138 pmd_t *shadow_pmd = get_shadow_table(pmd);
... ... @@ -142,6 +142,7 @@
142 142 if (shadow_pmd && shadow_pte)
143 143 pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
144 144 }
  145 +#define pmd_pgtable(pmd) pmd_page(pmd)
145 146  
146 147 /*
147 148 * page table entry allocation/free routines.
include/asm-s390/tlb.h
... ... @@ -95,7 +95,7 @@
95 95 * pte_free_tlb frees a pte table and clears the CRSTE for the
96 96 * page table from the tlb.
97 97 */
98   -static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
  98 +static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
99 99 {
100 100 if (!tlb->fullmm) {
101 101 tlb->array[tlb->nr_ptes++] = page;
include/asm-sh/page.h
... ... @@ -100,6 +100,8 @@
100 100 #define __pgd(x) ((pgd_t) { (x) } )
101 101 #define __pgprot(x) ((pgprot_t) { (x) } )
102 102  
  103 +typedef struct page *pgtable_t;
  104 +
103 105 #endif /* !__ASSEMBLY__ */
104 106  
105 107 /*
include/asm-sh/pgalloc.h
... ... @@ -14,10 +14,11 @@
14 14 }
15 15  
16 16 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
17   - struct page *pte)
  17 + pgtable_t pte)
18 18 {
19 19 set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
20 20 }
  21 +#define pmd_pgtable(pmd) pmd_page(pmd)
21 22  
22 23 static inline void pgd_ctor(void *x)
23 24 {
24 25  
... ... @@ -47,11 +48,18 @@
47 48 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
48 49 }
49 50  
50   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
51   - unsigned long address)
  51 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  52 + unsigned long address)
52 53 {
53   - void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
54   - return pg ? virt_to_page(pg) : NULL;
  54 + struct page *page;
  55 + void *pg;
  56 +
  57 + pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
  58 + if (!pg)
  59 + return NULL;
  60 + page = virt_to_page(pg);
  61 + pgtable_page_ctor(page);
  62 + return page;
55 63 }
56 64  
57 65 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
58 66  
59 67  
... ... @@ -59,12 +67,17 @@
59 67 quicklist_free(QUICK_PT, NULL, pte);
60 68 }
61 69  
62   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  70 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
63 71 {
  72 + pgtable_page_dtor(pte);
64 73 quicklist_free_page(QUICK_PT, NULL, pte);
65 74 }
66 75  
67   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  76 +#define __pte_free_tlb(tlb,pte) \
  77 +do { \
  78 + pgtable_page_dtor(pte); \
  79 + tlb_remove_page((tlb), (pte)); \
  80 +} while (0)
68 81  
69 82 /*
70 83 * allocating and freeing a pmd is trivial: the 1-entry pmd is
include/asm-sparc/page.h
... ... @@ -123,6 +123,8 @@
123 123  
124 124 #endif
125 125  
  126 +typedef struct page *pgtable_t;
  127 +
126 128 extern unsigned long sparc_unmapped_base;
127 129  
128 130 BTFIXUPDEF_SETHI(sparc_unmapped_base)
include/asm-sparc/pgalloc.h
... ... @@ -50,10 +50,11 @@
50 50  
51 51 BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
52 52 #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
  53 +#define pmd_pgtable(pmd) pmd_page(pmd)
53 54 BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
54 55 #define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
55 56  
56   -BTFIXUPDEF_CALL(struct page *, pte_alloc_one, struct mm_struct *, unsigned long)
  57 +BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
57 58 #define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
58 59 BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
59 60 #define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
... ... @@ -61,7 +62,7 @@
61 62 BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
62 63 #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
63 64  
64   -BTFIXUPDEF_CALL(void, pte_free, struct page *)
  65 +BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
65 66 #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
66 67 #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
67 68  
include/asm-sparc64/page.h
... ... @@ -104,6 +104,8 @@
104 104  
105 105 #endif /* (STRICT_MM_TYPECHECKS) */
106 106  
  107 +typedef struct page *pgtable_t;
  108 +
107 109 #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
108 110 (_AC(0x0000000070000000,UL)) : \
109 111 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
include/asm-sparc64/pgalloc.h
... ... @@ -43,11 +43,18 @@
43 43 return quicklist_alloc(0, GFP_KERNEL, NULL);
44 44 }
45 45  
46   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
47   - unsigned long address)
  46 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  47 + unsigned long address)
48 48 {
49   - void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
50   - return pg ? virt_to_page(pg) : NULL;
  49 + struct page *page;
  50 + void *pg;
  51 +
  52 + pg = quicklist_alloc(0, GFP_KERNEL, NULL);
  53 + if (!pg)
  54 + return NULL;
  55 + page = virt_to_page(pg);
  56 + pgtable_page_ctor(page);
  57 + return page;
51 58 }
52 59  
53 60 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
54 61  
... ... @@ -55,8 +62,9 @@
55 62 quicklist_free(0, NULL, pte);
56 63 }
57 64  
58   -static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
  65 +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
59 66 {
  67 + pgtable_page_dtor(ptepage);
60 68 quicklist_free_page(0, NULL, ptepage);
61 69 }
62 70  
... ... @@ -64,6 +72,7 @@
64 72 #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
65 73 #define pmd_populate(MM,PMD,PTE_PAGE) \
66 74 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
  75 +#define pmd_pgtable(pmd) pmd_page(pmd)
67 76  
68 77 static inline void check_pgt_cache(void)
69 78 {
include/asm-um/page.h
... ... @@ -79,6 +79,8 @@
79 79  
80 80 typedef struct { unsigned long pgprot; } pgprot_t;
81 81  
  82 +typedef struct page *pgtable_t;
  83 +
82 84 #define pgd_val(x) ((x).pgd)
83 85 #define pgprot_val(x) ((x).pgprot)
84 86  
include/asm-um/pgalloc.h
... ... @@ -18,6 +18,7 @@
18 18 set_pmd(pmd, __pmd(_PAGE_TABLE + \
19 19 ((unsigned long long)page_to_pfn(pte) << \
20 20 (unsigned long long) PAGE_SHIFT)))
  21 +#define pmd_pgtable(pmd) pmd_page(pmd)
21 22  
22 23 /*
23 24 * Allocate and free page tables.
24 25  
25 26  
26 27  
... ... @@ -26,19 +27,24 @@
26 27 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
27 28  
28 29 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
29   -extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
  30 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
30 31  
31 32 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
32 33 {
33 34 free_page((unsigned long) pte);
34 35 }
35 36  
36   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  37 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
37 38 {
  39 + pgtable_page_dtor(pte);
38 40 __free_page(pte);
39 41 }
40 42  
41   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  43 +#define __pte_free_tlb(tlb,pte) \
  44 +do { \
  45 + pgtable_page_dtor(pte); \
  46 + tlb_remove_page((tlb),(pte)); \
  47 +} while (0)
42 48  
43 49 #ifdef CONFIG_3_LEVEL_PGTABLES
44 50  
include/asm-x86/page_32.h
... ... @@ -50,6 +50,8 @@
50 50 typedef union { pteval_t pte, pte_low; } pte_t;
51 51 typedef pte_t boot_pte_t;
52 52  
  53 +typedef struct page *pgtable_t;
  54 +
53 55 #endif /* __ASSEMBLY__ */
54 56 #endif /* CONFIG_X86_PAE */
55 57  
include/asm-x86/page_64.h
... ... @@ -71,6 +71,8 @@
71 71 typedef unsigned long pgprotval_t;
72 72 typedef unsigned long phys_addr_t;
73 73  
  74 +typedef struct page *pgtable_t;
  75 +
74 76 typedef struct { pteval_t pte; } pte_t;
75 77  
76 78 #define vmemmap ((struct page *)VMEMMAP_START)
include/asm-x86/pgalloc_32.h
... ... @@ -31,6 +31,7 @@
31 31 paravirt_alloc_pt(mm, pfn);
32 32 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
33 33 }
  34 +#define pmd_pgtable(pmd) pmd_page(pmd)
34 35  
35 36 /*
36 37 * Allocate and free page tables.
37 38  
38 39  
... ... @@ -39,15 +40,16 @@
39 40 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
40 41  
41 42 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
42   -extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
  43 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
43 44  
44 45 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
45 46 {
46 47 free_page((unsigned long)pte);
47 48 }
48 49  
49   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  50 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
50 51 {
  52 + pgtable_page_dtor(pte);
51 53 __free_page(pte);
52 54 }
53 55  
include/asm-x86/pgalloc_64.h
... ... @@ -12,6 +12,8 @@
12 12 #define pgd_populate(mm, pgd, pud) \
13 13 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
14 14  
  15 +#define pmd_pgtable(pmd) pmd_page(pmd)
  16 +
15 17 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
16 18 {
17 19 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
18 20  
19 21  
... ... @@ -91,12 +93,17 @@
91 93 return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
92 94 }
93 95  
94   -static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  96 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
95 97 {
96   - void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  98 + struct page *page;
  99 + void *p;
  100 +
  101 + p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
97 102 if (!p)
98 103 return NULL;
99   - return virt_to_page(p);
  104 + page = virt_to_page(p);
  105 + pgtable_page_ctor(page);
  106 + return page;
100 107 }
101 108  
102 109 /* Should really implement gc for free page table pages. This could be
103 110  
104 111  
... ... @@ -108,12 +115,17 @@
108 115 free_page((unsigned long)pte);
109 116 }
110 117  
111   -static inline void pte_free(struct mm_struct *mm, struct page *pte)
  118 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
112 119 {
  120 + pgtable_page_dtor(pte);
113 121 __free_page(pte);
114 122 }
115 123  
116   -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
  124 +#define __pte_free_tlb(tlb,pte) \
  125 +do { \
  126 + pgtable_page_dtor((pte)); \
  127 + tlb_remove_page((tlb), (pte)); \
  128 +} while (0)
117 129  
118 130 #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
119 131 #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
include/asm-xtensa/page.h
... ... @@ -98,6 +98,7 @@
98 98 typedef struct { unsigned long pte; } pte_t; /* page table entry */
99 99 typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
100 100 typedef struct { unsigned long pgprot; } pgprot_t;
  101 +typedef struct page *pgtable_t;
101 102  
102 103 #define pte_val(x) ((x).pte)
103 104 #define pgd_val(x) ((x).pgd)
include/asm-xtensa/pgalloc.h
... ... @@ -24,6 +24,7 @@
24 24 (pmd_val(*(pmdp)) = ((unsigned long)ptep))
25 25 #define pmd_populate(mm, pmdp, page) \
26 26 (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
  27 +#define pmd_pgtable(pmd) pmd_page(pmd)
27 28  
28 29 static inline pgd_t*
29 30 pgd_alloc(struct mm_struct *mm)
30 31  
... ... @@ -46,10 +47,14 @@
46 47 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
47 48 }
48 49  
49   -static inline struct page *pte_alloc_one(struct mm_struct *mm,
50   - unsigned long addr)
  50 +static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  51 + unsigned long addr)
51 52 {
52   - return virt_to_page(pte_alloc_one_kernel(mm, addr));
  53 + struct page *page;
  54 +
  55 + page = virt_to_page(pte_alloc_one_kernel(mm, addr));
  56 + pgtable_page_ctor(page);
  57 + return page;
53 58 }
54 59  
55 60 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
56 61  
57 62  
... ... @@ -57,10 +62,12 @@
57 62 kmem_cache_free(pgtable_cache, pte);
58 63 }
59 64  
60   -static inline void pte_free(struct mm_struct *mm, struct page *page)
  65 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
61 66 {
62   - kmem_cache_free(pgtable_cache, page_address(page));
  67 + pgtable_page_dtor(pte);
  68 + kmem_cache_free(pgtable_cache, page_address(pte));
63 69 }
  70 +#define pmd_pgtable(pmd) pmd_page(pmd)
64 71  
65 72 #endif /* __KERNEL__ */
66 73 #endif /* _XTENSA_PGALLOC_H */
include/linux/mm.h
... ... @@ -894,6 +894,18 @@
894 894 #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
895 895 #endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
896 896  
  897 +static inline void pgtable_page_ctor(struct page *page)
  898 +{
  899 + pte_lock_init(page);
  900 + inc_zone_page_state(page, NR_PAGETABLE);
  901 +}
  902 +
  903 +static inline void pgtable_page_dtor(struct page *page)
  904 +{
  905 + pte_lock_deinit(page);
  906 + dec_zone_page_state(page, NR_PAGETABLE);
  907 +}
  908 +
897 909 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
898 910 ({ \
899 911 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
... ... @@ -1136,7 +1148,7 @@
1136 1148 #define FOLL_GET 0x04 /* do get_page on page */
1137 1149 #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
1138 1150  
1139   -typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
  1151 +typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1140 1152 void *data);
1141 1153 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1142 1154 unsigned long size, pte_fn_t fn, void *data);
mm/memory.c
... ... @@ -134,11 +134,9 @@
134 134 */
135 135 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
136 136 {
137   - struct page *page = pmd_page(*pmd);
  137 + pgtable_t token = pmd_pgtable(*pmd);
138 138 pmd_clear(pmd);
139   - pte_lock_deinit(page);
140   - pte_free_tlb(tlb, page);
141   - dec_zone_page_state(page, NR_PAGETABLE);
  139 + pte_free_tlb(tlb, token);
142 140 tlb->mm->nr_ptes--;
143 141 }
144 142  
145 143  
146 144  
147 145  
148 146  
149 147  
... ... @@ -309,21 +307,19 @@
309 307  
310 308 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
311 309 {
312   - struct page *new = pte_alloc_one(mm, address);
  310 + pgtable_t new = pte_alloc_one(mm, address);
313 311 if (!new)
314 312 return -ENOMEM;
315 313  
316   - pte_lock_init(new);
317 314 spin_lock(&mm->page_table_lock);
318   - if (pmd_present(*pmd)) { /* Another has populated it */
319   - pte_lock_deinit(new);
320   - pte_free(mm, new);
321   - } else {
  315 + if (!pmd_present(*pmd)) { /* Has another populated it ? */
322 316 mm->nr_ptes++;
323   - inc_zone_page_state(new, NR_PAGETABLE);
324 317 pmd_populate(mm, pmd, new);
  318 + new = NULL;
325 319 }
326 320 spin_unlock(&mm->page_table_lock);
  321 + if (new)
  322 + pte_free(mm, new);
327 323 return 0;
328 324 }
329 325  
330 326  
331 327  
... ... @@ -334,11 +330,13 @@
334 330 return -ENOMEM;
335 331  
336 332 spin_lock(&init_mm.page_table_lock);
337   - if (pmd_present(*pmd)) /* Another has populated it */
338   - pte_free_kernel(&init_mm, new);
339   - else
  333 + if (!pmd_present(*pmd)) { /* Has another populated it ? */
340 334 pmd_populate_kernel(&init_mm, pmd, new);
  335 + new = NULL;
  336 + }
341 337 spin_unlock(&init_mm.page_table_lock);
  338 + if (new)
  339 + pte_free_kernel(&init_mm, new);
342 340 return 0;
343 341 }
344 342  
... ... @@ -1390,7 +1388,7 @@
1390 1388 {
1391 1389 pte_t *pte;
1392 1390 int err;
1393   - struct page *pmd_page;
  1391 + pgtable_t token;
1394 1392 spinlock_t *uninitialized_var(ptl);
1395 1393  
1396 1394 pte = (mm == &init_mm) ?
1397 1395  
... ... @@ -1401,10 +1399,10 @@
1401 1399  
1402 1400 BUG_ON(pmd_huge(*pmd));
1403 1401  
1404   - pmd_page = pmd_page(*pmd);
  1402 + token = pmd_pgtable(*pmd);
1405 1403  
1406 1404 do {
1407   - err = fn(pte, pmd_page, addr, data);
  1405 + err = fn(pte, token, addr, data);
1408 1406 if (err)
1409 1407 break;
1410 1408 } while (pte++, addr += PAGE_SIZE, addr != end);
mm/vmalloc.c
... ... @@ -820,7 +820,7 @@
820 820 }
821 821  
822 822  
823   -static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
  823 +static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
824 824 {
825 825 /* apply_to_page_range() does all the hard work. */
826 826 return 0;