Commit 7ef939054139ef857cebbec07cbd12d7cf7beedd

Authored by Jeff Dike
Committed by Linus Torvalds
1 parent f9dfefe423

[PATCH] uml: fix x86_64 page leak

We were leaking pmd pages when 3_LEVEL_PGTABLES was enabled.  This fixes that.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 4 changed files with 22 additions and 12 deletions

arch/um/kernel/skas/include/mmu-skas.h
1 /* 1 /*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6 #ifndef __SKAS_MMU_H 6 #ifndef __SKAS_MMU_H
7 #define __SKAS_MMU_H 7 #define __SKAS_MMU_H
8 8
9 #include "linux/config.h"
9 #include "mm_id.h" 10 #include "mm_id.h"
10 11
11 struct mmu_context_skas { 12 struct mmu_context_skas {
12 struct mm_id id; 13 struct mm_id id;
13 unsigned long last_page_table; 14 unsigned long last_page_table;
15 #ifdef CONFIG_3_LEVEL_PGTABLES
16 unsigned long last_pmd;
17 #endif
14 }; 18 };
15 19
16 extern void switch_mm_skas(struct mm_id * mm_idp); 20 extern void switch_mm_skas(struct mm_id * mm_idp);
17 21
18 #endif 22 #endif
19 23
20 /* 24 /*
21 * Overrides for Emacs so that we follow Linus's tabbing style. 25 * Overrides for Emacs so that we follow Linus's tabbing style.
22 * Emacs will notice this stuff at the end of the file and automatically 26 * Emacs will notice this stuff at the end of the file and automatically
23 * adjust the settings for this buffer only. This must remain at the end 27 * adjust the settings for this buffer only. This must remain at the end
24 * of the file. 28 * of the file.
25 * --------------------------------------------------------------------------- 29 * ---------------------------------------------------------------------------
26 * Local variables: 30 * Local variables:
27 * c-file-style: "linux" 31 * c-file-style: "linux"
28 * End: 32 * End:
29 */ 33 */
30 34
arch/um/kernel/skas/mmu.c
1 /* 1 /*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6 #include "linux/config.h" 6 #include "linux/config.h"
7 #include "linux/sched.h" 7 #include "linux/sched.h"
8 #include "linux/list.h" 8 #include "linux/list.h"
9 #include "linux/spinlock.h" 9 #include "linux/spinlock.h"
10 #include "linux/slab.h" 10 #include "linux/slab.h"
11 #include "linux/errno.h" 11 #include "linux/errno.h"
12 #include "linux/mm.h" 12 #include "linux/mm.h"
13 #include "asm/current.h" 13 #include "asm/current.h"
14 #include "asm/segment.h" 14 #include "asm/segment.h"
15 #include "asm/mmu.h" 15 #include "asm/mmu.h"
16 #include "asm/pgalloc.h" 16 #include "asm/pgalloc.h"
17 #include "asm/pgtable.h" 17 #include "asm/pgtable.h"
18 #include "os.h" 18 #include "os.h"
19 #include "skas.h" 19 #include "skas.h"
20 20
21 extern int __syscall_stub_start; 21 extern int __syscall_stub_start;
22 22
23 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, 23 static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
24 unsigned long kernel) 24 unsigned long kernel)
25 { 25 {
26 pgd_t *pgd; 26 pgd_t *pgd;
27 pud_t *pud; 27 pud_t *pud;
28 pmd_t *pmd; 28 pmd_t *pmd;
29 pte_t *pte; 29 pte_t *pte;
30 30
31 spin_lock(&mm->page_table_lock); 31 spin_lock(&mm->page_table_lock);
32 pgd = pgd_offset(mm, proc); 32 pgd = pgd_offset(mm, proc);
33 pud = pud_alloc(mm, pgd, proc); 33 pud = pud_alloc(mm, pgd, proc);
34 if (!pud) 34 if (!pud)
35 goto out; 35 goto out;
36 36
37 pmd = pmd_alloc(mm, pud, proc); 37 pmd = pmd_alloc(mm, pud, proc);
38 if (!pmd) 38 if (!pmd)
39 goto out_pmd; 39 goto out_pmd;
40 40
41 pte = pte_alloc_map(mm, pmd, proc); 41 pte = pte_alloc_map(mm, pmd, proc);
42 if (!pte) 42 if (!pte)
43 goto out_pte; 43 goto out_pte;
44 44
45 /* There's an interaction between the skas0 stub pages, stack 45 /* There's an interaction between the skas0 stub pages, stack
46 * randomization, and the BUG at the end of exit_mmap. exit_mmap 46 * randomization, and the BUG at the end of exit_mmap. exit_mmap
47 * checks that the number of page tables freed is the same as had 47 * checks that the number of page tables freed is the same as had
48 * been allocated. If the stack is on the last page table page, 48 * been allocated. If the stack is on the last page table page,
49 * then the stack pte page will be freed, and if not, it won't. To 49 * then the stack pte page will be freed, and if not, it won't. To
50 * avoid having to know where the stack is, or if the process mapped 50 * avoid having to know where the stack is, or if the process mapped
51 * something at the top of its address space for some other reason, 51 * something at the top of its address space for some other reason,
52 * we set TASK_SIZE to end at the start of the last page table. 52 * we set TASK_SIZE to end at the start of the last page table.
53 * This keeps exit_mmap off the last page, but introduces a leak 53 * This keeps exit_mmap off the last page, but introduces a leak
54 * of that page. So, we hang onto it here and free it in 54 * of that page. So, we hang onto it here and free it in
55 * destroy_context_skas. 55 * destroy_context_skas.
56 */ 56 */
57 57
58 mm->context.skas.last_page_table = pmd_page_kernel(*pmd); 58 mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
59 #ifdef CONFIG_3_LEVEL_PGTABLES
60 mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
61 #endif
59 62
60 *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); 63 *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
61 *pte = pte_mkexec(*pte); 64 *pte = pte_mkexec(*pte);
62 *pte = pte_wrprotect(*pte); 65 *pte = pte_wrprotect(*pte);
63 spin_unlock(&mm->page_table_lock); 66 spin_unlock(&mm->page_table_lock);
64 return(0); 67 return(0);
65 68
66 out_pmd: 69 out_pmd:
67 pud_free(pud); 70 pud_free(pud);
68 out_pte: 71 out_pte:
69 pmd_free(pmd); 72 pmd_free(pmd);
70 out: 73 out:
71 spin_unlock(&mm->page_table_lock); 74 spin_unlock(&mm->page_table_lock);
72 return(-ENOMEM); 75 return(-ENOMEM);
73 } 76 }
74 77
75 int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) 78 int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
76 { 79 {
77 struct mm_struct *cur_mm = current->mm; 80 struct mm_struct *cur_mm = current->mm;
78 struct mm_id *cur_mm_id = &cur_mm->context.skas.id; 81 struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
79 struct mm_id *mm_id = &mm->context.skas.id; 82 struct mm_id *mm_id = &mm->context.skas.id;
80 unsigned long stack = 0; 83 unsigned long stack = 0;
81 int from, ret = -ENOMEM; 84 int from, ret = -ENOMEM;
82 85
83 if(!proc_mm || !ptrace_faultinfo){ 86 if(!proc_mm || !ptrace_faultinfo){
84 stack = get_zeroed_page(GFP_KERNEL); 87 stack = get_zeroed_page(GFP_KERNEL);
85 if(stack == 0) 88 if(stack == 0)
86 goto out; 89 goto out;
87 90
88 /* This zeros the entry that pgd_alloc didn't, needed since 91 /* This zeros the entry that pgd_alloc didn't, needed since
89 * we are about to reinitialize it, and want mm.nr_ptes to 92 * we are about to reinitialize it, and want mm.nr_ptes to
90 * be accurate. 93 * be accurate.
91 */ 94 */
92 mm->pgd[USER_PTRS_PER_PGD] = __pgd(0); 95 mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
93 96
94 ret = init_stub_pte(mm, CONFIG_STUB_CODE, 97 ret = init_stub_pte(mm, CONFIG_STUB_CODE,
95 (unsigned long) &__syscall_stub_start); 98 (unsigned long) &__syscall_stub_start);
96 if(ret) 99 if(ret)
97 goto out_free; 100 goto out_free;
98 101
99 ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack); 102 ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
100 if(ret) 103 if(ret)
101 goto out_free; 104 goto out_free;
102 105
103 mm->nr_ptes--; 106 mm->nr_ptes--;
104 } 107 }
105 mm_id->stack = stack; 108 mm_id->stack = stack;
106 109
107 if(proc_mm){ 110 if(proc_mm){
108 if((cur_mm != NULL) && (cur_mm != &init_mm)) 111 if((cur_mm != NULL) && (cur_mm != &init_mm))
109 from = cur_mm_id->u.mm_fd; 112 from = cur_mm_id->u.mm_fd;
110 else from = -1; 113 else from = -1;
111 114
112 ret = new_mm(from, stack); 115 ret = new_mm(from, stack);
113 if(ret < 0){ 116 if(ret < 0){
114 printk("init_new_context_skas - new_mm failed, " 117 printk("init_new_context_skas - new_mm failed, "
115 "errno = %d\n", ret); 118 "errno = %d\n", ret);
116 goto out_free; 119 goto out_free;
117 } 120 }
118 mm_id->u.mm_fd = ret; 121 mm_id->u.mm_fd = ret;
119 } 122 }
120 else { 123 else {
121 if((cur_mm != NULL) && (cur_mm != &init_mm)) 124 if((cur_mm != NULL) && (cur_mm != &init_mm))
122 mm_id->u.pid = copy_context_skas0(stack, 125 mm_id->u.pid = copy_context_skas0(stack,
123 cur_mm_id->u.pid); 126 cur_mm_id->u.pid);
124 else mm_id->u.pid = start_userspace(stack); 127 else mm_id->u.pid = start_userspace(stack);
125 } 128 }
126 129
127 return 0; 130 return 0;
128 131
129 out_free: 132 out_free:
130 if(mm_id->stack != 0) 133 if(mm_id->stack != 0)
131 free_page(mm_id->stack); 134 free_page(mm_id->stack);
132 out: 135 out:
133 return ret; 136 return ret;
134 } 137 }
135 138
136 void destroy_context_skas(struct mm_struct *mm) 139 void destroy_context_skas(struct mm_struct *mm)
137 { 140 {
138 struct mmu_context_skas *mmu = &mm->context.skas; 141 struct mmu_context_skas *mmu = &mm->context.skas;
139 142
140 if(proc_mm) 143 if(proc_mm)
141 os_close_file(mmu->id.u.mm_fd); 144 os_close_file(mmu->id.u.mm_fd);
142 else 145 else
143 os_kill_ptraced_process(mmu->id.u.pid, 1); 146 os_kill_ptraced_process(mmu->id.u.pid, 1);
144 147
145 if(!proc_mm || !ptrace_faultinfo){ 148 if(!proc_mm || !ptrace_faultinfo){
146 free_page(mmu->id.stack); 149 free_page(mmu->id.stack);
147 free_page(mmu->last_page_table); 150 pte_free_kernel((pte_t *) mmu->last_page_table);
151 dec_page_state(nr_page_table_pages);
152 #ifdef CONFIG_3_LEVEL_PGTABLES
153 pmd_free((pmd_t *) mmu->last_pmd);
154 #endif
148 } 155 }
149 } 156 }
150 157
include/asm-um/pgalloc.h
1 /* 1 /*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Copyright 2003 PathScale, Inc. 3 * Copyright 2003 PathScale, Inc.
4 * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h 4 * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
5 * Licensed under the GPL 5 * Licensed under the GPL
6 */ 6 */
7 7
8 #ifndef __UM_PGALLOC_H 8 #ifndef __UM_PGALLOC_H
9 #define __UM_PGALLOC_H 9 #define __UM_PGALLOC_H
10 10
11 #include "linux/config.h" 11 #include "linux/config.h"
12 #include "linux/mm.h" 12 #include "linux/mm.h"
13 #include "asm/fixmap.h" 13 #include "asm/fixmap.h"
14 14
15 #define pmd_populate_kernel(mm, pmd, pte) \ 15 #define pmd_populate_kernel(mm, pmd, pte) \
16 set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) 16 set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
17 17
18 #define pmd_populate(mm, pmd, pte) \ 18 #define pmd_populate(mm, pmd, pte) \
19 set_pmd(pmd, __pmd(_PAGE_TABLE + \ 19 set_pmd(pmd, __pmd(_PAGE_TABLE + \
20 ((unsigned long long)page_to_pfn(pte) << \ 20 ((unsigned long long)page_to_pfn(pte) << \
21 (unsigned long long) PAGE_SHIFT))) 21 (unsigned long long) PAGE_SHIFT)))
22 22
23 /* 23 /*
24 * Allocate and free page tables. 24 * Allocate and free page tables.
25 */ 25 */
26 extern pgd_t *pgd_alloc(struct mm_struct *); 26 extern pgd_t *pgd_alloc(struct mm_struct *);
27 extern void pgd_free(pgd_t *pgd); 27 extern void pgd_free(pgd_t *pgd);
28 28
29 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); 29 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
30 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); 30 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
31 31
32 static inline void pte_free_kernel(pte_t *pte) 32 static inline void pte_free_kernel(pte_t *pte)
33 { 33 {
34 free_page((unsigned long) pte); 34 free_page((unsigned long) pte);
35 } 35 }
36 36
37 static inline void pte_free(struct page *pte) 37 static inline void pte_free(struct page *pte)
38 { 38 {
39 __free_page(pte); 39 __free_page(pte);
40 } 40 }
41 41
42 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 42 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
43 43
44 #ifdef CONFIG_3_LEVEL_PGTABLES 44 #ifdef CONFIG_3_LEVEL_PGTABLES
45 /* 45
46 * In the 3-level case we free the pmds as part of the pgd. 46 extern __inline__ void pmd_free(pmd_t *pmd)
47 */ 47 {
48 #define pmd_free(x) do { } while (0) 48 free_page((unsigned long)pmd);
49 #define __pmd_free_tlb(tlb,x) do { } while (0) 49 }
50
51 #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
50 #endif 52 #endif
51 53
52 #define check_pgt_cache() do { } while (0) 54 #define check_pgt_cache() do { } while (0)
53 55
54 #endif 56 #endif
55 57
56 /* 58 /*
57 * Overrides for Emacs so that we follow Linus's tabbing style. 59 * Overrides for Emacs so that we follow Linus's tabbing style.
58 * Emacs will notice this stuff at the end of the file and automatically 60 * Emacs will notice this stuff at the end of the file and automatically
59 * adjust the settings for this buffer only. This must remain at the end 61 * adjust the settings for this buffer only. This must remain at the end
60 * of the file. 62 * of the file.
61 * --------------------------------------------------------------------------- 63 * ---------------------------------------------------------------------------
62 * Local variables: 64 * Local variables:
63 * c-file-style: "linux" 65 * c-file-style: "linux"
64 * End: 66 * End:
65 */ 67 */
66 68
include/asm-um/pgtable-3level.h
1 /* 1 /*
2 * Copyright 2003 PathScale Inc 2 * Copyright 2003 PathScale Inc
3 * Derived from include/asm-i386/pgtable.h 3 * Derived from include/asm-i386/pgtable.h
4 * Licensed under the GPL 4 * Licensed under the GPL
5 */ 5 */
6 6
7 #ifndef __UM_PGTABLE_3LEVEL_H 7 #ifndef __UM_PGTABLE_3LEVEL_H
8 #define __UM_PGTABLE_3LEVEL_H 8 #define __UM_PGTABLE_3LEVEL_H
9 9
10 #include <asm-generic/pgtable-nopud.h> 10 #include <asm-generic/pgtable-nopud.h>
11 11
12 /* PGDIR_SHIFT determines what a third-level page table entry can map */ 12 /* PGDIR_SHIFT determines what a third-level page table entry can map */
13 13
14 #define PGDIR_SHIFT 30 14 #define PGDIR_SHIFT 30
15 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 15 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
16 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 16 #define PGDIR_MASK (~(PGDIR_SIZE-1))
17 17
18 /* PMD_SHIFT determines the size of the area a second-level page table can 18 /* PMD_SHIFT determines the size of the area a second-level page table can
19 * map 19 * map
20 */ 20 */
21 21
22 #define PMD_SHIFT 21 22 #define PMD_SHIFT 21
23 #define PMD_SIZE (1UL << PMD_SHIFT) 23 #define PMD_SIZE (1UL << PMD_SHIFT)
24 #define PMD_MASK (~(PMD_SIZE-1)) 24 #define PMD_MASK (~(PMD_SIZE-1))
25 25
26 /* 26 /*
27 * entries per page directory level 27 * entries per page directory level
28 */ 28 */
29 29
30 #define PTRS_PER_PTE 512 30 #define PTRS_PER_PTE 512
31 #define PTRS_PER_PMD 512 31 #define PTRS_PER_PMD 512
32 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) 32 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
33 #define PTRS_PER_PGD 512 33 #define PTRS_PER_PGD 512
34 #define FIRST_USER_ADDRESS 0 34 #define FIRST_USER_ADDRESS 0
35 35
36 #define pte_ERROR(e) \ 36 #define pte_ERROR(e) \
37 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \ 37 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
38 pte_val(e)) 38 pte_val(e))
39 #define pmd_ERROR(e) \ 39 #define pmd_ERROR(e) \
40 printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ 40 printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
41 pmd_val(e)) 41 pmd_val(e))
42 #define pgd_ERROR(e) \ 42 #define pgd_ERROR(e) \
43 printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ 43 printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
44 pgd_val(e)) 44 pgd_val(e))
45 45
46 #define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE)) 46 #define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
47 #define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) 47 #define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
48 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT) 48 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
49 #define pud_populate(mm, pud, pmd) \ 49 #define pud_populate(mm, pud, pmd) \
50 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) 50 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
51 51
52 #define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval)) 52 #define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
53 static inline int pgd_newpage(pgd_t pgd) 53 static inline int pgd_newpage(pgd_t pgd)
54 { 54 {
55 return(pgd_val(pgd) & _PAGE_NEWPAGE); 55 return(pgd_val(pgd) & _PAGE_NEWPAGE);
56 } 56 }
57 57
58 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; } 58 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
59 59
60 #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval)) 60 #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
61 61
62 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 62 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
63 { 63 {
64 pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL); 64 pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
65 65
66 if(pmd) 66 if(pmd)
67 memset(pmd, 0, PAGE_SIZE); 67 memset(pmd, 0, PAGE_SIZE);
68 68
69 return pmd; 69 return pmd;
70 } 70 }
71 71
72 static inline void pmd_free(pmd_t *pmd){ 72 extern inline void pud_clear (pud_t *pud)
73 free_page((unsigned long) pmd); 73 {
74 set_pud(pud, __pud(0));
74 } 75 }
75
76 #define __pmd_free_tlb(tlb,x) do { } while (0)
77
78 static inline void pud_clear (pud_t * pud) { }
79 76
80 #define pud_page(pud) \ 77 #define pud_page(pud) \
81 ((struct page *) __va(pud_val(pud) & PAGE_MASK)) 78 ((struct page *) __va(pud_val(pud) & PAGE_MASK))
82 79
83 /* Find an entry in the second-level page table.. */ 80 /* Find an entry in the second-level page table.. */
84 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ 81 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
85 pmd_index(address)) 82 pmd_index(address))
86 83
87 static inline unsigned long pte_pfn(pte_t pte) 84 static inline unsigned long pte_pfn(pte_t pte)
88 { 85 {
89 return phys_to_pfn(pte_val(pte)); 86 return phys_to_pfn(pte_val(pte));
90 } 87 }
91 88
92 static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot) 89 static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
93 { 90 {
94 pte_t pte; 91 pte_t pte;
95 phys_t phys = pfn_to_phys(page_nr); 92 phys_t phys = pfn_to_phys(page_nr);
96 93
97 pte_set_val(pte, phys, pgprot); 94 pte_set_val(pte, phys, pgprot);
98 return pte; 95 return pte;
99 } 96 }
100 97
101 static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot) 98 static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
102 { 99 {
103 return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)); 100 return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
104 } 101 }
105 102
106 /* 103 /*
107 * Bits 0 through 3 are taken in the low part of the pte, 104 * Bits 0 through 3 are taken in the low part of the pte,
108 * put the 32 bits of offset into the high part. 105 * put the 32 bits of offset into the high part.
109 */ 106 */
110 #define PTE_FILE_MAX_BITS 32 107 #define PTE_FILE_MAX_BITS 32
111 108
112 #ifdef CONFIG_64BIT 109 #ifdef CONFIG_64BIT
113 110
114 #define pte_to_pgoff(p) ((p).pte >> 32) 111 #define pte_to_pgoff(p) ((p).pte >> 32)
115 112
116 #define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE }) 113 #define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
117 114
118 #else 115 #else
119 116
120 #define pte_to_pgoff(pte) ((pte).pte_high) 117 #define pte_to_pgoff(pte) ((pte).pte_high)
121 118
122 #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) 119 #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
123 120
124 #endif 121 #endif
125 122
126 #endif 123 #endif
127 124
128 /* 125 /*
129 * Overrides for Emacs so that we follow Linus's tabbing style. 126 * Overrides for Emacs so that we follow Linus's tabbing style.
130 * Emacs will notice this stuff at the end of the file and automatically 127 * Emacs will notice this stuff at the end of the file and automatically
131 * adjust the settings for this buffer only. This must remain at the end 128 * adjust the settings for this buffer only. This must remain at the end
132 * of the file. 129 * of the file.
133 * --------------------------------------------------------------------------- 130 * ---------------------------------------------------------------------------
134 * Local variables: 131 * Local variables:
135 * c-file-style: "linux" 132 * c-file-style: "linux"
136 * End: 133 * End:
137 */ 134 */