Commit 325f8a0a31df567dbafafc48f8e60f3c1f101a46
Committed by: Ralf Baechle
1 parent: ef6c1fd662
Exists in: master and 7 other branches
MIPS: Two-level pagetables for 64-bit kernels with 64KB pages.
For 64-bit kernels with 64KB pages and two-level page tables, there are 42 bits worth of virtual address space. This is larger than the 40 bits of virtual address space obtained with the default 4KB page size and three levels, so there are no drawbacks to using two-level tables with this configuration.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/761/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
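As a quick sanity check of the numbers in the message (an illustrative sketch, not part of the patch): with 8-byte table entries, a level backed by order-N pages contributes PAGE_SHIFT + N - 3 bits of index. The constants below mirror the PGD/PMD/PTE orders defined in pgtable-64.h further down.

#include <stdio.h>

/* Illustrative check of the 42-bit vs. 40-bit claim; not kernel code. */
int main(void)
{
	/* 64KB pages, two levels: PGD_ORDER = 0, PTE_ORDER = 0 */
	int bits_64k = 16 + (16 + 0 - 3) + (16 + 0 - 3);
	/* 4KB pages, three levels: PGD_ORDER = 1, PMD_ORDER = 0, PTE_ORDER = 0 */
	int bits_4k = 12 + (12 + 0 - 3) + (12 + 0 - 3) + (12 + 1 - 3);

	printf("64KB/2-level: %d bits\n", bits_64k);	/* 42 */
	printf("4KB/3-level:  %d bits\n", bits_4k);	/* 40 */
	return 0;
}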
Showing 8 changed files with 71 additions and 35 deletions (inline diff view)
arch/mips/include/asm/page.h
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle | 6 | * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | */ | 8 | */ |
9 | #ifndef _ASM_PAGE_H | 9 | #ifndef _ASM_PAGE_H |
10 | #define _ASM_PAGE_H | 10 | #define _ASM_PAGE_H |
11 | 11 | ||
12 | #include <spaces.h> | 12 | #include <spaces.h> |
13 | #include <linux/const.h> | 13 | #include <linux/const.h> |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * PAGE_SHIFT determines the page size | 16 | * PAGE_SHIFT determines the page size |
17 | */ | 17 | */ |
18 | #ifdef CONFIG_PAGE_SIZE_4KB | 18 | #ifdef CONFIG_PAGE_SIZE_4KB |
19 | #define PAGE_SHIFT 12 | 19 | #define PAGE_SHIFT 12 |
20 | #endif | 20 | #endif |
21 | #ifdef CONFIG_PAGE_SIZE_8KB | 21 | #ifdef CONFIG_PAGE_SIZE_8KB |
22 | #define PAGE_SHIFT 13 | 22 | #define PAGE_SHIFT 13 |
23 | #endif | 23 | #endif |
24 | #ifdef CONFIG_PAGE_SIZE_16KB | 24 | #ifdef CONFIG_PAGE_SIZE_16KB |
25 | #define PAGE_SHIFT 14 | 25 | #define PAGE_SHIFT 14 |
26 | #endif | 26 | #endif |
27 | #ifdef CONFIG_PAGE_SIZE_32KB | 27 | #ifdef CONFIG_PAGE_SIZE_32KB |
28 | #define PAGE_SHIFT 15 | 28 | #define PAGE_SHIFT 15 |
29 | #endif | 29 | #endif |
30 | #ifdef CONFIG_PAGE_SIZE_64KB | 30 | #ifdef CONFIG_PAGE_SIZE_64KB |
31 | #define PAGE_SHIFT 16 | 31 | #define PAGE_SHIFT 16 |
32 | #endif | 32 | #endif |
33 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 33 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
34 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) | 34 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
35 | 35 | ||
36 | #ifdef CONFIG_HUGETLB_PAGE | 36 | #ifdef CONFIG_HUGETLB_PAGE |
37 | #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) | 37 | #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) |
38 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) | 38 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) |
39 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | 39 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
40 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 40 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
41 | #endif /* CONFIG_HUGETLB_PAGE */ | 41 | #endif /* CONFIG_HUGETLB_PAGE */ |
42 | 42 | ||
43 | #ifndef __ASSEMBLY__ | 43 | #ifndef __ASSEMBLY__ |
44 | 44 | ||
45 | #include <linux/pfn.h> | 45 | #include <linux/pfn.h> |
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | 47 | ||
48 | extern void build_clear_page(void); | 48 | extern void build_clear_page(void); |
49 | extern void build_copy_page(void); | 49 | extern void build_copy_page(void); |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * It's normally defined only for FLATMEM config but it's | 52 | * It's normally defined only for FLATMEM config but it's |
53 | * used in our early mem init code for all memory models. | 53 | * used in our early mem init code for all memory models. |
54 | * So always define it. | 54 | * So always define it. |
55 | */ | 55 | */ |
56 | #define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) | 56 | #define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) |
57 | 57 | ||
58 | extern void clear_page(void * page); | 58 | extern void clear_page(void * page); |
59 | extern void copy_page(void * to, void * from); | 59 | extern void copy_page(void * to, void * from); |
60 | 60 | ||
61 | extern unsigned long shm_align_mask; | 61 | extern unsigned long shm_align_mask; |
62 | 62 | ||
63 | static inline unsigned long pages_do_alias(unsigned long addr1, | 63 | static inline unsigned long pages_do_alias(unsigned long addr1, |
64 | unsigned long addr2) | 64 | unsigned long addr2) |
65 | { | 65 | { |
66 | return (addr1 ^ addr2) & shm_align_mask; | 66 | return (addr1 ^ addr2) & shm_align_mask; |
67 | } | 67 | } |
68 | 68 | ||
69 | struct page; | 69 | struct page; |
70 | 70 | ||
71 | static inline void clear_user_page(void *addr, unsigned long vaddr, | 71 | static inline void clear_user_page(void *addr, unsigned long vaddr, |
72 | struct page *page) | 72 | struct page *page) |
73 | { | 73 | { |
74 | extern void (*flush_data_cache_page)(unsigned long addr); | 74 | extern void (*flush_data_cache_page)(unsigned long addr); |
75 | 75 | ||
76 | clear_page(addr); | 76 | clear_page(addr); |
77 | if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) | 77 | if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) |
78 | flush_data_cache_page((unsigned long)addr); | 78 | flush_data_cache_page((unsigned long)addr); |
79 | } | 79 | } |
80 | 80 | ||
81 | extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | 81 | extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, |
82 | struct page *to); | 82 | struct page *to); |
83 | struct vm_area_struct; | 83 | struct vm_area_struct; |
84 | extern void copy_user_highpage(struct page *to, struct page *from, | 84 | extern void copy_user_highpage(struct page *to, struct page *from, |
85 | unsigned long vaddr, struct vm_area_struct *vma); | 85 | unsigned long vaddr, struct vm_area_struct *vma); |
86 | 86 | ||
87 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE | 87 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * These are used to make use of C type-checking.. | 90 | * These are used to make use of C type-checking.. |
91 | */ | 91 | */ |
92 | #ifdef CONFIG_64BIT_PHYS_ADDR | 92 | #ifdef CONFIG_64BIT_PHYS_ADDR |
93 | #ifdef CONFIG_CPU_MIPS32 | 93 | #ifdef CONFIG_CPU_MIPS32 |
94 | typedef struct { unsigned long pte_low, pte_high; } pte_t; | 94 | typedef struct { unsigned long pte_low, pte_high; } pte_t; |
95 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | 95 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) |
96 | #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) | 96 | #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) |
97 | #else | 97 | #else |
98 | typedef struct { unsigned long long pte; } pte_t; | 98 | typedef struct { unsigned long long pte; } pte_t; |
99 | #define pte_val(x) ((x).pte) | 99 | #define pte_val(x) ((x).pte) |
100 | #define __pte(x) ((pte_t) { (x) } ) | 100 | #define __pte(x) ((pte_t) { (x) } ) |
101 | #endif | 101 | #endif |
102 | #else | 102 | #else |
103 | typedef struct { unsigned long pte; } pte_t; | 103 | typedef struct { unsigned long pte; } pte_t; |
104 | #define pte_val(x) ((x).pte) | 104 | #define pte_val(x) ((x).pte) |
105 | #define __pte(x) ((pte_t) { (x) } ) | 105 | #define __pte(x) ((pte_t) { (x) } ) |
106 | #endif | 106 | #endif |
107 | typedef struct page *pgtable_t; | 107 | typedef struct page *pgtable_t; |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * For 3-level pagetables we define these ourselves, for 2-level the | ||
111 | * definitions are supplied by <asm-generic/pgtable-nopmd.h>. | ||
112 | */ | ||
113 | #ifdef CONFIG_64BIT | ||
114 | |||
115 | typedef struct { unsigned long pmd; } pmd_t; | ||
116 | #define pmd_val(x) ((x).pmd) | ||
117 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
118 | |||
119 | #endif | ||
120 | |||
121 | /* | ||
122 | * Right now we don't support 4-level pagetables, so all pud-related | 110 | * Right now we don't support 4-level pagetables, so all pud-related |
123 | * definitions come from <asm-generic/pgtable-nopud.h>. | 111 | * definitions come from <asm-generic/pgtable-nopud.h>. |
124 | */ | 112 | */ |
125 | 113 | ||
126 | /* | 114 | /* |
127 | * Finally the top of the hierarchy, the pgd | 115 | * Finally the top of the hierarchy, the pgd |
128 | */ | 116 | */ |
129 | typedef struct { unsigned long pgd; } pgd_t; | 117 | typedef struct { unsigned long pgd; } pgd_t; |
130 | #define pgd_val(x) ((x).pgd) | 118 | #define pgd_val(x) ((x).pgd) |
131 | #define __pgd(x) ((pgd_t) { (x) } ) | 119 | #define __pgd(x) ((pgd_t) { (x) } ) |
132 | 120 | ||
133 | /* | 121 | /* |
134 | * Manipulate page protection bits | 122 | * Manipulate page protection bits |
135 | */ | 123 | */ |
136 | typedef struct { unsigned long pgprot; } pgprot_t; | 124 | typedef struct { unsigned long pgprot; } pgprot_t; |
137 | #define pgprot_val(x) ((x).pgprot) | 125 | #define pgprot_val(x) ((x).pgprot) |
138 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 126 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
139 | 127 | ||
140 | /* | 128 | /* |
141 | * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd | 129 | * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd |
142 | * pair of pages we only have a single global bit per pair of pages. When | 130 | * pair of pages we only have a single global bit per pair of pages. When |
143 | * writing to the TLB make sure we always have the bit set for both pages | 131 | * writing to the TLB make sure we always have the bit set for both pages |
144 | * or none. This macro is used to access the `buddy' of the pte we're just | 132 | * or none. This macro is used to access the `buddy' of the pte we're just |
145 | * working on. | 133 | * working on. |
146 | */ | 134 | */ |
147 | #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) | 135 | #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) |
148 | 136 | ||
149 | #endif /* !__ASSEMBLY__ */ | 137 | #endif /* !__ASSEMBLY__ */ |
150 | 138 | ||
151 | /* | 139 | /* |
152 | * __pa()/__va() should be used only during mem init. | 140 | * __pa()/__va() should be used only during mem init. |
153 | */ | 141 | */ |
154 | #ifdef CONFIG_64BIT | 142 | #ifdef CONFIG_64BIT |
155 | #define __pa(x) \ | 143 | #define __pa(x) \ |
156 | ({ \ | 144 | ({ \ |
157 | unsigned long __x = (unsigned long)(x); \ | 145 | unsigned long __x = (unsigned long)(x); \ |
158 | __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \ | 146 | __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \ |
159 | }) | 147 | }) |
160 | #else | 148 | #else |
161 | #define __pa(x) \ | 149 | #define __pa(x) \ |
162 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) |
163 | #endif | 151 | #endif |
164 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
165 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 153 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
166 | 154 | ||
167 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 155 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
168 | 156 | ||
169 | #ifdef CONFIG_FLATMEM | 157 | #ifdef CONFIG_FLATMEM |
170 | 158 | ||
171 | #define pfn_valid(pfn) \ | 159 | #define pfn_valid(pfn) \ |
172 | ({ \ | 160 | ({ \ |
173 | unsigned long __pfn = (pfn); \ | 161 | unsigned long __pfn = (pfn); \ |
174 | /* avoid <linux/bootmem.h> include hell */ \ | 162 | /* avoid <linux/bootmem.h> include hell */ \ |
175 | extern unsigned long min_low_pfn; \ | 163 | extern unsigned long min_low_pfn; \ |
176 | \ | 164 | \ |
177 | __pfn >= min_low_pfn && __pfn < max_mapnr; \ | 165 | __pfn >= min_low_pfn && __pfn < max_mapnr; \ |
178 | }) | 166 | }) |
179 | 167 | ||
180 | #elif defined(CONFIG_SPARSEMEM) | 168 | #elif defined(CONFIG_SPARSEMEM) |
181 | 169 | ||
182 | /* pfn_valid is defined in linux/mmzone.h */ | 170 | /* pfn_valid is defined in linux/mmzone.h */ |
183 | 171 | ||
184 | #elif defined(CONFIG_NEED_MULTIPLE_NODES) | 172 | #elif defined(CONFIG_NEED_MULTIPLE_NODES) |
185 | 173 | ||
186 | #define pfn_valid(pfn) \ | 174 | #define pfn_valid(pfn) \ |
187 | ({ \ | 175 | ({ \ |
188 | unsigned long __pfn = (pfn); \ | 176 | unsigned long __pfn = (pfn); \ |
189 | int __n = pfn_to_nid(__pfn); \ | 177 | int __n = pfn_to_nid(__pfn); \ |
190 | ((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn + \ | 178 | ((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn + \ |
191 | NODE_DATA(__n)->node_spanned_pages) \ | 179 | NODE_DATA(__n)->node_spanned_pages) \ |
192 | : 0); \ | 180 | : 0); \ |
193 | }) | 181 | }) |
194 | 182 | ||
195 | #endif | 183 | #endif |
196 | 184 | ||
197 | #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) | 185 | #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) |
198 | #define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr))) | 186 | #define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr))) |
199 | 187 | ||
200 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 188 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
201 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 189 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
202 | 190 | ||
203 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) | 191 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) |
204 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) | 192 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) |
205 | 193 | ||
206 | #include <asm-generic/memory_model.h> | 194 | #include <asm-generic/memory_model.h> |
207 | #include <asm-generic/getorder.h> | 195 | #include <asm-generic/getorder.h> |
208 | 196 | ||
209 | #endif /* _ASM_PAGE_H */ | 197 | #endif /* _ASM_PAGE_H */ |
210 | 198 |
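The pmd_t definition removed from page.h above reappears in pgtable-64.h below, guarded by __PAGETABLE_PMD_FOLDED. When the pmd level is folded away, <asm-generic/pgtable-nopmd.h> supplies the type instead. Roughly what that generic fallback looks like (a paraphrased sketch, with minimal stand-in pud_t definitions added here so it is self-contained; the real types come from the arch headers):

/* Stand-ins for the arch-provided pud type, for illustration only. */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) } )

/* Paraphrased from <asm-generic/pgtable-nopmd.h>: with the pmd folded,
 * pmd_t is a thin wrapper around pud_t and the "pmd table" has exactly
 * one entry, so walking it is just a cast.
 */
typedef struct { pud_t pud; } pmd_t;

#define PTRS_PER_PMD	1
#define pmd_val(x)	(pud_val((x).pud))
#define __pmd(x)	((pmd_t) { __pud(x) } )

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud;	/* one pmd per pud: no extra dereference */
}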
arch/mips/include/asm/pgalloc.h
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle | 6 | * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle |
7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. |
8 | */ | 8 | */ |
9 | #ifndef _ASM_PGALLOC_H | 9 | #ifndef _ASM_PGALLOC_H |
10 | #define _ASM_PGALLOC_H | 10 | #define _ASM_PGALLOC_H |
11 | 11 | ||
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | 15 | ||
16 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, | 16 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, |
17 | pte_t *pte) | 17 | pte_t *pte) |
18 | { | 18 | { |
19 | set_pmd(pmd, __pmd((unsigned long)pte)); | 19 | set_pmd(pmd, __pmd((unsigned long)pte)); |
20 | } | 20 | } |
21 | 21 | ||
22 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | 22 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, |
23 | pgtable_t pte) | 23 | pgtable_t pte) |
24 | { | 24 | { |
25 | set_pmd(pmd, __pmd((unsigned long)page_address(pte))); | 25 | set_pmd(pmd, __pmd((unsigned long)page_address(pte))); |
26 | } | 26 | } |
27 | #define pmd_pgtable(pmd) pmd_page(pmd) | 27 | #define pmd_pgtable(pmd) pmd_page(pmd) |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Initialize a new pmd table with invalid pointers. | 30 | * Initialize a new pmd table with invalid pointers. |
31 | */ | 31 | */ |
32 | extern void pmd_init(unsigned long page, unsigned long pagetable); | 32 | extern void pmd_init(unsigned long page, unsigned long pagetable); |
33 | 33 | ||
34 | #ifdef CONFIG_64BIT | 34 | #ifndef __PAGETABLE_PMD_FOLDED |
35 | 35 | ||
36 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | 36 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) |
37 | { | 37 | { |
38 | set_pud(pud, __pud((unsigned long)pmd)); | 38 | set_pud(pud, __pud((unsigned long)pmd)); |
39 | } | 39 | } |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Initialize a new pgd / pmd table with invalid pointers. | 43 | * Initialize a new pgd / pmd table with invalid pointers. |
44 | */ | 44 | */ |
45 | extern void pgd_init(unsigned long page); | 45 | extern void pgd_init(unsigned long page); |
46 | 46 | ||
47 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 47 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
48 | { | 48 | { |
49 | pgd_t *ret, *init; | 49 | pgd_t *ret, *init; |
50 | 50 | ||
51 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); | 51 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); |
52 | if (ret) { | 52 | if (ret) { |
53 | init = pgd_offset(&init_mm, 0UL); | 53 | init = pgd_offset(&init_mm, 0UL); |
54 | pgd_init((unsigned long)ret); | 54 | pgd_init((unsigned long)ret); |
55 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | 55 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, |
56 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | 56 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
57 | } | 57 | } |
58 | 58 | ||
59 | return ret; | 59 | return ret; |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 62 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
63 | { | 63 | { |
64 | free_pages((unsigned long)pgd, PGD_ORDER); | 64 | free_pages((unsigned long)pgd, PGD_ORDER); |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 67 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
68 | unsigned long address) | 68 | unsigned long address) |
69 | { | 69 | { |
70 | pte_t *pte; | 70 | pte_t *pte; |
71 | 71 | ||
72 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); | 72 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); |
73 | 73 | ||
74 | return pte; | 74 | return pte; |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | 77 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
78 | unsigned long address) | 78 | unsigned long address) |
79 | { | 79 | { |
80 | struct page *pte; | 80 | struct page *pte; |
81 | 81 | ||
82 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | 82 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); |
83 | if (pte) { | 83 | if (pte) { |
84 | clear_highpage(pte); | 84 | clear_highpage(pte); |
85 | pgtable_page_ctor(pte); | 85 | pgtable_page_ctor(pte); |
86 | } | 86 | } |
87 | return pte; | 87 | return pte; |
88 | } | 88 | } |
89 | 89 | ||
90 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | 90 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
91 | { | 91 | { |
92 | free_pages((unsigned long)pte, PTE_ORDER); | 92 | free_pages((unsigned long)pte, PTE_ORDER); |
93 | } | 93 | } |
94 | 94 | ||
95 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | 95 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) |
96 | { | 96 | { |
97 | pgtable_page_dtor(pte); | 97 | pgtable_page_dtor(pte); |
98 | __free_pages(pte, PTE_ORDER); | 98 | __free_pages(pte, PTE_ORDER); |
99 | } | 99 | } |
100 | 100 | ||
101 | #define __pte_free_tlb(tlb,pte,address) \ | 101 | #define __pte_free_tlb(tlb,pte,address) \ |
102 | do { \ | 102 | do { \ |
103 | pgtable_page_dtor(pte); \ | 103 | pgtable_page_dtor(pte); \ |
104 | tlb_remove_page((tlb), pte); \ | 104 | tlb_remove_page((tlb), pte); \ |
105 | } while (0) | 105 | } while (0) |
106 | 106 | ||
107 | #ifdef CONFIG_64BIT | 107 | #ifndef __PAGETABLE_PMD_FOLDED |
108 | 108 | ||
109 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 109 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
110 | { | 110 | { |
111 | pmd_t *pmd; | 111 | pmd_t *pmd; |
112 | 112 | ||
113 | pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); | 113 | pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); |
114 | if (pmd) | 114 | if (pmd) |
115 | pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); | 115 | pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); |
116 | return pmd; | 116 | return pmd; |
117 | } | 117 | } |
118 | 118 | ||
119 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 119 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
120 | { | 120 | { |
121 | free_pages((unsigned long)pmd, PMD_ORDER); | 121 | free_pages((unsigned long)pmd, PMD_ORDER); |
122 | } | 122 | } |
123 | 123 | ||
124 | #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) | 124 | #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) |
125 | 125 | ||
126 | #endif | 126 | #endif |
127 | 127 | ||
128 | #define check_pgt_cache() do { } while (0) | 128 | #define check_pgt_cache() do { } while (0) |
129 | 129 | ||
130 | extern void pagetable_init(void); | 130 | extern void pagetable_init(void); |
131 | 131 | ||
132 | #endif /* _ASM_PGALLOC_H */ | 132 | #endif /* _ASM_PGALLOC_H */ |
133 | 133 |
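The guard change above, from CONFIG_64BIT to __PAGETABLE_PMD_FOLDED, matters because the generic nopmd header already supplies no-op versions of these helpers once the level is folded; keying off CONFIG_64BIT alone would now produce duplicate definitions on 64-bit/64KB builds. Roughly what the generic side stubs out (a paraphrased sketch, with hypothetical stand-in types so it compiles on its own):

#include <stddef.h>

/* Stand-in types for illustration; the real ones come from arch headers. */
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pmd; } pmd_t;
struct mm_struct;

/* Paraphrased from <asm-generic/pgtable-nopmd.h>: with a folded pmd the
 * pud is never "none", so generic mm code never reaches a real allocator.
 */
static inline int  pud_none(pud_t pud)    { return 0; }
static inline int  pud_present(pud_t pud) { return 1; }
static inline void pud_clear(pud_t *pud)  { }

#define pud_populate(mm, pud, pmd)	do { } while (0)
#define pmd_alloc_one(mm, address)	NULL	/* unreachable: pud_none() is 0 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { }
#define __pmd_free_tlb(tlb, x, a)	do { } while (0)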
arch/mips/include/asm/pgtable-64.h
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle | 6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle |
7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. |
8 | */ | 8 | */ |
9 | #ifndef _ASM_PGTABLE_64_H | 9 | #ifndef _ASM_PGTABLE_64_H |
10 | #define _ASM_PGTABLE_64_H | 10 | #define _ASM_PGTABLE_64_H |
11 | 11 | ||
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | 13 | ||
14 | #include <asm/addrspace.h> | 14 | #include <asm/addrspace.h> |
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/cachectl.h> | 16 | #include <asm/cachectl.h> |
17 | #include <asm/fixmap.h> | 17 | #include <asm/fixmap.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PAGE_SIZE_64KB | ||
20 | #include <asm-generic/pgtable-nopmd.h> | ||
21 | #else | ||
19 | #include <asm-generic/pgtable-nopud.h> | 22 | #include <asm-generic/pgtable-nopud.h> |
23 | #endif | ||
20 | 24 | ||
21 | /* | 25 | /* |
22 | * Each address space has 2 4K pages as its page directory, giving 1024 | 26 | * Each address space has 2 4K pages as its page directory, giving 1024 |
23 | * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a | 27 | * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a |
24 | * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page | 28 | * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page |
25 | * tables. Each page table is also a single 4K page, giving 512 (== | 29 | * tables. Each page table is also a single 4K page, giving 512 (== |
26 | * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to | 30 | * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to |
27 | * invalid_pmd_table, each pmd entry is initialized to point to | 31 | * invalid_pmd_table, each pmd entry is initialized to point to |
28 | * invalid_pte_table, each pte is initialized to 0. When memory is low, | 32 | * invalid_pte_table, each pte is initialized to 0. When memory is low, |
29 | * and a pmd table or a page table allocation fails, empty_bad_pmd_table | 33 | * and a pmd table or a page table allocation fails, empty_bad_pmd_table |
30 | * and empty_bad_page_table is returned back to higher layer code, so | 34 | * and empty_bad_page_table is returned back to higher layer code, so |
31 | * that the failure is recognized later on. Linux does not seem to | 35 | * that the failure is recognized later on. Linux does not seem to |
32 | * handle these failures very well though. The empty_bad_page_table has | 36 | * handle these failures very well though. The empty_bad_page_table has |
33 | * invalid pte entries in it, to force page faults. | 37 | * invalid pte entries in it, to force page faults. |
34 | * | 38 | * |
35 | * Kernel mappings: kernel mappings are held in the swapper_pg_table. | 39 | * Kernel mappings: kernel mappings are held in the swapper_pg_table. |
36 | * The layout is identical to userspace except it's indexed with the | 40 | * The layout is identical to userspace except it's indexed with the |
37 | * fault address - VMALLOC_START. | 41 | * fault address - VMALLOC_START. |
38 | */ | 42 | */ |
39 | 43 | ||
44 | |||
45 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
46 | #ifdef __PAGETABLE_PMD_FOLDED | ||
47 | #define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3) | ||
48 | #else | ||
49 | |||
40 | /* PMD_SHIFT determines the size of the area a second-level page table can map */ | 50 | /* PMD_SHIFT determines the size of the area a second-level page table can map */ |
41 | #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) | 51 | #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) |
42 | #define PMD_SIZE (1UL << PMD_SHIFT) | 52 | #define PMD_SIZE (1UL << PMD_SHIFT) |
43 | #define PMD_MASK (~(PMD_SIZE-1)) | 53 | #define PMD_MASK (~(PMD_SIZE-1)) |
44 | 54 | ||
45 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | 55 | |
46 | #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) | 56 | #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) |
57 | #endif | ||
47 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 58 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
48 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 59 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
49 | 60 | ||
50 | /* | 61 | /* |
51 | * For 4kB page size we use a 3 level page tree and an 8kB pud, which | 62 | * For 4kB page size we use a 3 level page tree and an 8kB pud, which |
52 | * permits us mapping 40 bits of virtual address space. | 63 | * permits us mapping 40 bits of virtual address space. |
53 | * | 64 | * |
54 | * We used to implement 41 bits by having an order 1 pmd level but that seemed | 65 | * We used to implement 41 bits by having an order 1 pmd level but that seemed |
55 | * rather pointless. | 66 | * rather pointless. |
56 | * | 67 | * |
57 | * For 8kB page size we use a 3 level page tree which permits a total of | 68 | * For 8kB page size we use a 3 level page tree which permits a total of |
58 | * 8TB of address space. Alternatively a 33-bit / 8GB organization using | 69 | * 8TB of address space. Alternatively a 33-bit / 8GB organization using |
59 | * two levels would be easy to implement. | 70 | * two levels would be easy to implement. |
60 | * | 71 | * |
61 | * For 16kB page size we use a 2 level page tree which permits a total of | 72 | * For 16kB page size we use a 2 level page tree which permits a total of |
62 | * 36 bits of virtual address space. We could add a third level but it seems | 73 | * 36 bits of virtual address space. We could add a third level but it seems |
63 | * like at the moment there's no need for this. | 74 | * like at the moment there's no need for this. |
64 | * | 75 | * |
65 | * For 64kB page size we use a 2 level page table tree for a total of 42 bits | 76 | * For 64kB page size we use a 2 level page table tree for a total of 42 bits |
66 | * of virtual address space. | 77 | * of virtual address space. |
67 | */ | 78 | */ |
68 | #ifdef CONFIG_PAGE_SIZE_4KB | 79 | #ifdef CONFIG_PAGE_SIZE_4KB |
69 | #define PGD_ORDER 1 | 80 | #define PGD_ORDER 1 |
70 | #define PUD_ORDER aieeee_attempt_to_allocate_pud | 81 | #define PUD_ORDER aieeee_attempt_to_allocate_pud |
71 | #define PMD_ORDER 0 | 82 | #define PMD_ORDER 0 |
72 | #define PTE_ORDER 0 | 83 | #define PTE_ORDER 0 |
73 | #endif | 84 | #endif |
74 | #ifdef CONFIG_PAGE_SIZE_8KB | 85 | #ifdef CONFIG_PAGE_SIZE_8KB |
75 | #define PGD_ORDER 0 | 86 | #define PGD_ORDER 0 |
76 | #define PUD_ORDER aieeee_attempt_to_allocate_pud | 87 | #define PUD_ORDER aieeee_attempt_to_allocate_pud |
77 | #define PMD_ORDER 0 | 88 | #define PMD_ORDER 0 |
78 | #define PTE_ORDER 0 | 89 | #define PTE_ORDER 0 |
79 | #endif | 90 | #endif |
80 | #ifdef CONFIG_PAGE_SIZE_16KB | 91 | #ifdef CONFIG_PAGE_SIZE_16KB |
81 | #define PGD_ORDER 0 | 92 | #define PGD_ORDER 0 |
82 | #define PUD_ORDER aieeee_attempt_to_allocate_pud | 93 | #define PUD_ORDER aieeee_attempt_to_allocate_pud |
83 | #define PMD_ORDER 0 | 94 | #define PMD_ORDER 0 |
84 | #define PTE_ORDER 0 | 95 | #define PTE_ORDER 0 |
85 | #endif | 96 | #endif |
86 | #ifdef CONFIG_PAGE_SIZE_32KB | 97 | #ifdef CONFIG_PAGE_SIZE_32KB |
87 | #define PGD_ORDER 0 | 98 | #define PGD_ORDER 0 |
88 | #define PUD_ORDER aieeee_attempt_to_allocate_pud | 99 | #define PUD_ORDER aieeee_attempt_to_allocate_pud |
89 | #define PMD_ORDER 0 | 100 | #define PMD_ORDER 0 |
90 | #define PTE_ORDER 0 | 101 | #define PTE_ORDER 0 |
91 | #endif | 102 | #endif |
92 | #ifdef CONFIG_PAGE_SIZE_64KB | 103 | #ifdef CONFIG_PAGE_SIZE_64KB |
93 | #define PGD_ORDER 0 | 104 | #define PGD_ORDER 0 |
94 | #define PUD_ORDER aieeee_attempt_to_allocate_pud | 105 | #define PUD_ORDER aieeee_attempt_to_allocate_pud |
95 | #define PMD_ORDER 0 | 106 | #define PMD_ORDER aieeee_attempt_to_allocate_pmd |
96 | #define PTE_ORDER 0 | 107 | #define PTE_ORDER 0 |
97 | #endif | 108 | #endif |
98 | 109 | ||
99 | #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) | 110 | #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) |
111 | #ifndef __PAGETABLE_PMD_FOLDED | ||
100 | #define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t)) | 112 | #define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t)) |
113 | #endif | ||
101 | #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) | 114 | #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) |
102 | 115 | ||
103 | #if PGDIR_SIZE >= TASK_SIZE | 116 | #if PGDIR_SIZE >= TASK_SIZE |
104 | #define USER_PTRS_PER_PGD (1) | 117 | #define USER_PTRS_PER_PGD (1) |
105 | #else | 118 | #else |
106 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | 119 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
107 | #endif | 120 | #endif |
108 | #define FIRST_USER_ADDRESS 0UL | 121 | #define FIRST_USER_ADDRESS 0UL |
109 | 122 | ||
110 | #define VMALLOC_START MAP_BASE | 123 | #define VMALLOC_START MAP_BASE |
111 | #define VMALLOC_END \ | 124 | #define VMALLOC_END \ |
112 | (VMALLOC_START + \ | 125 | (VMALLOC_START + \ |
113 | min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \ | 126 | min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \ |
114 | (1UL << cpu_vmbits)) - (1UL << 32)) | 127 | (1UL << cpu_vmbits)) - (1UL << 32)) |
115 | 128 | ||
116 | #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \ | 129 | #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \ |
117 | VMALLOC_START != CKSSEG | 130 | VMALLOC_START != CKSSEG |
118 | /* Load modules into 32bit-compatible segment. */ | 131 | /* Load modules into 32bit-compatible segment. */ |
119 | #define MODULE_START CKSSEG | 132 | #define MODULE_START CKSSEG |
120 | #define MODULE_END (FIXADDR_START-2*PAGE_SIZE) | 133 | #define MODULE_END (FIXADDR_START-2*PAGE_SIZE) |
121 | #endif | 134 | #endif |
122 | 135 | ||
123 | #define pte_ERROR(e) \ | 136 | #define pte_ERROR(e) \ |
124 | printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) | 137 | printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) |
138 | #ifndef __PAGETABLE_PMD_FOLDED | ||
125 | #define pmd_ERROR(e) \ | 139 | #define pmd_ERROR(e) \ |
126 | printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) | 140 | printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) |
141 | #endif | ||
127 | #define pgd_ERROR(e) \ | 142 | #define pgd_ERROR(e) \ |
128 | printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) | 143 | printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) |
129 | 144 | ||
130 | extern pte_t invalid_pte_table[PTRS_PER_PTE]; | 145 | extern pte_t invalid_pte_table[PTRS_PER_PTE]; |
131 | extern pte_t empty_bad_page_table[PTRS_PER_PTE]; | 146 | extern pte_t empty_bad_page_table[PTRS_PER_PTE]; |
147 | |||
148 | |||
149 | #ifndef __PAGETABLE_PMD_FOLDED | ||
150 | /* | ||
151 | * For 3-level pagetables we defines these ourselves, for 2-level the | ||
152 | * definitions are supplied by <asm-generic/pgtable-nopmd.h>. | ||
153 | */ | ||
154 | typedef struct { unsigned long pmd; } pmd_t; | ||
155 | #define pmd_val(x) ((x).pmd) | ||
156 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
157 | |||
158 | |||
132 | extern pmd_t invalid_pmd_table[PTRS_PER_PMD]; | 159 | extern pmd_t invalid_pmd_table[PTRS_PER_PMD]; |
133 | extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD]; | 160 | extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD]; |
161 | #endif | ||
134 | 162 | ||
135 | /* | 163 | /* |
136 | * Empty pgd/pmd entries point to the invalid_pte_table. | 164 | * Empty pgd/pmd entries point to the invalid_pte_table. |
137 | */ | 165 | */ |
138 | static inline int pmd_none(pmd_t pmd) | 166 | static inline int pmd_none(pmd_t pmd) |
139 | { | 167 | { |
140 | return pmd_val(pmd) == (unsigned long) invalid_pte_table; | 168 | return pmd_val(pmd) == (unsigned long) invalid_pte_table; |
141 | } | 169 | } |
142 | 170 | ||
143 | #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) | 171 | #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) |
144 | 172 | ||
145 | static inline int pmd_present(pmd_t pmd) | 173 | static inline int pmd_present(pmd_t pmd) |
146 | { | 174 | { |
147 | return pmd_val(pmd) != (unsigned long) invalid_pte_table; | 175 | return pmd_val(pmd) != (unsigned long) invalid_pte_table; |
148 | } | 176 | } |
149 | 177 | ||
150 | static inline void pmd_clear(pmd_t *pmdp) | 178 | static inline void pmd_clear(pmd_t *pmdp) |
151 | { | 179 | { |
152 | pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); | 180 | pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); |
153 | } | 181 | } |
182 | #ifndef __PAGETABLE_PMD_FOLDED | ||
154 | 183 | ||
155 | /* | 184 | /* |
156 | * Empty pud entries point to the invalid_pmd_table. | 185 | * Empty pud entries point to the invalid_pmd_table. |
157 | */ | 186 | */ |
158 | static inline int pud_none(pud_t pud) | 187 | static inline int pud_none(pud_t pud) |
159 | { | 188 | { |
160 | return pud_val(pud) == (unsigned long) invalid_pmd_table; | 189 | return pud_val(pud) == (unsigned long) invalid_pmd_table; |
161 | } | 190 | } |
162 | 191 | ||
163 | static inline int pud_bad(pud_t pud) | 192 | static inline int pud_bad(pud_t pud) |
164 | { | 193 | { |
165 | return pud_val(pud) & ~PAGE_MASK; | 194 | return pud_val(pud) & ~PAGE_MASK; |
166 | } | 195 | } |
167 | 196 | ||
168 | static inline int pud_present(pud_t pud) | 197 | static inline int pud_present(pud_t pud) |
169 | { | 198 | { |
170 | return pud_val(pud) != (unsigned long) invalid_pmd_table; | 199 | return pud_val(pud) != (unsigned long) invalid_pmd_table; |
171 | } | 200 | } |
172 | 201 | ||
173 | static inline void pud_clear(pud_t *pudp) | 202 | static inline void pud_clear(pud_t *pudp) |
174 | { | 203 | { |
175 | pud_val(*pudp) = ((unsigned long) invalid_pmd_table); | 204 | pud_val(*pudp) = ((unsigned long) invalid_pmd_table); |
176 | } | 205 | } |
206 | #endif | ||
177 | 207 | ||
178 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 208 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
179 | 209 | ||
180 | #ifdef CONFIG_CPU_VR41XX | 210 | #ifdef CONFIG_CPU_VR41XX |
181 | #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) | 211 | #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) |
182 | #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) | 212 | #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) |
183 | #else | 213 | #else |
184 | #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) | 214 | #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) |
185 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 215 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
186 | #endif | 216 | #endif |
187 | 217 | ||
188 | #define __pgd_offset(address) pgd_index(address) | 218 | #define __pgd_offset(address) pgd_index(address) |
189 | #define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | 219 | #define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
190 | #define __pmd_offset(address) pmd_index(address) | 220 | #define __pmd_offset(address) pmd_index(address) |
191 | 221 | ||
192 | /* to find an entry in a kernel page-table-directory */ | 222 | /* to find an entry in a kernel page-table-directory */ |
193 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 223 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
194 | 224 | ||
195 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 225 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
196 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 226 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
197 | 227 | ||
198 | /* to find an entry in a page-table-directory */ | 228 | /* to find an entry in a page-table-directory */ |
199 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | 229 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) |
200 | 230 | ||
231 | #ifndef __PAGETABLE_PMD_FOLDED | ||
201 | static inline unsigned long pud_page_vaddr(pud_t pud) | 232 | static inline unsigned long pud_page_vaddr(pud_t pud) |
202 | { | 233 | { |
203 | return pud_val(pud); | 234 | return pud_val(pud); |
204 | } | 235 | } |
205 | #define pud_phys(pud) virt_to_phys((void *)pud_val(pud)) | 236 | #define pud_phys(pud) virt_to_phys((void *)pud_val(pud)) |
206 | #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) | 237 | #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) |
207 | 238 | ||
208 | /* Find an entry in the second-level page table.. */ | 239 | /* Find an entry in the second-level page table.. */ |
209 | static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) | 240 | static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) |
210 | { | 241 | { |
211 | return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address); | 242 | return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address); |
212 | } | 243 | } |
244 | #endif | ||
213 | 245 | ||
214 | /* Find an entry in the third-level page table.. */ | 246 | /* Find an entry in the third-level page table.. */ |
215 | #define __pte_offset(address) \ | 247 | #define __pte_offset(address) \ |
216 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 248 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
217 | #define pte_offset(dir, address) \ | 249 | #define pte_offset(dir, address) \ |
218 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) | 250 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
219 | #define pte_offset_kernel(dir, address) \ | 251 | #define pte_offset_kernel(dir, address) \ |
220 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) | 252 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
221 | #define pte_offset_map(dir, address) \ | 253 | #define pte_offset_map(dir, address) \ |
222 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) | 254 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) |
223 | #define pte_offset_map_nested(dir, address) \ | 255 | #define pte_offset_map_nested(dir, address) \ |
224 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) | 256 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) |
225 | #define pte_unmap(pte) ((void)(pte)) | 257 | #define pte_unmap(pte) ((void)(pte)) |
226 | #define pte_unmap_nested(pte) ((void)(pte)) | 258 | #define pte_unmap_nested(pte) ((void)(pte)) |
227 | 259 | ||
228 | /* | 260 | /* |
229 | * Initialize a new pgd / pmd table with invalid pointers. | 261 | * Initialize a new pgd / pmd table with invalid pointers. |
230 | */ | 262 | */ |
231 | extern void pgd_init(unsigned long page); | 263 | extern void pgd_init(unsigned long page); |
232 | extern void pmd_init(unsigned long page, unsigned long pagetable); | 264 | extern void pmd_init(unsigned long page, unsigned long pagetable); |
233 | 265 | ||
234 | /* | 266 | /* |
235 | * Non-present pages: high 24 bits are offset, next 8 bits type, | 267 | * Non-present pages: high 24 bits are offset, next 8 bits type, |
236 | * low 32 bits zero. | 268 | * low 32 bits zero. |
237 | */ | 269 | */ |
238 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | 270 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
239 | { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } | 271 | { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } |
240 | 272 | ||
241 | #define __swp_type(x) (((x).val >> 32) & 0xff) | 273 | #define __swp_type(x) (((x).val >> 32) & 0xff) |
242 | #define __swp_offset(x) ((x).val >> 40) | 274 | #define __swp_offset(x) ((x).val >> 40) |
243 | #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) | 275 | #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) |
244 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 276 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
245 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 277 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
246 | 278 | ||
247 | /* | 279 | /* |
248 | * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to | 280 | * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to |
249 | * make things easier, and only use the upper 56 bits for the page offset... | 281 | * make things easier, and only use the upper 56 bits for the page offset... |
250 | */ | 282 | */ |
251 | #define PTE_FILE_MAX_BITS 56 | 283 | #define PTE_FILE_MAX_BITS 56 |
252 | 284 | ||
253 | #define pte_to_pgoff(_pte) ((_pte).pte >> 8) | 285 | #define pte_to_pgoff(_pte) ((_pte).pte >> 8) |
254 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE }) | 286 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE }) |
255 | 287 | ||
256 | #endif /* _ASM_PGTABLE_64_H */ | 288 | #endif /* _ASM_PGTABLE_64_H */ |
257 | 289 |
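Plugging the 64KB configuration into the new folded-case PGDIR_SHIFT above yields the 42 bits the commit message claims. A standalone check (illustrative arithmetic that mirrors the macros rather than quoting kernel code; sizeof(unsigned long) stands in for sizeof(pgd_t)):

#include <stdio.h>

/* Mirrors the folded-case macros from pgtable-64.h for a quick check. */
#define PAGE_SHIFT	16				/* CONFIG_PAGE_SIZE_64KB */
#define PTE_ORDER	0
#define PGD_ORDER	0
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)	/* 29 */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(unsigned long))	/* 8192 */

int main(void)
{
	int pgd_bits = __builtin_ctzl(PTRS_PER_PGD);	/* log2(8192) = 13 */

	/* each pgd entry maps 2^29 bytes; 2^13 entries -> 2^42 bytes total */
	printf("PGDIR_SHIFT = %d, total VA bits = %d\n",
	       PGDIR_SHIFT, PGDIR_SHIFT + pgd_bits);	/* 29, 42 */
	return 0;
}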
arch/mips/include/asm/pgtable.h
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 Ralf Baechle | 6 | * Copyright (C) 2003 Ralf Baechle |
7 | */ | 7 | */ |
8 | #ifndef _ASM_PGTABLE_H | 8 | #ifndef _ASM_PGTABLE_H |
9 | #define _ASM_PGTABLE_H | 9 | #define _ASM_PGTABLE_H |
10 | 10 | ||
11 | #ifdef CONFIG_32BIT | 11 | #ifdef CONFIG_32BIT |
12 | #include <asm/pgtable-32.h> | 12 | #include <asm/pgtable-32.h> |
13 | #endif | 13 | #endif |
14 | #ifdef CONFIG_64BIT | 14 | #ifdef CONFIG_64BIT |
15 | #include <asm/pgtable-64.h> | 15 | #include <asm/pgtable-64.h> |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | #include <asm/pgtable-bits.h> | 19 | #include <asm/pgtable-bits.h> |
20 | 20 | ||
21 | struct mm_struct; | 21 | struct mm_struct; |
22 | struct vm_area_struct; | 22 | struct vm_area_struct; |
23 | 23 | ||
24 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) | 24 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) |
25 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ | 25 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
26 | _page_cachable_default) | 26 | _page_cachable_default) |
27 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ | 27 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ |
28 | _page_cachable_default) | 28 | _page_cachable_default) |
29 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ | 29 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ |
30 | _page_cachable_default) | 30 | _page_cachable_default) |
31 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ | 31 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ |
32 | _PAGE_GLOBAL | _page_cachable_default) | 32 | _PAGE_GLOBAL | _page_cachable_default) |
33 | #define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ | 33 | #define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
34 | _page_cachable_default) | 34 | _page_cachable_default) |
35 | #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ | 35 | #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ |
36 | __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) | 36 | __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * MIPS can't do page protection for execute, and considers that the same as | 39 | * MIPS can't do page protection for execute, and considers that the same as |
40 | * read. Also, write permissions imply read permissions. This is the closest | 40 | * read. Also, write permissions imply read permissions. This is the closest |
41 | * we can get by reasonable means.. | 41 | * we can get by reasonable means.. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Dummy values to fill the table in mmap.c | 45 | * Dummy values to fill the table in mmap.c |
46 | * The real values will be generated at runtime | 46 | * The real values will be generated at runtime |
47 | */ | 47 | */ |
48 | #define __P000 __pgprot(0) | 48 | #define __P000 __pgprot(0) |
49 | #define __P001 __pgprot(0) | 49 | #define __P001 __pgprot(0) |
50 | #define __P010 __pgprot(0) | 50 | #define __P010 __pgprot(0) |
51 | #define __P011 __pgprot(0) | 51 | #define __P011 __pgprot(0) |
52 | #define __P100 __pgprot(0) | 52 | #define __P100 __pgprot(0) |
53 | #define __P101 __pgprot(0) | 53 | #define __P101 __pgprot(0) |
54 | #define __P110 __pgprot(0) | 54 | #define __P110 __pgprot(0) |
55 | #define __P111 __pgprot(0) | 55 | #define __P111 __pgprot(0) |
56 | 56 | ||
57 | #define __S000 __pgprot(0) | 57 | #define __S000 __pgprot(0) |
58 | #define __S001 __pgprot(0) | 58 | #define __S001 __pgprot(0) |
59 | #define __S010 __pgprot(0) | 59 | #define __S010 __pgprot(0) |
60 | #define __S011 __pgprot(0) | 60 | #define __S011 __pgprot(0) |
61 | #define __S100 __pgprot(0) | 61 | #define __S100 __pgprot(0) |
62 | #define __S101 __pgprot(0) | 62 | #define __S101 __pgprot(0) |
63 | #define __S110 __pgprot(0) | 63 | #define __S110 __pgprot(0) |
64 | #define __S111 __pgprot(0) | 64 | #define __S111 __pgprot(0) |
65 | 65 | ||
66 | extern unsigned long _page_cachable_default; | 66 | extern unsigned long _page_cachable_default; |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * ZERO_PAGE is a global shared page that is always zero; used | 69 | * ZERO_PAGE is a global shared page that is always zero; used |
70 | * for zero-mapped memory areas etc.. | 70 | * for zero-mapped memory areas etc.. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | extern unsigned long empty_zero_page; | 73 | extern unsigned long empty_zero_page; |
74 | extern unsigned long zero_page_mask; | 74 | extern unsigned long zero_page_mask; |
75 | 75 | ||
76 | #define ZERO_PAGE(vaddr) \ | 76 | #define ZERO_PAGE(vaddr) \ |
77 | (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) | 77 | (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) |
78 | 78 | ||
79 | #define is_zero_pfn is_zero_pfn | 79 | #define is_zero_pfn is_zero_pfn |
80 | static inline int is_zero_pfn(unsigned long pfn) | 80 | static inline int is_zero_pfn(unsigned long pfn) |
81 | { | 81 | { |
82 | extern unsigned long zero_pfn; | 82 | extern unsigned long zero_pfn; |
83 | unsigned long offset_from_zero_pfn = pfn - zero_pfn; | 83 | unsigned long offset_from_zero_pfn = pfn - zero_pfn; |
84 | return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); | 84 | return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); |
85 | } | 85 | } |
86 | 86 | ||
87 | #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) | 87 | #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) |
88 | 88 | ||
89 | extern void paging_init(void); | 89 | extern void paging_init(void); |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Conversion functions: convert a page and protection to a page entry, | 92 | * Conversion functions: convert a page and protection to a page entry, |
93 | * and a page entry and page directory to the page they refer to. | 93 | * and a page entry and page directory to the page they refer to. |
94 | */ | 94 | */ |
95 | #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) | 95 | #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) |
96 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) | 96 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) |
97 | #define pmd_page_vaddr(pmd) pmd_val(pmd) | 97 | #define pmd_page_vaddr(pmd) pmd_val(pmd) |
98 | 98 | ||
99 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 99 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
100 | 100 | ||
101 | #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) | 101 | #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) |
102 | #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) | 102 | #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) |
103 | 103 | ||
104 | static inline void set_pte(pte_t *ptep, pte_t pte) | 104 | static inline void set_pte(pte_t *ptep, pte_t pte) |
105 | { | 105 | { |
106 | ptep->pte_high = pte.pte_high; | 106 | ptep->pte_high = pte.pte_high; |
107 | smp_wmb(); | 107 | smp_wmb(); |
108 | ptep->pte_low = pte.pte_low; | 108 | ptep->pte_low = pte.pte_low; |
109 | //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low); | 109 | //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low); |
110 | 110 | ||
111 | if (pte.pte_low & _PAGE_GLOBAL) { | 111 | if (pte.pte_low & _PAGE_GLOBAL) { |
112 | pte_t *buddy = ptep_buddy(ptep); | 112 | pte_t *buddy = ptep_buddy(ptep); |
113 | /* | 113 | /* |
114 | * Make sure the buddy is global too (if it's !none, | 114 | * Make sure the buddy is global too (if it's !none, |
115 | * it better already be global) | 115 | * it better already be global) |
116 | */ | 116 | */ |
117 | if (pte_none(*buddy)) { | 117 | if (pte_none(*buddy)) { |
118 | buddy->pte_low |= _PAGE_GLOBAL; | 118 | buddy->pte_low |= _PAGE_GLOBAL; |
119 | buddy->pte_high |= _PAGE_GLOBAL; | 119 | buddy->pte_high |= _PAGE_GLOBAL; |
120 | } | 120 | } |
121 | } | 121 | } |
122 | } | 122 | } |
123 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | 123 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) |
124 | 124 | ||
125 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 125 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
126 | { | 126 | { |
127 | pte_t null = __pte(0); | 127 | pte_t null = __pte(0); |
128 | 128 | ||
129 | /* Preserve global status for the pair */ | 129 | /* Preserve global status for the pair */ |
130 | if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) | 130 | if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) |
131 | null.pte_low = null.pte_high = _PAGE_GLOBAL; | 131 | null.pte_low = null.pte_high = _PAGE_GLOBAL; |
132 | 132 | ||
133 | set_pte_at(mm, addr, ptep, null); | 133 | set_pte_at(mm, addr, ptep, null); |
134 | } | 134 | } |
135 | #else | 135 | #else |
136 | 136 | ||
137 | #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) | 137 | #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) |
138 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | 138 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Certain architectures need to do special things when pte's | 141 | * Certain architectures need to do special things when pte's |
142 | * within a page table are directly modified. Thus, the following | 142 | * within a page table are directly modified. Thus, the following |
143 | * hook is made available. | 143 | * hook is made available. |
144 | */ | 144 | */ |
145 | static inline void set_pte(pte_t *ptep, pte_t pteval) | 145 | static inline void set_pte(pte_t *ptep, pte_t pteval) |
146 | { | 146 | { |
147 | *ptep = pteval; | 147 | *ptep = pteval; |
148 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) | 148 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) |
149 | if (pte_val(pteval) & _PAGE_GLOBAL) { | 149 | if (pte_val(pteval) & _PAGE_GLOBAL) { |
150 | pte_t *buddy = ptep_buddy(ptep); | 150 | pte_t *buddy = ptep_buddy(ptep); |
151 | /* | 151 | /* |
152 | * Make sure the buddy is global too (if it's !none, | 152 | * Make sure the buddy is global too (if it's !none, |
153 | * it better already be global) | 153 | * it better already be global) |
154 | */ | 154 | */ |
155 | if (pte_none(*buddy)) | 155 | if (pte_none(*buddy)) |
156 | pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; | 156 | pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; |
157 | } | 157 | } |
158 | #endif | 158 | #endif |
159 | } | 159 | } |
160 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | 160 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) |
161 | 161 | ||
162 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 162 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
163 | { | 163 | { |
164 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) | 164 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) |
165 | /* Preserve global status for the pair */ | 165 | /* Preserve global status for the pair */ |
166 | if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) | 166 | if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) |
167 | set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); | 167 | set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); |
168 | else | 168 | else |
169 | #endif | 169 | #endif |
170 | set_pte_at(mm, addr, ptep, __pte(0)); | 170 | set_pte_at(mm, addr, ptep, __pte(0)); |
171 | } | 171 | } |
172 | #endif | 172 | #endif |
173 | 173 | ||
174 | /* | 174 | /* |
175 | * (pmds are folded into puds so this doesn't get actually called, | 175 | * (pmds are folded into puds so this doesn't get actually called, |
176 | * but the define is needed for a generic inline function.) | 176 | * but the define is needed for a generic inline function.) |
177 | */ | 177 | */ |
178 | #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0) | 178 | #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0) |
179 | 179 | ||
180 | #ifdef CONFIG_64BIT | 180 | #ifndef __PAGETABLE_PMD_FOLDED |
181 | /* | 181 | /* |
182 | * (puds are folded into pgds so this doesn't get actually called, | 182 | * (puds are folded into pgds so this doesn't get actually called, |
183 | * but the define is needed for a generic inline function.) | 183 | * but the define is needed for a generic inline function.) |
184 | */ | 184 | */ |
185 | #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) | 185 | #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0) |
186 | #endif | 186 | #endif |
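This hunk carries the point of the patch in this header: with a two-level layout the pmd level is folded away and <asm-generic/pgtable-nopmd.h> already supplies a set_pud(), so a second definition here would collide. Guarding on __PAGETABLE_PMD_FOLDED rather than CONFIG_64BIT keeps the local definition only for three-level (unfolded pmd) builds. For orientation, the generic fallback is roughly:

    /* From asm-generic/pgtable-nopmd.h (paraphrased, not part of this
     * diff): with the pmd folded, a pud store is just a pmd store. */
    #define set_pud(pudptr, pudval) \
            set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })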
187 | 187 | ||
188 | #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) | 188 | #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) |
189 | #define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) | 189 | #define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) |
190 | #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) | 190 | #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) |
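These helpers compute log2 of the entry sizes at compile time: __builtin_ffs() returns the 1-based position of the least significant set bit, so for a power-of-two size subtracting one yields the exact logarithm. Worked example, assuming 8-byte entries as on 64-bit kernels:

    /* sizeof(pgd_t)    == 8      (binary 1000)
     * __builtin_ffs(8) == 4      (ffs() is 1-based)
     * PGD_T_LOG2       == 4 - 1  == 3 == log2(8)
     * Only valid because the sizes are powers of two. */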
191 | 191 | ||
192 | /* | 192 | /* |
193 | * We used to declare this array with its size, but gcc 3.3 and older cannot | 193 | * We used to declare this array with its size, but gcc 3.3 and older cannot |
194 | * determine that the expression is a constant, so the size is dropped. | 194 | * determine that the expression is a constant, so the size is dropped. |
195 | */ | 195 | */ |
196 | extern pgd_t swapper_pg_dir[]; | 196 | extern pgd_t swapper_pg_dir[]; |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * The following only work if pte_present() is true. | 199 | * The following only work if pte_present() is true. |
200 | * Undefined behaviour if not. | 200 | * Undefined behaviour if not. |
201 | */ | 201 | */ |
202 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 202 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
203 | static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } | 203 | static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } |
204 | static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } | 204 | static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } |
205 | static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } | 205 | static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } |
206 | static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; } | 206 | static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; } |
207 | 207 | ||
208 | static inline pte_t pte_wrprotect(pte_t pte) | 208 | static inline pte_t pte_wrprotect(pte_t pte) |
209 | { | 209 | { |
210 | pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); | 210 | pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); |
211 | pte.pte_high &= ~_PAGE_SILENT_WRITE; | 211 | pte.pte_high &= ~_PAGE_SILENT_WRITE; |
212 | return pte; | 212 | return pte; |
213 | } | 213 | } |
214 | 214 | ||
215 | static inline pte_t pte_mkclean(pte_t pte) | 215 | static inline pte_t pte_mkclean(pte_t pte) |
216 | { | 216 | { |
217 | pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); | 217 | pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); |
218 | pte.pte_high &= ~_PAGE_SILENT_WRITE; | 218 | pte.pte_high &= ~_PAGE_SILENT_WRITE; |
219 | return pte; | 219 | return pte; |
220 | } | 220 | } |
221 | 221 | ||
222 | static inline pte_t pte_mkold(pte_t pte) | 222 | static inline pte_t pte_mkold(pte_t pte) |
223 | { | 223 | { |
224 | pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); | 224 | pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); |
225 | pte.pte_high &= ~_PAGE_SILENT_READ; | 225 | pte.pte_high &= ~_PAGE_SILENT_READ; |
226 | return pte; | 226 | return pte; |
227 | } | 227 | } |
228 | 228 | ||
229 | static inline pte_t pte_mkwrite(pte_t pte) | 229 | static inline pte_t pte_mkwrite(pte_t pte) |
230 | { | 230 | { |
231 | pte.pte_low |= _PAGE_WRITE; | 231 | pte.pte_low |= _PAGE_WRITE; |
232 | if (pte.pte_low & _PAGE_MODIFIED) { | 232 | if (pte.pte_low & _PAGE_MODIFIED) { |
233 | pte.pte_low |= _PAGE_SILENT_WRITE; | 233 | pte.pte_low |= _PAGE_SILENT_WRITE; |
234 | pte.pte_high |= _PAGE_SILENT_WRITE; | 234 | pte.pte_high |= _PAGE_SILENT_WRITE; |
235 | } | 235 | } |
236 | return pte; | 236 | return pte; |
237 | } | 237 | } |
238 | 238 | ||
239 | static inline pte_t pte_mkdirty(pte_t pte) | 239 | static inline pte_t pte_mkdirty(pte_t pte) |
240 | { | 240 | { |
241 | pte.pte_low |= _PAGE_MODIFIED; | 241 | pte.pte_low |= _PAGE_MODIFIED; |
242 | if (pte.pte_low & _PAGE_WRITE) { | 242 | if (pte.pte_low & _PAGE_WRITE) { |
243 | pte.pte_low |= _PAGE_SILENT_WRITE; | 243 | pte.pte_low |= _PAGE_SILENT_WRITE; |
244 | pte.pte_high |= _PAGE_SILENT_WRITE; | 244 | pte.pte_high |= _PAGE_SILENT_WRITE; |
245 | } | 245 | } |
246 | return pte; | 246 | return pte; |
247 | } | 247 | } |
248 | 248 | ||
249 | static inline pte_t pte_mkyoung(pte_t pte) | 249 | static inline pte_t pte_mkyoung(pte_t pte) |
250 | { | 250 | { |
251 | pte.pte_low |= _PAGE_ACCESSED; | 251 | pte.pte_low |= _PAGE_ACCESSED; |
252 | if (pte.pte_low & _PAGE_READ) { | 252 | if (pte.pte_low & _PAGE_READ) { |
253 | pte.pte_low |= _PAGE_SILENT_READ; | 253 | pte.pte_low |= _PAGE_SILENT_READ; |
254 | pte.pte_high |= _PAGE_SILENT_READ; | 254 | pte.pte_high |= _PAGE_SILENT_READ; |
255 | } | 255 | } |
256 | return pte; | 256 | return pte; |
257 | } | 257 | } |
258 | #else | 258 | #else |
259 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } | 259 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } |
260 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } | 260 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } |
261 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | 261 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } |
262 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 262 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } |
263 | 263 | ||
264 | static inline pte_t pte_wrprotect(pte_t pte) | 264 | static inline pte_t pte_wrprotect(pte_t pte) |
265 | { | 265 | { |
266 | pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); | 266 | pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); |
267 | return pte; | 267 | return pte; |
268 | } | 268 | } |
269 | 269 | ||
270 | static inline pte_t pte_mkclean(pte_t pte) | 270 | static inline pte_t pte_mkclean(pte_t pte) |
271 | { | 271 | { |
272 | pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); | 272 | pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); |
273 | return pte; | 273 | return pte; |
274 | } | 274 | } |
275 | 275 | ||
276 | static inline pte_t pte_mkold(pte_t pte) | 276 | static inline pte_t pte_mkold(pte_t pte) |
277 | { | 277 | { |
278 | pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); | 278 | pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); |
279 | return pte; | 279 | return pte; |
280 | } | 280 | } |
281 | 281 | ||
282 | static inline pte_t pte_mkwrite(pte_t pte) | 282 | static inline pte_t pte_mkwrite(pte_t pte) |
283 | { | 283 | { |
284 | pte_val(pte) |= _PAGE_WRITE; | 284 | pte_val(pte) |= _PAGE_WRITE; |
285 | if (pte_val(pte) & _PAGE_MODIFIED) | 285 | if (pte_val(pte) & _PAGE_MODIFIED) |
286 | pte_val(pte) |= _PAGE_SILENT_WRITE; | 286 | pte_val(pte) |= _PAGE_SILENT_WRITE; |
287 | return pte; | 287 | return pte; |
288 | } | 288 | } |
289 | 289 | ||
290 | static inline pte_t pte_mkdirty(pte_t pte) | 290 | static inline pte_t pte_mkdirty(pte_t pte) |
291 | { | 291 | { |
292 | pte_val(pte) |= _PAGE_MODIFIED; | 292 | pte_val(pte) |= _PAGE_MODIFIED; |
293 | if (pte_val(pte) & _PAGE_WRITE) | 293 | if (pte_val(pte) & _PAGE_WRITE) |
294 | pte_val(pte) |= _PAGE_SILENT_WRITE; | 294 | pte_val(pte) |= _PAGE_SILENT_WRITE; |
295 | return pte; | 295 | return pte; |
296 | } | 296 | } |
297 | 297 | ||
298 | static inline pte_t pte_mkyoung(pte_t pte) | 298 | static inline pte_t pte_mkyoung(pte_t pte) |
299 | { | 299 | { |
300 | pte_val(pte) |= _PAGE_ACCESSED; | 300 | pte_val(pte) |= _PAGE_ACCESSED; |
301 | if (pte_val(pte) & _PAGE_READ) | 301 | if (pte_val(pte) & _PAGE_READ) |
302 | pte_val(pte) |= _PAGE_SILENT_READ; | 302 | pte_val(pte) |= _PAGE_SILENT_READ; |
303 | return pte; | 303 | return pte; |
304 | } | 304 | } |
305 | 305 | ||
306 | #ifdef _PAGE_HUGE | 306 | #ifdef _PAGE_HUGE |
307 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } | 307 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } |
308 | 308 | ||
309 | static inline pte_t pte_mkhuge(pte_t pte) | 309 | static inline pte_t pte_mkhuge(pte_t pte) |
310 | { | 310 | { |
311 | pte_val(pte) |= _PAGE_HUGE; | 311 | pte_val(pte) |= _PAGE_HUGE; |
312 | return pte; | 312 | return pte; |
313 | } | 313 | } |
314 | #endif /* _PAGE_HUGE */ | 314 | #endif /* _PAGE_HUGE */ |
315 | #endif | 315 | #endif |
316 | static inline int pte_special(pte_t pte) { return 0; } | 316 | static inline int pte_special(pte_t pte) { return 0; } |
317 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 317 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
318 | 318 | ||
319 | /* | 319 | /* |
320 | * Macro to mark a page protection value as "uncacheable". Note | 320 | * Macro to mark a page protection value as "uncacheable". Note |
321 | * that "protection" is really a misnomer here as the protection value | 321 | * that "protection" is really a misnomer here as the protection value |
322 | * contains the memory attribute bits, dirty bits, and various other | 322 | * contains the memory attribute bits, dirty bits, and various other |
323 | * bits as well. | 323 | * bits as well. |
324 | */ | 324 | */ |
325 | #define pgprot_noncached pgprot_noncached | 325 | #define pgprot_noncached pgprot_noncached |
326 | 326 | ||
327 | static inline pgprot_t pgprot_noncached(pgprot_t _prot) | 327 | static inline pgprot_t pgprot_noncached(pgprot_t _prot) |
328 | { | 328 | { |
329 | unsigned long prot = pgprot_val(_prot); | 329 | unsigned long prot = pgprot_val(_prot); |
330 | 330 | ||
331 | prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; | 331 | prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; |
332 | 332 | ||
333 | return __pgprot(prot); | 333 | return __pgprot(prot); |
334 | } | 334 | } |
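A typical consumer is a driver mmap() handler that must expose device registers to user space uncached. A hedged sketch; mydev_mmap and MYDEV_REGS_PFN are made-up names, only pgprot_noncached() and io_remap_pfn_range() come from this header:

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* MYDEV_REGS_PFN: hypothetical pfn of a register block. */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return io_remap_pfn_range(vma, vma->vm_start, MYDEV_REGS_PFN,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
    }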
335 | 335 | ||
336 | /* | 336 | /* |
337 | * Conversion functions: convert a page and protection to a page entry, | 337 | * Conversion functions: convert a page and protection to a page entry, |
338 | * and a page entry and page directory to the page they refer to. | 338 | * and a page entry and page directory to the page they refer to. |
339 | */ | 339 | */ |
340 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 340 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
341 | 341 | ||
342 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 342 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
343 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 343 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
344 | { | 344 | { |
345 | pte.pte_low &= _PAGE_CHG_MASK; | 345 | pte.pte_low &= _PAGE_CHG_MASK; |
346 | pte.pte_high &= ~0x3f; | 346 | pte.pte_high &= ~0x3f; |
347 | pte.pte_low |= pgprot_val(newprot); | 347 | pte.pte_low |= pgprot_val(newprot); |
348 | pte.pte_high |= pgprot_val(newprot) & 0x3f; | 348 | pte.pte_high |= pgprot_val(newprot) & 0x3f; |
349 | return pte; | 349 | return pte; |
350 | } | 350 | } |
351 | #else | 351 | #else |
352 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 352 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
353 | { | 353 | { |
354 | return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); | 354 | return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); |
355 | } | 355 | } |
356 | #endif | 356 | #endif |
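In both variants pte_modify() keeps the bits covered by _PAGE_CHG_MASK, which include the page frame number and the accessed/dirty state, and swaps in the new protection; the 64-bit-physaddr variant additionally mirrors the low six attribute bits into pte_high. A one-line sketch of the usual pattern, with PAGE_READONLY standing in for a protection supplied by the generic mm layer:

    /* Downgrade a mapping to read-only; the frame and dirty/accessed
     * state survive, only the protection bits change. */
    pte = pte_modify(pte, PAGE_READONLY);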
357 | 357 | ||
358 | 358 | ||
359 | extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, | 359 | extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, |
360 | pte_t pte); | 360 | pte_t pte); |
361 | extern void __update_cache(struct vm_area_struct *vma, unsigned long address, | 361 | extern void __update_cache(struct vm_area_struct *vma, unsigned long address, |
362 | pte_t pte); | 362 | pte_t pte); |
363 | 363 | ||
364 | static inline void update_mmu_cache(struct vm_area_struct *vma, | 364 | static inline void update_mmu_cache(struct vm_area_struct *vma, |
365 | unsigned long address, pte_t pte) | 365 | unsigned long address, pte_t pte) |
366 | { | 366 | { |
367 | __update_tlb(vma, address, pte); | 367 | __update_tlb(vma, address, pte); |
368 | __update_cache(vma, address, pte); | 368 | __update_cache(vma, address, pte); |
369 | } | 369 | } |
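The generic fault path invokes this hook right after installing a PTE, giving the architecture a chance to preload the TLB (__update_tlb) and to fix up icache/dcache state for aliasing caches (__update_cache). Roughly, the calling sequence in the core mm code is:

    /* Sketch of the generic caller, not part of this file: */
    set_pte_at(vma->vm_mm, address, ptep, entry);
    update_mmu_cache(vma, address, entry);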
370 | 370 | ||
371 | #define kern_addr_valid(addr) (1) | 371 | #define kern_addr_valid(addr) (1) |
372 | 372 | ||
373 | #ifdef CONFIG_64BIT_PHYS_ADDR | 373 | #ifdef CONFIG_64BIT_PHYS_ADDR |
374 | extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); | 374 | extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); |
375 | 375 | ||
376 | static inline int io_remap_pfn_range(struct vm_area_struct *vma, | 376 | static inline int io_remap_pfn_range(struct vm_area_struct *vma, |
377 | unsigned long vaddr, | 377 | unsigned long vaddr, |
378 | unsigned long pfn, | 378 | unsigned long pfn, |
379 | unsigned long size, | 379 | unsigned long size, |
380 | pgprot_t prot) | 380 | pgprot_t prot) |
381 | { | 381 | { |
382 | phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); | 382 | phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); |
383 | return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot); | 383 | return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot); |
384 | } | 384 | } |
385 | #else | 385 | #else |
386 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 386 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
387 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 387 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
388 | #endif | 388 | #endif |
389 | 389 | ||
390 | #include <asm-generic/pgtable.h> | 390 | #include <asm-generic/pgtable.h> |
391 | 391 | ||
392 | /* | 392 | /* |
393 | * uncached accelerated TLB map for video memory access | 393 | * uncached accelerated TLB map for video memory access |
394 | */ | 394 | */ |
395 | #ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED | 395 | #ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED |
396 | #define __HAVE_PHYS_MEM_ACCESS_PROT | 396 | #define __HAVE_PHYS_MEM_ACCESS_PROT |
397 | 397 | ||
398 | struct file; | 398 | struct file; |
399 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 399 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
400 | unsigned long size, pgprot_t vma_prot); | 400 | unsigned long size, pgprot_t vma_prot); |
401 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | 401 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
402 | unsigned long size, pgprot_t *vma_prot); | 402 | unsigned long size, pgprot_t *vma_prot); |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * We provide our own get_unmapped_area() to cope with the virtual aliasing | 406 | * We provide our own get_unmapped_area() to cope with the virtual aliasing |
407 | * constraints placed on us by the cache architecture. | 407 | * constraints placed on us by the cache architecture. |
408 | */ | 408 | */ |
409 | #define HAVE_ARCH_UNMAPPED_AREA | 409 | #define HAVE_ARCH_UNMAPPED_AREA |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * No page table caches to initialise | 412 | * No page table caches to initialise |
413 | */ | 413 | */ |
414 | #define pgtable_cache_init() do { } while (0) | 414 | #define pgtable_cache_init() do { } while (0) |
415 | 415 | ||
416 | #endif /* _ASM_PGTABLE_H */ | 416 | #endif /* _ASM_PGTABLE_H */ |
417 | 417 |
arch/mips/kernel/asm-offsets.c
1 | /* | 1 | /* |
2 | * offset.c: Calculate pt_regs and task_struct offsets. | 2 | * offset.c: Calculate pt_regs and task_struct offsets. |
3 | * | 3 | * |
4 | * Copyright (C) 1996 David S. Miller | 4 | * Copyright (C) 1996 David S. Miller |
5 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle | 5 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle |
6 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 6 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
7 | * | 7 | * |
8 | * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 8 | * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
9 | * Copyright (C) 2000 MIPS Technologies, Inc. | 9 | * Copyright (C) 2000 MIPS Technologies, Inc. |
10 | */ | 10 | */ |
11 | #include <linux/compat.h> | 11 | #include <linux/compat.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/kbuild.h> | 16 | #include <linux/kbuild.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | 20 | ||
21 | void output_ptreg_defines(void) | 21 | void output_ptreg_defines(void) |
22 | { | 22 | { |
23 | COMMENT("MIPS pt_regs offsets."); | 23 | COMMENT("MIPS pt_regs offsets."); |
24 | OFFSET(PT_R0, pt_regs, regs[0]); | 24 | OFFSET(PT_R0, pt_regs, regs[0]); |
25 | OFFSET(PT_R1, pt_regs, regs[1]); | 25 | OFFSET(PT_R1, pt_regs, regs[1]); |
26 | OFFSET(PT_R2, pt_regs, regs[2]); | 26 | OFFSET(PT_R2, pt_regs, regs[2]); |
27 | OFFSET(PT_R3, pt_regs, regs[3]); | 27 | OFFSET(PT_R3, pt_regs, regs[3]); |
28 | OFFSET(PT_R4, pt_regs, regs[4]); | 28 | OFFSET(PT_R4, pt_regs, regs[4]); |
29 | OFFSET(PT_R5, pt_regs, regs[5]); | 29 | OFFSET(PT_R5, pt_regs, regs[5]); |
30 | OFFSET(PT_R6, pt_regs, regs[6]); | 30 | OFFSET(PT_R6, pt_regs, regs[6]); |
31 | OFFSET(PT_R7, pt_regs, regs[7]); | 31 | OFFSET(PT_R7, pt_regs, regs[7]); |
32 | OFFSET(PT_R8, pt_regs, regs[8]); | 32 | OFFSET(PT_R8, pt_regs, regs[8]); |
33 | OFFSET(PT_R9, pt_regs, regs[9]); | 33 | OFFSET(PT_R9, pt_regs, regs[9]); |
34 | OFFSET(PT_R10, pt_regs, regs[10]); | 34 | OFFSET(PT_R10, pt_regs, regs[10]); |
35 | OFFSET(PT_R11, pt_regs, regs[11]); | 35 | OFFSET(PT_R11, pt_regs, regs[11]); |
36 | OFFSET(PT_R12, pt_regs, regs[12]); | 36 | OFFSET(PT_R12, pt_regs, regs[12]); |
37 | OFFSET(PT_R13, pt_regs, regs[13]); | 37 | OFFSET(PT_R13, pt_regs, regs[13]); |
38 | OFFSET(PT_R14, pt_regs, regs[14]); | 38 | OFFSET(PT_R14, pt_regs, regs[14]); |
39 | OFFSET(PT_R15, pt_regs, regs[15]); | 39 | OFFSET(PT_R15, pt_regs, regs[15]); |
40 | OFFSET(PT_R16, pt_regs, regs[16]); | 40 | OFFSET(PT_R16, pt_regs, regs[16]); |
41 | OFFSET(PT_R17, pt_regs, regs[17]); | 41 | OFFSET(PT_R17, pt_regs, regs[17]); |
42 | OFFSET(PT_R18, pt_regs, regs[18]); | 42 | OFFSET(PT_R18, pt_regs, regs[18]); |
43 | OFFSET(PT_R19, pt_regs, regs[19]); | 43 | OFFSET(PT_R19, pt_regs, regs[19]); |
44 | OFFSET(PT_R20, pt_regs, regs[20]); | 44 | OFFSET(PT_R20, pt_regs, regs[20]); |
45 | OFFSET(PT_R21, pt_regs, regs[21]); | 45 | OFFSET(PT_R21, pt_regs, regs[21]); |
46 | OFFSET(PT_R22, pt_regs, regs[22]); | 46 | OFFSET(PT_R22, pt_regs, regs[22]); |
47 | OFFSET(PT_R23, pt_regs, regs[23]); | 47 | OFFSET(PT_R23, pt_regs, regs[23]); |
48 | OFFSET(PT_R24, pt_regs, regs[24]); | 48 | OFFSET(PT_R24, pt_regs, regs[24]); |
49 | OFFSET(PT_R25, pt_regs, regs[25]); | 49 | OFFSET(PT_R25, pt_regs, regs[25]); |
50 | OFFSET(PT_R26, pt_regs, regs[26]); | 50 | OFFSET(PT_R26, pt_regs, regs[26]); |
51 | OFFSET(PT_R27, pt_regs, regs[27]); | 51 | OFFSET(PT_R27, pt_regs, regs[27]); |
52 | OFFSET(PT_R28, pt_regs, regs[28]); | 52 | OFFSET(PT_R28, pt_regs, regs[28]); |
53 | OFFSET(PT_R29, pt_regs, regs[29]); | 53 | OFFSET(PT_R29, pt_regs, regs[29]); |
54 | OFFSET(PT_R30, pt_regs, regs[30]); | 54 | OFFSET(PT_R30, pt_regs, regs[30]); |
55 | OFFSET(PT_R31, pt_regs, regs[31]); | 55 | OFFSET(PT_R31, pt_regs, regs[31]); |
56 | OFFSET(PT_LO, pt_regs, lo); | 56 | OFFSET(PT_LO, pt_regs, lo); |
57 | OFFSET(PT_HI, pt_regs, hi); | 57 | OFFSET(PT_HI, pt_regs, hi); |
58 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | 58 | #ifdef CONFIG_CPU_HAS_SMARTMIPS |
59 | OFFSET(PT_ACX, pt_regs, acx); | 59 | OFFSET(PT_ACX, pt_regs, acx); |
60 | #endif | 60 | #endif |
61 | OFFSET(PT_EPC, pt_regs, cp0_epc); | 61 | OFFSET(PT_EPC, pt_regs, cp0_epc); |
62 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); | 62 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); |
63 | OFFSET(PT_STATUS, pt_regs, cp0_status); | 63 | OFFSET(PT_STATUS, pt_regs, cp0_status); |
64 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); | 64 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); |
65 | #ifdef CONFIG_MIPS_MT_SMTC | 65 | #ifdef CONFIG_MIPS_MT_SMTC |
66 | OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); | 66 | OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); |
67 | #endif /* CONFIG_MIPS_MT_SMTC */ | 67 | #endif /* CONFIG_MIPS_MT_SMTC */ |
68 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 68 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
69 | OFFSET(PT_MPL, pt_regs, mpl); | 69 | OFFSET(PT_MPL, pt_regs, mpl); |
70 | OFFSET(PT_MTP, pt_regs, mtp); | 70 | OFFSET(PT_MTP, pt_regs, mtp); |
71 | #endif /* CONFIG_CPU_CAVIUM_OCTEON */ | 71 | #endif /* CONFIG_CPU_CAVIUM_OCTEON */ |
72 | DEFINE(PT_SIZE, sizeof(struct pt_regs)); | 72 | DEFINE(PT_SIZE, sizeof(struct pt_regs)); |
73 | BLANK(); | 73 | BLANK(); |
74 | } | 74 | } |
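asm-offsets.c is never linked into the kernel; Kbuild compiles it to assembly and scrapes the "->" marker lines that these helpers emit into the generated asm-offsets.h, which assembler sources then include. The helpers come from <linux/kbuild.h> and are roughly:

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))
    #define COMMENT(x) asm volatile("\n->#" x)
    #define BLANK()    asm volatile("\n->" : : )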
75 | 75 | ||
76 | void output_task_defines(void) | 76 | void output_task_defines(void) |
77 | { | 77 | { |
78 | COMMENT("MIPS task_struct offsets."); | 78 | COMMENT("MIPS task_struct offsets."); |
79 | OFFSET(TASK_STATE, task_struct, state); | 79 | OFFSET(TASK_STATE, task_struct, state); |
80 | OFFSET(TASK_THREAD_INFO, task_struct, stack); | 80 | OFFSET(TASK_THREAD_INFO, task_struct, stack); |
81 | OFFSET(TASK_FLAGS, task_struct, flags); | 81 | OFFSET(TASK_FLAGS, task_struct, flags); |
82 | OFFSET(TASK_MM, task_struct, mm); | 82 | OFFSET(TASK_MM, task_struct, mm); |
83 | OFFSET(TASK_PID, task_struct, pid); | 83 | OFFSET(TASK_PID, task_struct, pid); |
84 | DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); | 84 | DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); |
85 | BLANK(); | 85 | BLANK(); |
86 | } | 86 | } |
87 | 87 | ||
88 | void output_thread_info_defines(void) | 88 | void output_thread_info_defines(void) |
89 | { | 89 | { |
90 | COMMENT("MIPS thread_info offsets."); | 90 | COMMENT("MIPS thread_info offsets."); |
91 | OFFSET(TI_TASK, thread_info, task); | 91 | OFFSET(TI_TASK, thread_info, task); |
92 | OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); | 92 | OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); |
93 | OFFSET(TI_FLAGS, thread_info, flags); | 93 | OFFSET(TI_FLAGS, thread_info, flags); |
94 | OFFSET(TI_TP_VALUE, thread_info, tp_value); | 94 | OFFSET(TI_TP_VALUE, thread_info, tp_value); |
95 | OFFSET(TI_CPU, thread_info, cpu); | 95 | OFFSET(TI_CPU, thread_info, cpu); |
96 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); | 96 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); |
97 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); | 97 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); |
98 | OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); | 98 | OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); |
99 | OFFSET(TI_REGS, thread_info, regs); | 99 | OFFSET(TI_REGS, thread_info, regs); |
100 | DEFINE(_THREAD_SIZE, THREAD_SIZE); | 100 | DEFINE(_THREAD_SIZE, THREAD_SIZE); |
101 | DEFINE(_THREAD_MASK, THREAD_MASK); | 101 | DEFINE(_THREAD_MASK, THREAD_MASK); |
102 | BLANK(); | 102 | BLANK(); |
103 | } | 103 | } |
104 | 104 | ||
105 | void output_thread_defines(void) | 105 | void output_thread_defines(void) |
106 | { | 106 | { |
107 | COMMENT("MIPS specific thread_struct offsets."); | 107 | COMMENT("MIPS specific thread_struct offsets."); |
108 | OFFSET(THREAD_REG16, task_struct, thread.reg16); | 108 | OFFSET(THREAD_REG16, task_struct, thread.reg16); |
109 | OFFSET(THREAD_REG17, task_struct, thread.reg17); | 109 | OFFSET(THREAD_REG17, task_struct, thread.reg17); |
110 | OFFSET(THREAD_REG18, task_struct, thread.reg18); | 110 | OFFSET(THREAD_REG18, task_struct, thread.reg18); |
111 | OFFSET(THREAD_REG19, task_struct, thread.reg19); | 111 | OFFSET(THREAD_REG19, task_struct, thread.reg19); |
112 | OFFSET(THREAD_REG20, task_struct, thread.reg20); | 112 | OFFSET(THREAD_REG20, task_struct, thread.reg20); |
113 | OFFSET(THREAD_REG21, task_struct, thread.reg21); | 113 | OFFSET(THREAD_REG21, task_struct, thread.reg21); |
114 | OFFSET(THREAD_REG22, task_struct, thread.reg22); | 114 | OFFSET(THREAD_REG22, task_struct, thread.reg22); |
115 | OFFSET(THREAD_REG23, task_struct, thread.reg23); | 115 | OFFSET(THREAD_REG23, task_struct, thread.reg23); |
116 | OFFSET(THREAD_REG29, task_struct, thread.reg29); | 116 | OFFSET(THREAD_REG29, task_struct, thread.reg29); |
117 | OFFSET(THREAD_REG30, task_struct, thread.reg30); | 117 | OFFSET(THREAD_REG30, task_struct, thread.reg30); |
118 | OFFSET(THREAD_REG31, task_struct, thread.reg31); | 118 | OFFSET(THREAD_REG31, task_struct, thread.reg31); |
119 | OFFSET(THREAD_STATUS, task_struct, | 119 | OFFSET(THREAD_STATUS, task_struct, |
120 | thread.cp0_status); | 120 | thread.cp0_status); |
121 | OFFSET(THREAD_FPU, task_struct, thread.fpu); | 121 | OFFSET(THREAD_FPU, task_struct, thread.fpu); |
122 | 122 | ||
123 | OFFSET(THREAD_BVADDR, task_struct, \ | 123 | OFFSET(THREAD_BVADDR, task_struct, \ |
124 | thread.cp0_badvaddr); | 124 | thread.cp0_badvaddr); |
125 | OFFSET(THREAD_BUADDR, task_struct, \ | 125 | OFFSET(THREAD_BUADDR, task_struct, \ |
126 | thread.cp0_baduaddr); | 126 | thread.cp0_baduaddr); |
127 | OFFSET(THREAD_ECODE, task_struct, \ | 127 | OFFSET(THREAD_ECODE, task_struct, \ |
128 | thread.error_code); | 128 | thread.error_code); |
129 | OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); | 129 | OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); |
130 | OFFSET(THREAD_TRAMP, task_struct, \ | 130 | OFFSET(THREAD_TRAMP, task_struct, \ |
131 | thread.irix_trampoline); | 131 | thread.irix_trampoline); |
132 | OFFSET(THREAD_OLDCTX, task_struct, \ | 132 | OFFSET(THREAD_OLDCTX, task_struct, \ |
133 | thread.irix_oldctx); | 133 | thread.irix_oldctx); |
134 | BLANK(); | 134 | BLANK(); |
135 | } | 135 | } |
136 | 136 | ||
137 | void output_thread_fpu_defines(void) | 137 | void output_thread_fpu_defines(void) |
138 | { | 138 | { |
139 | OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); | 139 | OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); |
140 | OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); | 140 | OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); |
141 | OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); | 141 | OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); |
142 | OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); | 142 | OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); |
143 | OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); | 143 | OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); |
144 | OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); | 144 | OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); |
145 | OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); | 145 | OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); |
146 | OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); | 146 | OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); |
147 | OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); | 147 | OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); |
148 | OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); | 148 | OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); |
149 | OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); | 149 | OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); |
150 | OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); | 150 | OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); |
151 | OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); | 151 | OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); |
152 | OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); | 152 | OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); |
153 | OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); | 153 | OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); |
154 | OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); | 154 | OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); |
155 | OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); | 155 | OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); |
156 | OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); | 156 | OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); |
157 | OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); | 157 | OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); |
158 | OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); | 158 | OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); |
159 | OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); | 159 | OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); |
160 | OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); | 160 | OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); |
161 | OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); | 161 | OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); |
162 | OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); | 162 | OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); |
163 | OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); | 163 | OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); |
164 | OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); | 164 | OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); |
165 | OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); | 165 | OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); |
166 | OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); | 166 | OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); |
167 | OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); | 167 | OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); |
168 | OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); | 168 | OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); |
169 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); | 169 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); |
170 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); | 170 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); |
171 | 171 | ||
172 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); | 172 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); |
173 | BLANK(); | 173 | BLANK(); |
174 | } | 174 | } |
175 | 175 | ||
176 | void output_mm_defines(void) | 176 | void output_mm_defines(void) |
177 | { | 177 | { |
178 | COMMENT("Size of struct page"); | 178 | COMMENT("Size of struct page"); |
179 | DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); | 179 | DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); |
180 | BLANK(); | 180 | BLANK(); |
181 | COMMENT("Linux mm_struct offsets."); | 181 | COMMENT("Linux mm_struct offsets."); |
182 | OFFSET(MM_USERS, mm_struct, mm_users); | 182 | OFFSET(MM_USERS, mm_struct, mm_users); |
183 | OFFSET(MM_PGD, mm_struct, pgd); | 183 | OFFSET(MM_PGD, mm_struct, pgd); |
184 | OFFSET(MM_CONTEXT, mm_struct, context); | 184 | OFFSET(MM_CONTEXT, mm_struct, context); |
185 | BLANK(); | 185 | BLANK(); |
186 | DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); | 186 | DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); |
187 | DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); | 187 | DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); |
188 | DEFINE(_PTE_T_SIZE, sizeof(pte_t)); | 188 | DEFINE(_PTE_T_SIZE, sizeof(pte_t)); |
189 | BLANK(); | 189 | BLANK(); |
190 | DEFINE(_PGD_T_LOG2, PGD_T_LOG2); | 190 | DEFINE(_PGD_T_LOG2, PGD_T_LOG2); |
191 | #ifndef __PAGETABLE_PMD_FOLDED | ||
191 | DEFINE(_PMD_T_LOG2, PMD_T_LOG2); | 192 | DEFINE(_PMD_T_LOG2, PMD_T_LOG2); |
193 | #endif | ||
192 | DEFINE(_PTE_T_LOG2, PTE_T_LOG2); | 194 | DEFINE(_PTE_T_LOG2, PTE_T_LOG2); |
193 | BLANK(); | 195 | BLANK(); |
194 | DEFINE(_PGD_ORDER, PGD_ORDER); | 196 | DEFINE(_PGD_ORDER, PGD_ORDER); |
197 | #ifndef __PAGETABLE_PMD_FOLDED | ||
195 | DEFINE(_PMD_ORDER, PMD_ORDER); | 198 | DEFINE(_PMD_ORDER, PMD_ORDER); |
199 | #endif | ||
196 | DEFINE(_PTE_ORDER, PTE_ORDER); | 200 | DEFINE(_PTE_ORDER, PTE_ORDER); |
197 | BLANK(); | 201 | BLANK(); |
198 | DEFINE(_PMD_SHIFT, PMD_SHIFT); | 202 | DEFINE(_PMD_SHIFT, PMD_SHIFT); |
199 | DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); | 203 | DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); |
200 | BLANK(); | 204 | BLANK(); |
201 | DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); | 205 | DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); |
202 | DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); | 206 | DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); |
203 | DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); | 207 | DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); |
204 | BLANK(); | 208 | BLANK(); |
205 | } | 209 | } |
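The two new guards are the asm-offsets side of the pgtable.h change: on a two-level configuration the pmd is folded away, PMD_T_LOG2 and PMD_ORDER never get defined, and emitting _PMD_T_LOG2/_PMD_ORDER would break the build. _PMD_SHIFT and _PTRS_PER_PMD can stay unguarded because the folded case still receives degenerate values from the generic header, roughly:

    /* From asm-generic/pgtable-nopmd.h (paraphrased): */
    #define PMD_SHIFT       PUD_SHIFT
    #define PTRS_PER_PMD    1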
206 | 210 | ||
207 | #ifdef CONFIG_32BIT | 211 | #ifdef CONFIG_32BIT |
208 | void output_sc_defines(void) | 212 | void output_sc_defines(void) |
209 | { | 213 | { |
210 | COMMENT("Linux sigcontext offsets."); | 214 | COMMENT("Linux sigcontext offsets."); |
211 | OFFSET(SC_REGS, sigcontext, sc_regs); | 215 | OFFSET(SC_REGS, sigcontext, sc_regs); |
212 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); | 216 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); |
213 | OFFSET(SC_ACX, sigcontext, sc_acx); | 217 | OFFSET(SC_ACX, sigcontext, sc_acx); |
214 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); | 218 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); |
215 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); | 219 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); |
216 | OFFSET(SC_PC, sigcontext, sc_pc); | 220 | OFFSET(SC_PC, sigcontext, sc_pc); |
217 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); | 221 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); |
218 | OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); | 222 | OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); |
219 | OFFSET(SC_HI1, sigcontext, sc_hi1); | 223 | OFFSET(SC_HI1, sigcontext, sc_hi1); |
220 | OFFSET(SC_LO1, sigcontext, sc_lo1); | 224 | OFFSET(SC_LO1, sigcontext, sc_lo1); |
221 | OFFSET(SC_HI2, sigcontext, sc_hi2); | 225 | OFFSET(SC_HI2, sigcontext, sc_hi2); |
222 | OFFSET(SC_LO2, sigcontext, sc_lo2); | 226 | OFFSET(SC_LO2, sigcontext, sc_lo2); |
223 | OFFSET(SC_HI3, sigcontext, sc_hi3); | 227 | OFFSET(SC_HI3, sigcontext, sc_hi3); |
224 | OFFSET(SC_LO3, sigcontext, sc_lo3); | 228 | OFFSET(SC_LO3, sigcontext, sc_lo3); |
225 | BLANK(); | 229 | BLANK(); |
226 | } | 230 | } |
227 | #endif | 231 | #endif |
228 | 232 | ||
229 | #ifdef CONFIG_64BIT | 233 | #ifdef CONFIG_64BIT |
230 | void output_sc_defines(void) | 234 | void output_sc_defines(void) |
231 | { | 235 | { |
232 | COMMENT("Linux sigcontext offsets."); | 236 | COMMENT("Linux sigcontext offsets."); |
233 | OFFSET(SC_REGS, sigcontext, sc_regs); | 237 | OFFSET(SC_REGS, sigcontext, sc_regs); |
234 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); | 238 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); |
235 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); | 239 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); |
236 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); | 240 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); |
237 | OFFSET(SC_PC, sigcontext, sc_pc); | 241 | OFFSET(SC_PC, sigcontext, sc_pc); |
238 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); | 242 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); |
239 | BLANK(); | 243 | BLANK(); |
240 | } | 244 | } |
241 | #endif | 245 | #endif |
242 | 246 | ||
243 | #ifdef CONFIG_MIPS32_COMPAT | 247 | #ifdef CONFIG_MIPS32_COMPAT |
244 | void output_sc32_defines(void) | 248 | void output_sc32_defines(void) |
245 | { | 249 | { |
246 | COMMENT("Linux 32-bit sigcontext offsets."); | 250 | COMMENT("Linux 32-bit sigcontext offsets."); |
247 | OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); | 251 | OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); |
248 | OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); | 252 | OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); |
249 | OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); | 253 | OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); |
250 | BLANK(); | 254 | BLANK(); |
251 | } | 255 | } |
252 | #endif | 256 | #endif |
253 | 257 | ||
254 | void output_signal_defined(void) | 258 | void output_signal_defined(void) |
255 | { | 259 | { |
256 | COMMENT("Linux signal numbers."); | 260 | COMMENT("Linux signal numbers."); |
257 | DEFINE(_SIGHUP, SIGHUP); | 261 | DEFINE(_SIGHUP, SIGHUP); |
258 | DEFINE(_SIGINT, SIGINT); | 262 | DEFINE(_SIGINT, SIGINT); |
259 | DEFINE(_SIGQUIT, SIGQUIT); | 263 | DEFINE(_SIGQUIT, SIGQUIT); |
260 | DEFINE(_SIGILL, SIGILL); | 264 | DEFINE(_SIGILL, SIGILL); |
261 | DEFINE(_SIGTRAP, SIGTRAP); | 265 | DEFINE(_SIGTRAP, SIGTRAP); |
262 | DEFINE(_SIGIOT, SIGIOT); | 266 | DEFINE(_SIGIOT, SIGIOT); |
263 | DEFINE(_SIGABRT, SIGABRT); | 267 | DEFINE(_SIGABRT, SIGABRT); |
264 | DEFINE(_SIGEMT, SIGEMT); | 268 | DEFINE(_SIGEMT, SIGEMT); |
265 | DEFINE(_SIGFPE, SIGFPE); | 269 | DEFINE(_SIGFPE, SIGFPE); |
266 | DEFINE(_SIGKILL, SIGKILL); | 270 | DEFINE(_SIGKILL, SIGKILL); |
267 | DEFINE(_SIGBUS, SIGBUS); | 271 | DEFINE(_SIGBUS, SIGBUS); |
268 | DEFINE(_SIGSEGV, SIGSEGV); | 272 | DEFINE(_SIGSEGV, SIGSEGV); |
269 | DEFINE(_SIGSYS, SIGSYS); | 273 | DEFINE(_SIGSYS, SIGSYS); |
270 | DEFINE(_SIGPIPE, SIGPIPE); | 274 | DEFINE(_SIGPIPE, SIGPIPE); |
271 | DEFINE(_SIGALRM, SIGALRM); | 275 | DEFINE(_SIGALRM, SIGALRM); |
272 | DEFINE(_SIGTERM, SIGTERM); | 276 | DEFINE(_SIGTERM, SIGTERM); |
273 | DEFINE(_SIGUSR1, SIGUSR1); | 277 | DEFINE(_SIGUSR1, SIGUSR1); |
274 | DEFINE(_SIGUSR2, SIGUSR2); | 278 | DEFINE(_SIGUSR2, SIGUSR2); |
275 | DEFINE(_SIGCHLD, SIGCHLD); | 279 | DEFINE(_SIGCHLD, SIGCHLD); |
276 | DEFINE(_SIGPWR, SIGPWR); | 280 | DEFINE(_SIGPWR, SIGPWR); |
277 | DEFINE(_SIGWINCH, SIGWINCH); | 281 | DEFINE(_SIGWINCH, SIGWINCH); |
278 | DEFINE(_SIGURG, SIGURG); | 282 | DEFINE(_SIGURG, SIGURG); |
279 | DEFINE(_SIGIO, SIGIO); | 283 | DEFINE(_SIGIO, SIGIO); |
280 | DEFINE(_SIGSTOP, SIGSTOP); | 284 | DEFINE(_SIGSTOP, SIGSTOP); |
281 | DEFINE(_SIGTSTP, SIGTSTP); | 285 | DEFINE(_SIGTSTP, SIGTSTP); |
282 | DEFINE(_SIGCONT, SIGCONT); | 286 | DEFINE(_SIGCONT, SIGCONT); |
283 | DEFINE(_SIGTTIN, SIGTTIN); | 287 | DEFINE(_SIGTTIN, SIGTTIN); |
284 | DEFINE(_SIGTTOU, SIGTTOU); | 288 | DEFINE(_SIGTTOU, SIGTTOU); |
285 | DEFINE(_SIGVTALRM, SIGVTALRM); | 289 | DEFINE(_SIGVTALRM, SIGVTALRM); |
286 | DEFINE(_SIGPROF, SIGPROF); | 290 | DEFINE(_SIGPROF, SIGPROF); |
287 | DEFINE(_SIGXCPU, SIGXCPU); | 291 | DEFINE(_SIGXCPU, SIGXCPU); |
288 | DEFINE(_SIGXFSZ, SIGXFSZ); | 292 | DEFINE(_SIGXFSZ, SIGXFSZ); |
289 | BLANK(); | 293 | BLANK(); |
290 | } | 294 | } |
291 | 295 | ||
292 | void output_irq_cpustat_t_defines(void) | 296 | void output_irq_cpustat_t_defines(void) |
293 | { | 297 | { |
294 | COMMENT("Linux irq_cpustat_t offsets."); | 298 | COMMENT("Linux irq_cpustat_t offsets."); |
295 | DEFINE(IC_SOFTIRQ_PENDING, | 299 | DEFINE(IC_SOFTIRQ_PENDING, |
296 | offsetof(irq_cpustat_t, __softirq_pending)); | 300 | offsetof(irq_cpustat_t, __softirq_pending)); |
297 | DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t)); | 301 | DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t)); |
298 | BLANK(); | 302 | BLANK(); |
299 | } | 303 | } |
300 | 304 | ||
301 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 305 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
302 | void output_octeon_cop2_state_defines(void) | 306 | void output_octeon_cop2_state_defines(void) |
303 | { | 307 | { |
304 | COMMENT("Octeon specific octeon_cop2_state offsets."); | 308 | COMMENT("Octeon specific octeon_cop2_state offsets."); |
305 | OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv); | 309 | OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv); |
306 | OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length); | 310 | OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length); |
307 | OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly); | 311 | OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly); |
308 | OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat); | 312 | OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat); |
309 | OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv); | 313 | OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv); |
310 | OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key); | 314 | OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key); |
311 | OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result); | 315 | OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result); |
312 | OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0); | 316 | OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0); |
313 | OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv); | 317 | OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv); |
314 | OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key); | 318 | OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key); |
315 | OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen); | 319 | OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen); |
316 | OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result); | 320 | OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result); |
317 | OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult); | 321 | OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult); |
318 | OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly); | 322 | OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly); |
319 | OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); | 323 | OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); |
320 | OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); | 324 | OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); |
321 | OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); | 325 | OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); |
322 | OFFSET(THREAD_CP2, task_struct, thread.cp2); | 326 | OFFSET(THREAD_CP2, task_struct, thread.cp2); |
323 | OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); | 327 | OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); |
324 | BLANK(); | 328 | BLANK(); |
325 | } | 329 | } |
326 | #endif | 330 | #endif |
327 | 331 | ||
328 | #ifdef CONFIG_HIBERNATION | 332 | #ifdef CONFIG_HIBERNATION |
329 | void output_pbe_defines(void) | 333 | void output_pbe_defines(void) |
330 | { | 334 | { |
331 | COMMENT(" Linux struct pbe offsets. "); | 335 | COMMENT(" Linux struct pbe offsets. "); |
332 | OFFSET(PBE_ADDRESS, pbe, address); | 336 | OFFSET(PBE_ADDRESS, pbe, address); |
333 | OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); | 337 | OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); |
334 | OFFSET(PBE_NEXT, pbe, next); | 338 | OFFSET(PBE_NEXT, pbe, next); |
335 | DEFINE(PBE_SIZE, sizeof(struct pbe)); | 339 | DEFINE(PBE_SIZE, sizeof(struct pbe)); |
336 | BLANK(); | 340 | BLANK(); |
337 | } | 341 | } |
338 | #endif | 342 | #endif |
339 | 343 |
arch/mips/mm/init.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1994 - 2000 Ralf Baechle | 6 | * Copyright (C) 1994 - 2000 Ralf Baechle |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 8 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
9 | * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. | 9 | * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. |
10 | */ | 10 | */ |
11 | #include <linux/bug.h> | 11 | #include <linux/bug.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
23 | #include <linux/mman.h> | 23 | #include <linux/mman.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/swap.h> | 27 | #include <linux/swap.h> |
28 | #include <linux/proc_fs.h> | 28 | #include <linux/proc_fs.h> |
29 | #include <linux/pfn.h> | 29 | #include <linux/pfn.h> |
30 | #include <linux/hardirq.h> | 30 | #include <linux/hardirq.h> |
31 | 31 | ||
32 | #include <asm/asm-offsets.h> | 32 | #include <asm/asm-offsets.h> |
33 | #include <asm/bootinfo.h> | 33 | #include <asm/bootinfo.h> |
34 | #include <asm/cachectl.h> | 34 | #include <asm/cachectl.h> |
35 | #include <asm/cpu.h> | 35 | #include <asm/cpu.h> |
36 | #include <asm/dma.h> | 36 | #include <asm/dma.h> |
37 | #include <asm/kmap_types.h> | 37 | #include <asm/kmap_types.h> |
38 | #include <asm/mmu_context.h> | 38 | #include <asm/mmu_context.h> |
39 | #include <asm/sections.h> | 39 | #include <asm/sections.h> |
40 | #include <asm/pgtable.h> | 40 | #include <asm/pgtable.h> |
41 | #include <asm/pgalloc.h> | 41 | #include <asm/pgalloc.h> |
42 | #include <asm/tlb.h> | 42 | #include <asm/tlb.h> |
43 | #include <asm/fixmap.h> | 43 | #include <asm/fixmap.h> |
44 | 44 | ||
45 | /* Atomicity and interruptibility */ | 45 | /* Atomicity and interruptibility */ |
46 | #ifdef CONFIG_MIPS_MT_SMTC | 46 | #ifdef CONFIG_MIPS_MT_SMTC |
47 | 47 | ||
48 | #include <asm/mipsmtregs.h> | 48 | #include <asm/mipsmtregs.h> |
49 | 49 | ||
50 | #define ENTER_CRITICAL(flags) \ | 50 | #define ENTER_CRITICAL(flags) \ |
51 | { \ | 51 | { \ |
52 | unsigned int mvpflags; \ | 52 | unsigned int mvpflags; \ |
53 | local_irq_save(flags);\ | 53 | local_irq_save(flags);\ |
54 | mvpflags = dvpe() | 54 | mvpflags = dvpe() |
55 | #define EXIT_CRITICAL(flags) \ | 55 | #define EXIT_CRITICAL(flags) \ |
56 | evpe(mvpflags); \ | 56 | evpe(mvpflags); \ |
57 | local_irq_restore(flags); \ | 57 | local_irq_restore(flags); \ |
58 | } | 58 | } |
59 | #else | 59 | #else |
60 | 60 | ||
61 | #define ENTER_CRITICAL(flags) local_irq_save(flags) | 61 | #define ENTER_CRITICAL(flags) local_irq_save(flags) |
62 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) | 62 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) |
63 | 63 | ||
64 | #endif /* CONFIG_MIPS_MT_SMTC */ | 64 | #endif /* CONFIG_MIPS_MT_SMTC */ |
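Under SMTC several virtual processors share one TLB, so masking local interrupts alone does not make TLB rewrites safe; ENTER_CRITICAL() additionally halts the other VPEs with dvpe() and EXIT_CRITICAL() restarts them with evpe(). The braces are deliberately unbalanced so that mvpflags, declared in the first macro, is still in scope for the second; the pair must always be used together in one block, as in:

    unsigned long flags;

    ENTER_CRITICAL(flags);  /* irqs off; other VPEs halted under SMTC */
    /* ... rewrite wired TLB entries ... */
    EXIT_CRITICAL(flags);   /* VPEs restarted, irqs restored */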
65 | 65 | ||
66 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 66 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * We have up to 8 empty zeroed pages so we can map one of the right colour | 69 | * We have up to 8 empty zeroed pages so we can map one of the right colour |
70 | * when needed. This is necessary only on R4000 / R4400 SC and MC versions | 70 | * when needed. This is necessary only on R4000 / R4400 SC and MC versions |
71 | * where we have to avoid VCED / VECI exceptions for good performance at | 71 | * where we have to avoid VCED / VECI exceptions for good performance at |
72 | * any price. Since the page is never written to after initialization we | 72 | * any price. Since the page is never written to after initialization we |
73 | * don't have to care about aliases on other CPUs. | 73 | * don't have to care about aliases on other CPUs. |
74 | */ | 74 | */ |
75 | unsigned long empty_zero_page, zero_page_mask; | 75 | unsigned long empty_zero_page, zero_page_mask; |
76 | EXPORT_SYMBOL_GPL(empty_zero_page); | 76 | EXPORT_SYMBOL_GPL(empty_zero_page); |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * Not static inline because used by IP27 special magic initialization code | 79 | * Not static inline because used by IP27 special magic initialization code |
80 | */ | 80 | */ |
81 | unsigned long setup_zero_pages(void) | 81 | unsigned long setup_zero_pages(void) |
82 | { | 82 | { |
83 | unsigned int order; | 83 | unsigned int order; |
84 | unsigned long size; | 84 | unsigned long size; |
85 | struct page *page; | 85 | struct page *page; |
86 | 86 | ||
87 | if (cpu_has_vce) | 87 | if (cpu_has_vce) |
88 | order = 3; | 88 | order = 3; |
89 | else | 89 | else |
90 | order = 0; | 90 | order = 0; |
91 | 91 | ||
92 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 92 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
93 | if (!empty_zero_page) | 93 | if (!empty_zero_page) |
94 | panic("Oh boy, that early out of memory?"); | 94 | panic("Oh boy, that early out of memory?"); |
95 | 95 | ||
96 | page = virt_to_page((void *)empty_zero_page); | 96 | page = virt_to_page((void *)empty_zero_page); |
97 | split_page(page, order); | 97 | split_page(page, order); |
98 | while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { | 98 | while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { |
99 | SetPageReserved(page); | 99 | SetPageReserved(page); |
100 | page++; | 100 | page++; |
101 | } | 101 | } |
102 | 102 | ||
103 | size = PAGE_SIZE << order; | 103 | size = PAGE_SIZE << order; |
104 | zero_page_mask = (size - 1) & PAGE_MASK; | 104 | zero_page_mask = (size - 1) & PAGE_MASK; |
105 | 105 | ||
106 | return 1UL << order; | 106 | return 1UL << order; |
107 | } | 107 | } |
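On VCE-afflicted CPUs the order-3 allocation yields eight zero pages, one per possible cache colour, and zero_page_mask keeps exactly the colour bits of an address. ZERO_PAGE() then picks the page whose colour matches the faulting user address, approximately as defined in pgtable.h:

    /* Paraphrase of the MIPS ZERO_PAGE() definition: select the zero
     * page whose cache colour matches vaddr. */
    #define ZERO_PAGE(vaddr) \
            (virt_to_page((void *)(empty_zero_page + \
                    (((unsigned long)(vaddr)) & zero_page_mask))))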
108 | 108 | ||
109 | #ifdef CONFIG_MIPS_MT_SMTC | 109 | #ifdef CONFIG_MIPS_MT_SMTC |
110 | static pte_t *kmap_coherent_pte; | 110 | static pte_t *kmap_coherent_pte; |
111 | static void __init kmap_coherent_init(void) | 111 | static void __init kmap_coherent_init(void) |
112 | { | 112 | { |
113 | unsigned long vaddr; | 113 | unsigned long vaddr; |
114 | 114 | ||
115 | /* cache the first coherent kmap pte */ | 115 | /* cache the first coherent kmap pte */ |
116 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | 116 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); |
117 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | 117 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); |
118 | } | 118 | } |
119 | #else | 119 | #else |
120 | static inline void kmap_coherent_init(void) {} | 120 | static inline void kmap_coherent_init(void) {} |
121 | #endif | 121 | #endif |
122 | 122 | ||
123 | void *kmap_coherent(struct page *page, unsigned long addr) | 123 | void *kmap_coherent(struct page *page, unsigned long addr) |
124 | { | 124 | { |
125 | enum fixed_addresses idx; | 125 | enum fixed_addresses idx; |
126 | unsigned long vaddr, flags, entrylo; | 126 | unsigned long vaddr, flags, entrylo; |
127 | unsigned long old_ctx; | 127 | unsigned long old_ctx; |
128 | pte_t pte; | 128 | pte_t pte; |
129 | int tlbidx; | 129 | int tlbidx; |
130 | 130 | ||
131 | BUG_ON(Page_dcache_dirty(page)); | 131 | BUG_ON(Page_dcache_dirty(page)); |
132 | 132 | ||
133 | inc_preempt_count(); | 133 | inc_preempt_count(); |
134 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); | 134 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); |
135 | #ifdef CONFIG_MIPS_MT_SMTC | 135 | #ifdef CONFIG_MIPS_MT_SMTC |
136 | idx += FIX_N_COLOURS * smp_processor_id() + | 136 | idx += FIX_N_COLOURS * smp_processor_id() + |
137 | (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); | 137 | (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); |
138 | #else | 138 | #else |
139 | idx += in_interrupt() ? FIX_N_COLOURS : 0; | 139 | idx += in_interrupt() ? FIX_N_COLOURS : 0; |
140 | #endif | 140 | #endif |
141 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | 141 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); |
142 | pte = mk_pte(page, PAGE_KERNEL); | 142 | pte = mk_pte(page, PAGE_KERNEL); |
143 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 143 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
144 | entrylo = pte.pte_high; | 144 | entrylo = pte.pte_high; |
145 | #else | 145 | #else |
146 | entrylo = pte_val(pte) >> 6; | 146 | entrylo = pte_val(pte) >> 6; |
147 | #endif | 147 | #endif |
148 | 148 | ||
149 | ENTER_CRITICAL(flags); | 149 | ENTER_CRITICAL(flags); |
150 | old_ctx = read_c0_entryhi(); | 150 | old_ctx = read_c0_entryhi(); |
151 | write_c0_entryhi(vaddr & (PAGE_MASK << 1)); | 151 | write_c0_entryhi(vaddr & (PAGE_MASK << 1)); |
152 | write_c0_entrylo0(entrylo); | 152 | write_c0_entrylo0(entrylo); |
153 | write_c0_entrylo1(entrylo); | 153 | write_c0_entrylo1(entrylo); |
154 | #ifdef CONFIG_MIPS_MT_SMTC | 154 | #ifdef CONFIG_MIPS_MT_SMTC |
155 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | 155 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); |
156 | /* preload TLB instead of local_flush_tlb_one() */ | 156 | /* preload TLB instead of local_flush_tlb_one() */ |
157 | mtc0_tlbw_hazard(); | 157 | mtc0_tlbw_hazard(); |
158 | tlb_probe(); | 158 | tlb_probe(); |
159 | tlb_probe_hazard(); | 159 | tlb_probe_hazard(); |
160 | tlbidx = read_c0_index(); | 160 | tlbidx = read_c0_index(); |
161 | mtc0_tlbw_hazard(); | 161 | mtc0_tlbw_hazard(); |
162 | if (tlbidx < 0) | 162 | if (tlbidx < 0) |
163 | tlb_write_random(); | 163 | tlb_write_random(); |
164 | else | 164 | else |
165 | tlb_write_indexed(); | 165 | tlb_write_indexed(); |
166 | #else | 166 | #else |
167 | tlbidx = read_c0_wired(); | 167 | tlbidx = read_c0_wired(); |
168 | write_c0_wired(tlbidx + 1); | 168 | write_c0_wired(tlbidx + 1); |
169 | write_c0_index(tlbidx); | 169 | write_c0_index(tlbidx); |
170 | mtc0_tlbw_hazard(); | 170 | mtc0_tlbw_hazard(); |
171 | tlb_write_indexed(); | 171 | tlb_write_indexed(); |
172 | #endif | 172 | #endif |
173 | tlbw_use_hazard(); | 173 | tlbw_use_hazard(); |
174 | write_c0_entryhi(old_ctx); | 174 | write_c0_entryhi(old_ctx); |
175 | EXIT_CRITICAL(flags); | 175 | EXIT_CRITICAL(flags); |
176 | 176 | ||
177 | return (void*) vaddr; | 177 | return (void*) vaddr; |
178 | } | 178 | } |
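kmap_coherent() defeats dcache aliasing by choosing a fixmap slot whose cache colour matches the user address (idx is derived from the colour bits of addr), then hand-crafting a TLB entry for it: EntryHi is masked with PAGE_MASK << 1 because one MIPS TLB entry spans an even/odd page pair, and both EntryLo registers receive the same translation so either half of the pair resolves to the page. On the non-SMTC path the entry is wired so it cannot be evicted before kunmap_coherent() retires it. The copy helpers below are the canonical users, in the pattern:

    void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
    memcpy(vto, src, len);          /* alias-free, colour-matched copy */
    kunmap_coherent();              /* unwire and drop the TLB entry */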
179 | 179 | ||
180 | #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | 180 | #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |
181 | 181 | ||
182 | void kunmap_coherent(void) | 182 | void kunmap_coherent(void) |
183 | { | 183 | { |
184 | #ifndef CONFIG_MIPS_MT_SMTC | 184 | #ifndef CONFIG_MIPS_MT_SMTC |
185 | unsigned int wired; | 185 | unsigned int wired; |
186 | unsigned long flags, old_ctx; | 186 | unsigned long flags, old_ctx; |
187 | 187 | ||
188 | ENTER_CRITICAL(flags); | 188 | ENTER_CRITICAL(flags); |
189 | old_ctx = read_c0_entryhi(); | 189 | old_ctx = read_c0_entryhi(); |
190 | wired = read_c0_wired() - 1; | 190 | wired = read_c0_wired() - 1; |
191 | write_c0_wired(wired); | 191 | write_c0_wired(wired); |
192 | write_c0_index(wired); | 192 | write_c0_index(wired); |
193 | write_c0_entryhi(UNIQUE_ENTRYHI(wired)); | 193 | write_c0_entryhi(UNIQUE_ENTRYHI(wired)); |
194 | write_c0_entrylo0(0); | 194 | write_c0_entrylo0(0); |
195 | write_c0_entrylo1(0); | 195 | write_c0_entrylo1(0); |
196 | mtc0_tlbw_hazard(); | 196 | mtc0_tlbw_hazard(); |
197 | tlb_write_indexed(); | 197 | tlb_write_indexed(); |
198 | tlbw_use_hazard(); | 198 | tlbw_use_hazard(); |
199 | write_c0_entryhi(old_ctx); | 199 | write_c0_entryhi(old_ctx); |
200 | EXIT_CRITICAL(flags); | 200 | EXIT_CRITICAL(flags); |
201 | #endif | 201 | #endif |
202 | dec_preempt_count(); | 202 | dec_preempt_count(); |
203 | preempt_check_resched(); | 203 | preempt_check_resched(); |
204 | } | 204 | } |
205 | 205 | ||
206 | void copy_user_highpage(struct page *to, struct page *from, | 206 | void copy_user_highpage(struct page *to, struct page *from, |
207 | unsigned long vaddr, struct vm_area_struct *vma) | 207 | unsigned long vaddr, struct vm_area_struct *vma) |
208 | { | 208 | { |
209 | void *vfrom, *vto; | 209 | void *vfrom, *vto; |
210 | 210 | ||
211 | vto = kmap_atomic(to, KM_USER1); | 211 | vto = kmap_atomic(to, KM_USER1); |
212 | if (cpu_has_dc_aliases && | 212 | if (cpu_has_dc_aliases && |
213 | page_mapped(from) && !Page_dcache_dirty(from)) { | 213 | page_mapped(from) && !Page_dcache_dirty(from)) { |
214 | vfrom = kmap_coherent(from, vaddr); | 214 | vfrom = kmap_coherent(from, vaddr); |
215 | copy_page(vto, vfrom); | 215 | copy_page(vto, vfrom); |
216 | kunmap_coherent(); | 216 | kunmap_coherent(); |
217 | } else { | 217 | } else { |
218 | vfrom = kmap_atomic(from, KM_USER0); | 218 | vfrom = kmap_atomic(from, KM_USER0); |
219 | copy_page(vto, vfrom); | 219 | copy_page(vto, vfrom); |
220 | kunmap_atomic(vfrom, KM_USER0); | 220 | kunmap_atomic(vfrom, KM_USER0); |
221 | } | 221 | } |
222 | if ((!cpu_has_ic_fills_f_dc) || | 222 | if ((!cpu_has_ic_fills_f_dc) || |
223 | pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | 223 | pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) |
224 | flush_data_cache_page((unsigned long)vto); | 224 | flush_data_cache_page((unsigned long)vto); |
225 | kunmap_atomic(vto, KM_USER1); | 225 | kunmap_atomic(vto, KM_USER1); |
226 | /* Make sure this page is cleared on other CPUs too before using it */ | 226 | /* Make sure this page is cleared on other CPUs too before using it */ |
227 | smp_wmb(); | 227 | smp_wmb(); |
228 | } | 228 | } |
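The flush in copy_user_highpage() is only needed when the kernel mapping of the destination and the user's vaddr can land in different cache sets. A sketch of that aliasing condition, assuming the usual XOR-and-mask form of pages_do_alias() and an illustrative 32KB 4-way dcache (8KB way, so shm_align_mask = 0x1fff); these constants are assumptions, not values from this tree:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))

    /* Assumed shape: set-index bits above PAGE_SHIFT differ -> alias. */
    static unsigned long shm_align_mask = 0x1fff;   /* 8KB way - 1 */

    #define pages_do_alias(a, b)    (((a) ^ (b)) & shm_align_mask)

    int main(void)
    {
            unsigned long kva = 0x81234000UL;   /* kernel mapping */
            unsigned long uva = 0x00401000UL;   /* user mapping */

            if (pages_do_alias(kva, uva & PAGE_MASK))
                    printf("aliases: flush needed\n");
            else
                    printf("same sets: flush can be skipped\n");
            return 0;
    }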
229 | 229 | ||
230 | void copy_to_user_page(struct vm_area_struct *vma, | 230 | void copy_to_user_page(struct vm_area_struct *vma, |
231 | struct page *page, unsigned long vaddr, void *dst, const void *src, | 231 | struct page *page, unsigned long vaddr, void *dst, const void *src, |
232 | unsigned long len) | 232 | unsigned long len) |
233 | { | 233 | { |
234 | if (cpu_has_dc_aliases && | 234 | if (cpu_has_dc_aliases && |
235 | page_mapped(page) && !Page_dcache_dirty(page)) { | 235 | page_mapped(page) && !Page_dcache_dirty(page)) { |
236 | void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | 236 | void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); |
237 | memcpy(vto, src, len); | 237 | memcpy(vto, src, len); |
238 | kunmap_coherent(); | 238 | kunmap_coherent(); |
239 | } else { | 239 | } else { |
240 | memcpy(dst, src, len); | 240 | memcpy(dst, src, len); |
241 | if (cpu_has_dc_aliases) | 241 | if (cpu_has_dc_aliases) |
242 | SetPageDcacheDirty(page); | 242 | SetPageDcacheDirty(page); |
243 | } | 243 | } |
244 | if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) | 244 | if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) |
245 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | 245 | flush_cache_page(vma, vaddr, page_to_pfn(page)); |
246 | } | 246 | } |
247 | 247 | ||
248 | void copy_from_user_page(struct vm_area_struct *vma, | 248 | void copy_from_user_page(struct vm_area_struct *vma, |
249 | struct page *page, unsigned long vaddr, void *dst, const void *src, | 249 | struct page *page, unsigned long vaddr, void *dst, const void *src, |
250 | unsigned long len) | 250 | unsigned long len) |
251 | { | 251 | { |
252 | if (cpu_has_dc_aliases && | 252 | if (cpu_has_dc_aliases && |
253 | page_mapped(page) && !Page_dcache_dirty(page)) { | 253 | page_mapped(page) && !Page_dcache_dirty(page)) { |
254 | void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | 254 | void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); |
255 | memcpy(dst, vfrom, len); | 255 | memcpy(dst, vfrom, len); |
256 | kunmap_coherent(); | 256 | kunmap_coherent(); |
257 | } else { | 257 | } else { |
258 | memcpy(dst, src, len); | 258 | memcpy(dst, src, len); |
259 | if (cpu_has_dc_aliases) | 259 | if (cpu_has_dc_aliases) |
260 | SetPageDcacheDirty(page); | 260 | SetPageDcacheDirty(page); |
261 | } | 261 | } |
262 | } | 262 | } |
263 | 263 | ||
264 | void __init fixrange_init(unsigned long start, unsigned long end, | 264 | void __init fixrange_init(unsigned long start, unsigned long end, |
265 | pgd_t *pgd_base) | 265 | pgd_t *pgd_base) |
266 | { | 266 | { |
267 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) | 267 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) |
268 | pgd_t *pgd; | 268 | pgd_t *pgd; |
269 | pud_t *pud; | 269 | pud_t *pud; |
270 | pmd_t *pmd; | 270 | pmd_t *pmd; |
271 | pte_t *pte; | 271 | pte_t *pte; |
272 | int i, j, k; | 272 | int i, j, k; |
273 | unsigned long vaddr; | 273 | unsigned long vaddr; |
274 | 274 | ||
275 | vaddr = start; | 275 | vaddr = start; |
276 | i = __pgd_offset(vaddr); | 276 | i = __pgd_offset(vaddr); |
277 | j = __pud_offset(vaddr); | 277 | j = __pud_offset(vaddr); |
278 | k = __pmd_offset(vaddr); | 278 | k = __pmd_offset(vaddr); |
279 | pgd = pgd_base + i; | 279 | pgd = pgd_base + i; |
280 | 280 | ||
281 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | 281 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
282 | pud = (pud_t *)pgd; | 282 | pud = (pud_t *)pgd; |
283 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { | 283 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
284 | pmd = (pmd_t *)pud; | 284 | pmd = (pmd_t *)pud; |
285 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { | 285 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
286 | if (pmd_none(*pmd)) { | 286 | if (pmd_none(*pmd)) { |
287 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 287 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
288 | set_pmd(pmd, __pmd((unsigned long)pte)); | 288 | set_pmd(pmd, __pmd((unsigned long)pte)); |
289 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); | 289 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); |
290 | } | 290 | } |
291 | vaddr += PMD_SIZE; | 291 | vaddr += PMD_SIZE; |
292 | } | 292 | } |
293 | k = 0; | 293 | k = 0; |
294 | } | 294 | } |
295 | j = 0; | 295 | j = 0; |
296 | } | 296 | } |
297 | #endif | 297 | #endif |
298 | } | 298 | } |
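fixrange_init() walks the tables via the __pgd_offset()/__pud_offset()/__pmd_offset() index macros. A sketch of that decomposition for a folded two-level 32-bit layout (4KB pages, 1024-entry tables); the shifts are illustrative assumptions, not quoted from the headers:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PGDIR_SHIFT     22      /* assumed: 4MB per pgd slot */
    #define PTRS_PER_PGD    1024
    #define PTRS_PER_PTE    1024

    int main(void)
    {
            unsigned long vaddr = 0xfffe3000UL; /* e.g. a fixmap address */
            unsigned int i = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
            unsigned int k = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

            /* With pud and pmd folded, __pud_offset()/__pmd_offset()
             * return 0 and only these two indices vary. */
            printf("pgd index %u, pte index %u\n", i, k);
            return 0;
    }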
299 | 299 | ||
300 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 300 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
301 | static int __init page_is_ram(unsigned long pagenr) | 301 | static int __init page_is_ram(unsigned long pagenr) |
302 | { | 302 | { |
303 | int i; | 303 | int i; |
304 | 304 | ||
305 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 305 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
306 | unsigned long addr, end; | 306 | unsigned long addr, end; |
307 | 307 | ||
308 | if (boot_mem_map.map[i].type != BOOT_MEM_RAM) | 308 | if (boot_mem_map.map[i].type != BOOT_MEM_RAM) |
309 | /* not usable memory */ | 309 | /* not usable memory */ |
310 | continue; | 310 | continue; |
311 | 311 | ||
312 | addr = PFN_UP(boot_mem_map.map[i].addr); | 312 | addr = PFN_UP(boot_mem_map.map[i].addr); |
313 | end = PFN_DOWN(boot_mem_map.map[i].addr + | 313 | end = PFN_DOWN(boot_mem_map.map[i].addr + |
314 | boot_mem_map.map[i].size); | 314 | boot_mem_map.map[i].size); |
315 | 315 | ||
316 | if (pagenr >= addr && pagenr < end) | 316 | if (pagenr >= addr && pagenr < end) |
317 | return 1; | 317 | return 1; |
318 | } | 318 | } |
319 | 319 | ||
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | void __init paging_init(void) | 323 | void __init paging_init(void) |
324 | { | 324 | { |
325 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 325 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
326 | unsigned long lastpfn; | 326 | unsigned long lastpfn; |
327 | 327 | ||
328 | pagetable_init(); | 328 | pagetable_init(); |
329 | 329 | ||
330 | #ifdef CONFIG_HIGHMEM | 330 | #ifdef CONFIG_HIGHMEM |
331 | kmap_init(); | 331 | kmap_init(); |
332 | #endif | 332 | #endif |
333 | kmap_coherent_init(); | 333 | kmap_coherent_init(); |
334 | 334 | ||
335 | #ifdef CONFIG_ZONE_DMA | 335 | #ifdef CONFIG_ZONE_DMA |
336 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | 336 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; |
337 | #endif | 337 | #endif |
338 | #ifdef CONFIG_ZONE_DMA32 | 338 | #ifdef CONFIG_ZONE_DMA32 |
339 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; | 339 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; |
340 | #endif | 340 | #endif |
341 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 341 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
342 | lastpfn = max_low_pfn; | 342 | lastpfn = max_low_pfn; |
343 | #ifdef CONFIG_HIGHMEM | 343 | #ifdef CONFIG_HIGHMEM |
344 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | 344 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; |
345 | lastpfn = highend_pfn; | 345 | lastpfn = highend_pfn; |
346 | 346 | ||
347 | if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) { | 347 | if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) { |
348 | printk(KERN_WARNING "This processor doesn't support highmem." | 348 | printk(KERN_WARNING "This processor doesn't support highmem." |
349 | " %ldk highmem ignored\n", | 349 | " %ldk highmem ignored\n", |
350 | (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); | 350 | (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); |
351 | max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; | 351 | max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; |
352 | lastpfn = max_low_pfn; | 352 | lastpfn = max_low_pfn; |
353 | } | 353 | } |
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | free_area_init_nodes(max_zone_pfns); | 356 | free_area_init_nodes(max_zone_pfns); |
357 | } | 357 | } |
358 | 358 | ||
359 | #ifdef CONFIG_64BIT | 359 | #ifdef CONFIG_64BIT |
360 | static struct kcore_list kcore_kseg0; | 360 | static struct kcore_list kcore_kseg0; |
361 | #endif | 361 | #endif |
362 | 362 | ||
363 | void __init mem_init(void) | 363 | void __init mem_init(void) |
364 | { | 364 | { |
365 | unsigned long codesize, reservedpages, datasize, initsize; | 365 | unsigned long codesize, reservedpages, datasize, initsize; |
366 | unsigned long tmp, ram; | 366 | unsigned long tmp, ram; |
367 | 367 | ||
368 | #ifdef CONFIG_HIGHMEM | 368 | #ifdef CONFIG_HIGHMEM |
369 | #ifdef CONFIG_DISCONTIGMEM | 369 | #ifdef CONFIG_DISCONTIGMEM |
370 | #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet" | 370 | #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet" |
371 | #endif | 371 | #endif |
372 | max_mapnr = highend_pfn; | 372 | max_mapnr = highend_pfn; |
373 | #else | 373 | #else |
374 | max_mapnr = max_low_pfn; | 374 | max_mapnr = max_low_pfn; |
375 | #endif | 375 | #endif |
376 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | 376 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
377 | 377 | ||
378 | totalram_pages += free_all_bootmem(); | 378 | totalram_pages += free_all_bootmem(); |
379 | totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ | 379 | totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ |
380 | 380 | ||
381 | reservedpages = ram = 0; | 381 | reservedpages = ram = 0; |
382 | for (tmp = 0; tmp < max_low_pfn; tmp++) | 382 | for (tmp = 0; tmp < max_low_pfn; tmp++) |
383 | if (page_is_ram(tmp)) { | 383 | if (page_is_ram(tmp)) { |
384 | ram++; | 384 | ram++; |
385 | if (PageReserved(pfn_to_page(tmp))) | 385 | if (PageReserved(pfn_to_page(tmp))) |
386 | reservedpages++; | 386 | reservedpages++; |
387 | } | 387 | } |
388 | num_physpages = ram; | 388 | num_physpages = ram; |
389 | 389 | ||
390 | #ifdef CONFIG_HIGHMEM | 390 | #ifdef CONFIG_HIGHMEM |
391 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { | 391 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { |
392 | struct page *page = pfn_to_page(tmp); | 392 | struct page *page = pfn_to_page(tmp); |
393 | 393 | ||
394 | if (!page_is_ram(tmp)) { | 394 | if (!page_is_ram(tmp)) { |
395 | SetPageReserved(page); | 395 | SetPageReserved(page); |
396 | continue; | 396 | continue; |
397 | } | 397 | } |
398 | ClearPageReserved(page); | 398 | ClearPageReserved(page); |
399 | init_page_count(page); | 399 | init_page_count(page); |
400 | __free_page(page); | 400 | __free_page(page); |
401 | totalhigh_pages++; | 401 | totalhigh_pages++; |
402 | } | 402 | } |
403 | totalram_pages += totalhigh_pages; | 403 | totalram_pages += totalhigh_pages; |
404 | num_physpages += totalhigh_pages; | 404 | num_physpages += totalhigh_pages; |
405 | #endif | 405 | #endif |
406 | 406 | ||
407 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 407 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
408 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 408 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
409 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 409 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
410 | 410 | ||
411 | #ifdef CONFIG_64BIT | 411 | #ifdef CONFIG_64BIT |
412 | if ((unsigned long) &_text > (unsigned long) CKSEG0) | 412 | if ((unsigned long) &_text > (unsigned long) CKSEG0) |
413 | /* The -4 is a hack so that user tools don't have to handle | 413 | /* The -4 is a hack so that user tools don't have to handle |
414 | the overflow. */ | 414 | the overflow. */ |
415 | kclist_add(&kcore_kseg0, (void *) CKSEG0, | 415 | kclist_add(&kcore_kseg0, (void *) CKSEG0, |
416 | 0x80000000 - 4, KCORE_TEXT); | 416 | 0x80000000 - 4, KCORE_TEXT); |
417 | #endif | 417 | #endif |
418 | 418 | ||
419 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | 419 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " |
420 | "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", | 420 | "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", |
421 | nr_free_pages() << (PAGE_SHIFT-10), | 421 | nr_free_pages() << (PAGE_SHIFT-10), |
422 | ram << (PAGE_SHIFT-10), | 422 | ram << (PAGE_SHIFT-10), |
423 | codesize >> 10, | 423 | codesize >> 10, |
424 | reservedpages << (PAGE_SHIFT-10), | 424 | reservedpages << (PAGE_SHIFT-10), |
425 | datasize >> 10, | 425 | datasize >> 10, |
426 | initsize >> 10, | 426 | initsize >> 10, |
427 | totalhigh_pages << (PAGE_SHIFT-10)); | 427 | totalhigh_pages << (PAGE_SHIFT-10)); |
428 | } | 428 | } |
429 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 429 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
430 | 430 | ||
431 | void free_init_pages(const char *what, unsigned long begin, unsigned long end) | 431 | void free_init_pages(const char *what, unsigned long begin, unsigned long end) |
432 | { | 432 | { |
433 | unsigned long pfn; | 433 | unsigned long pfn; |
434 | 434 | ||
435 | for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { | 435 | for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { |
436 | struct page *page = pfn_to_page(pfn); | 436 | struct page *page = pfn_to_page(pfn); |
437 | void *addr = phys_to_virt(PFN_PHYS(pfn)); | 437 | void *addr = phys_to_virt(PFN_PHYS(pfn)); |
438 | 438 | ||
439 | ClearPageReserved(page); | 439 | ClearPageReserved(page); |
440 | init_page_count(page); | 440 | init_page_count(page); |
441 | memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); | 441 | memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); |
442 | __free_page(page); | 442 | __free_page(page); |
443 | totalram_pages++; | 443 | totalram_pages++; |
444 | } | 444 | } |
445 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); | 445 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); |
446 | } | 446 | } |
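free_init_pages() deliberately rounds inward: PFN_UP() on begin and PFN_DOWN() on end ensure that only pages lying wholly inside [begin, end) are freed. A quick sketch of that rounding, with the usual PFN_UP/PFN_DOWN definitions written out (assumed here, not quoted):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long begin = 0x100800, end = 0x104800;

            /* 0x101000..0x103fff -> pfns 0x101..0x103: three whole pages */
            printf("free pfns [%#lx, %#lx)\n", PFN_UP(begin), PFN_DOWN(end));
            return 0;
    }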
447 | 447 | ||
448 | #ifdef CONFIG_BLK_DEV_INITRD | 448 | #ifdef CONFIG_BLK_DEV_INITRD |
449 | void free_initrd_mem(unsigned long start, unsigned long end) | 449 | void free_initrd_mem(unsigned long start, unsigned long end) |
450 | { | 450 | { |
451 | free_init_pages("initrd memory", | 451 | free_init_pages("initrd memory", |
452 | virt_to_phys((void *)start), | 452 | virt_to_phys((void *)start), |
453 | virt_to_phys((void *)end)); | 453 | virt_to_phys((void *)end)); |
454 | } | 454 | } |
455 | #endif | 455 | #endif |
456 | 456 | ||
457 | void __init_refok free_initmem(void) | 457 | void __init_refok free_initmem(void) |
458 | { | 458 | { |
459 | prom_free_prom_memory(); | 459 | prom_free_prom_memory(); |
460 | free_init_pages("unused kernel memory", | 460 | free_init_pages("unused kernel memory", |
461 | __pa_symbol(&__init_begin), | 461 | __pa_symbol(&__init_begin), |
462 | __pa_symbol(&__init_end)); | 462 | __pa_symbol(&__init_end)); |
463 | } | 463 | } |
464 | 464 | ||
465 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 465 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
466 | unsigned long pgd_current[NR_CPUS]; | 466 | unsigned long pgd_current[NR_CPUS]; |
467 | #endif | 467 | #endif |
468 | /* | 468 | /* |
469 | * On 64-bit we've got three-level pagetables with a slightly | 469 | * On 64-bit we've got three-level pagetables with a slightly |
470 | * different layout ... | 470 | * different layout ... |
471 | */ | 471 | */ |
472 | #define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order))) | 472 | #define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order))) |
473 | 473 | ||
474 | /* | 474 | /* |
475 | * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER | 475 | * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER |
476 | * are constants. So we use the variants from asm-offset.h until that gcc | 476 | * are constants. So we use the variants from asm-offset.h until that gcc |
477 | * is officially retired. | 477 | * is officially retired. |
478 | */ | 478 | */ |
479 | pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER); | 479 | pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER); |
480 | #ifdef CONFIG_64BIT | 480 | #ifndef __PAGETABLE_PMD_FOLDED |
481 | pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER); | 481 | pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER); |
482 | #endif | 482 | #endif |
483 | pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER); | 483 | pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER); |
484 | 484 |
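Guarding invalid_pmd_table with __PAGETABLE_PMD_FOLDED rather than CONFIG_64BIT is what lets a 64-bit kernel fold the pmd away entirely. With 64KB pages and 8-byte table entries, each table then holds 2^13 pointers, so the folded layout maps 13 + 13 + 16 = 42 bits of virtual address space. A standalone back-of-the-envelope check (entry size assumed at 8 bytes):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int page_shift = 16;             /* 64KB pages */
            const unsigned int index_bits = page_shift - 3; /* 8-byte entries */

            /* pgd bits + pte bits + page offset */
            printf("two-level VA: %u bits\n",
                   index_bits + index_bits + page_shift);   /* 42 */
            return 0;
    }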
arch/mips/mm/pgtable-64.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1999, 2000 by Silicon Graphics | 6 | * Copyright (C) 1999, 2000 by Silicon Graphics |
7 | * Copyright (C) 2003 by Ralf Baechle | 7 | * Copyright (C) 2003 by Ralf Baechle |
8 | */ | 8 | */ |
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <asm/fixmap.h> | 11 | #include <asm/fixmap.h> |
12 | #include <asm/pgtable.h> | 12 | #include <asm/pgtable.h> |
13 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | 14 | ||
15 | void pgd_init(unsigned long page) | 15 | void pgd_init(unsigned long page) |
16 | { | 16 | { |
17 | unsigned long *p, *end; | 17 | unsigned long *p, *end; |
18 | unsigned long entry; | ||
18 | 19 | ||
20 | #ifdef __PAGETABLE_PMD_FOLDED | ||
21 | entry = (unsigned long)invalid_pte_table; | ||
22 | #else | ||
23 | entry = (unsigned long)invalid_pmd_table; | ||
24 | #endif | ||
25 | |||
19 | p = (unsigned long *) page; | 26 | p = (unsigned long *) page; |
20 | end = p + PTRS_PER_PGD; | 27 | end = p + PTRS_PER_PGD; |
21 | 28 | ||
22 | while (p < end) { | 29 | while (p < end) { |
23 | p[0] = (unsigned long) invalid_pmd_table; | 30 | p[0] = entry; |
24 | p[1] = (unsigned long) invalid_pmd_table; | 31 | p[1] = entry; |
25 | p[2] = (unsigned long) invalid_pmd_table; | 32 | p[2] = entry; |
26 | p[3] = (unsigned long) invalid_pmd_table; | 33 | p[3] = entry; |
27 | p[4] = (unsigned long) invalid_pmd_table; | 34 | p[4] = entry; |
28 | p[5] = (unsigned long) invalid_pmd_table; | 35 | p[5] = entry; |
29 | p[6] = (unsigned long) invalid_pmd_table; | 36 | p[6] = entry; |
30 | p[7] = (unsigned long) invalid_pmd_table; | 37 | p[7] = entry; |
31 | p += 8; | 38 | p += 8; |
32 | } | 39 | } |
33 | } | 40 | } |
34 | 41 | ||
42 | #ifndef __PAGETABLE_PMD_FOLDED | ||
35 | void pmd_init(unsigned long addr, unsigned long pagetable) | 43 | void pmd_init(unsigned long addr, unsigned long pagetable) |
36 | { | 44 | { |
37 | unsigned long *p, *end; | 45 | unsigned long *p, *end; |
38 | 46 | ||
39 | p = (unsigned long *) addr; | 47 | p = (unsigned long *) addr; |
40 | end = p + PTRS_PER_PMD; | 48 | end = p + PTRS_PER_PMD; |
41 | 49 | ||
42 | while (p < end) { | 50 | while (p < end) { |
43 | p[0] = (unsigned long)pagetable; | 51 | p[0] = pagetable; |
44 | p[1] = (unsigned long)pagetable; | 52 | p[1] = pagetable; |
45 | p[2] = (unsigned long)pagetable; | 53 | p[2] = pagetable; |
46 | p[3] = (unsigned long)pagetable; | 54 | p[3] = pagetable; |
47 | p[4] = (unsigned long)pagetable; | 55 | p[4] = pagetable; |
48 | p[5] = (unsigned long)pagetable; | 56 | p[5] = pagetable; |
49 | p[6] = (unsigned long)pagetable; | 57 | p[6] = pagetable; |
50 | p[7] = (unsigned long)pagetable; | 58 | p[7] = pagetable; |
51 | p += 8; | 59 | p += 8; |
52 | } | 60 | } |
53 | } | 61 | } |
62 | #endif | ||
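With the pmd folded, pgd_init() seeds every pgd slot with invalid_pte_table and the walk loses a level. A runnable plain-C sketch of the two walks this file now supports (table types reduced to unsigned long arrays for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pte[8] = { 0x1234 };  /* leaf table */
            unsigned long pmd[8], pgd[8];
            unsigned long *level;

            /* Folded (two-level): pgd slots point straight at pte tables. */
            pgd[0] = (unsigned long)pte;
            level = (unsigned long *)pgd[0];
            printf("folded:  %#lx\n", level[0]);

            /* Three-level: an extra pmd hop in the middle. */
            pmd[0] = (unsigned long)pte;
            pgd[0] = (unsigned long)pmd;
            level = (unsigned long *)((unsigned long *)pgd[0])[0];
            printf("3-level: %#lx\n", level[0]);
            return 0;
    }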
54 | 63 | ||
55 | void __init pagetable_init(void) | 64 | void __init pagetable_init(void) |
56 | { | 65 | { |
57 | unsigned long vaddr; | 66 | unsigned long vaddr; |
58 | pgd_t *pgd_base; | 67 | pgd_t *pgd_base; |
59 | 68 | ||
60 | /* Initialize the entire pgd. */ | 69 | /* Initialize the entire pgd. */ |
61 | pgd_init((unsigned long)swapper_pg_dir); | 70 | pgd_init((unsigned long)swapper_pg_dir); |
71 | #ifndef __PAGETABLE_PMD_FOLDED | ||
62 | pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); | 72 | pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); |
63 | 73 | #endif | |
64 | pgd_base = swapper_pg_dir; | 74 | pgd_base = swapper_pg_dir; |
65 | /* | 75 | /* |
66 | * Fixed mappings: | 76 | * Fixed mappings: |
67 | */ | 77 | */ |
68 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | 78 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; |
69 | fixrange_init(vaddr, 0, pgd_base); | 79 | fixrange_init(vaddr, 0, pgd_base); |
70 | } | 80 | } |
71 | 81 |
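pagetable_init() starts the fixed-mapping walk at the last fixmap slot rounded down to a PMD boundary. The generic fixmap arithmetic behind __fix_to_virt(), sketched with an assumed FIXADDR_TOP and PMD_MASK (not this platform's real values):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define FIXADDR_TOP     0xfffe0000UL    /* assumed for illustration */
    #define __fix_to_virt(x)    (FIXADDR_TOP - ((x) << PAGE_SHIFT))

    int main(void)
    {
            unsigned int end_of_fixed_addresses = 8;        /* assumed count */
            unsigned long pmd_mask = ~((1UL << 22) - 1);    /* assumed PMD_MASK */
            unsigned long vaddr =
                    __fix_to_virt(end_of_fixed_addresses - 1) & pmd_mask;

            printf("fixrange_init starts at %#lx\n", vaddr);
            return 0;
    }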
arch/mips/mm/tlbex.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Synthesize TLB refill handlers at runtime. | 6 | * Synthesize TLB refill handlers at runtime. |
7 | * | 7 | * |
8 | * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer | 8 | * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer |
9 | * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki | 9 | * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki |
10 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) | 10 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) |
11 | * Copyright (C) 2008, 2009 Cavium Networks, Inc. | 11 | * Copyright (C) 2008, 2009 Cavium Networks, Inc. |
12 | * | 12 | * |
13 | * ... and the days got worse and worse and now you see | 13 | * ... and the days got worse and worse and now you see |
14 | * I've gone completely out of my mind. | 14 | * I've gone completely out of my mind. |
15 | * | 15 | * |
16 | * They're coming to take me away haha | 16 | * They're coming to take me away haha |
17 | * they're coming to take me away hoho hihi haha | 17 | * they're coming to take me away hoho hihi haha |
18 | * to the funny farm where code is beautiful all the time ... | 18 | * to the funny farm where code is beautiful all the time ... |
19 | * | 19 | * |
20 | * (Condolences to Napoleon XIV) | 20 | * (Condolences to Napoleon XIV) |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/bug.h> | 23 | #include <linux/bug.h> |
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/smp.h> | 26 | #include <linux/smp.h> |
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | 29 | ||
30 | #include <asm/mmu_context.h> | 30 | #include <asm/mmu_context.h> |
31 | #include <asm/war.h> | 31 | #include <asm/war.h> |
32 | 32 | ||
33 | #include "uasm.h" | 33 | #include "uasm.h" |
34 | 34 | ||
35 | static inline int r45k_bvahwbug(void) | 35 | static inline int r45k_bvahwbug(void) |
36 | { | 36 | { |
37 | /* XXX: We should probe for the presence of this bug, but we don't. */ | 37 | /* XXX: We should probe for the presence of this bug, but we don't. */ |
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline int r4k_250MHZhwbug(void) | 41 | static inline int r4k_250MHZhwbug(void) |
42 | { | 42 | { |
43 | /* XXX: We should probe for the presence of this bug, but we don't. */ | 43 | /* XXX: We should probe for the presence of this bug, but we don't. */ |
44 | return 0; | 44 | return 0; |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline int __maybe_unused bcm1250_m3_war(void) | 47 | static inline int __maybe_unused bcm1250_m3_war(void) |
48 | { | 48 | { |
49 | return BCM1250_M3_WAR; | 49 | return BCM1250_M3_WAR; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline int __maybe_unused r10000_llsc_war(void) | 52 | static inline int __maybe_unused r10000_llsc_war(void) |
53 | { | 53 | { |
54 | return R10000_LLSC_WAR; | 54 | return R10000_LLSC_WAR; |
55 | } | 55 | } |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Found by experiment: At least some revisions of the 4kc throw under | 58 | * Found by experiment: At least some revisions of the 4kc throw under |
59 | * some circumstances a machine check exception, triggered by invalid | 59 | * some circumstances a machine check exception, triggered by invalid |
60 | * values in the index register. Delaying the tlbp instruction until | 60 | * values in the index register. Delaying the tlbp instruction until |
61 | * after the next branch, plus adding an additional nop in front of | 61 | * after the next branch, plus adding an additional nop in front of |
62 | * tlbwi/tlbwr avoids the invalid index register values. Nobody knows | 62 | * tlbwi/tlbwr avoids the invalid index register values. Nobody knows |
63 | * why; it's not an issue caused by the core RTL. | 63 | * why; it's not an issue caused by the core RTL. |
64 | * | 64 | * |
65 | */ | 65 | */ |
66 | static int __cpuinit m4kc_tlbp_war(void) | 66 | static int __cpuinit m4kc_tlbp_war(void) |
67 | { | 67 | { |
68 | return (current_cpu_data.processor_id & 0xffff00) == | 68 | return (current_cpu_data.processor_id & 0xffff00) == |
69 | (PRID_COMP_MIPS | PRID_IMP_4KC); | 69 | (PRID_COMP_MIPS | PRID_IMP_4KC); |
70 | } | 70 | } |
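m4kc_tlbp_war() keys off the company and implementation fields of the PRId register, masking away the revision byte. A decode sketch (field layout per the architecture manuals; the example PRId value is a hypothetical 4Kc):

    #include <stdio.h>

    #define PRID_COMP_MIPS  0x010000    /* company ID, bits 23..16 */
    #define PRID_IMP_4KC    0x008000    /* implementation, bits 15..8 */

    int main(void)
    {
            unsigned int prid = 0x00018003; /* hypothetical 4Kc, rev 3 */

            if ((prid & 0xffff00) == (PRID_COMP_MIPS | PRID_IMP_4KC))
                    printf("4Kc: apply tlbp workaround (rev %u)\n",
                           prid & 0xff);
            return 0;
    }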
71 | 71 | ||
72 | /* Handle labels (which must be positive integers). */ | 72 | /* Handle labels (which must be positive integers). */ |
73 | enum label_id { | 73 | enum label_id { |
74 | label_second_part = 1, | 74 | label_second_part = 1, |
75 | label_leave, | 75 | label_leave, |
76 | label_vmalloc, | 76 | label_vmalloc, |
77 | label_vmalloc_done, | 77 | label_vmalloc_done, |
78 | label_tlbw_hazard, | 78 | label_tlbw_hazard, |
79 | label_split, | 79 | label_split, |
80 | label_nopage_tlbl, | 80 | label_nopage_tlbl, |
81 | label_nopage_tlbs, | 81 | label_nopage_tlbs, |
82 | label_nopage_tlbm, | 82 | label_nopage_tlbm, |
83 | label_smp_pgtable_change, | 83 | label_smp_pgtable_change, |
84 | label_r3000_write_probe_fail, | 84 | label_r3000_write_probe_fail, |
85 | #ifdef CONFIG_HUGETLB_PAGE | 85 | #ifdef CONFIG_HUGETLB_PAGE |
86 | label_tlb_huge_update, | 86 | label_tlb_huge_update, |
87 | #endif | 87 | #endif |
88 | }; | 88 | }; |
89 | 89 | ||
90 | UASM_L_LA(_second_part) | 90 | UASM_L_LA(_second_part) |
91 | UASM_L_LA(_leave) | 91 | UASM_L_LA(_leave) |
92 | UASM_L_LA(_vmalloc) | 92 | UASM_L_LA(_vmalloc) |
93 | UASM_L_LA(_vmalloc_done) | 93 | UASM_L_LA(_vmalloc_done) |
94 | UASM_L_LA(_tlbw_hazard) | 94 | UASM_L_LA(_tlbw_hazard) |
95 | UASM_L_LA(_split) | 95 | UASM_L_LA(_split) |
96 | UASM_L_LA(_nopage_tlbl) | 96 | UASM_L_LA(_nopage_tlbl) |
97 | UASM_L_LA(_nopage_tlbs) | 97 | UASM_L_LA(_nopage_tlbs) |
98 | UASM_L_LA(_nopage_tlbm) | 98 | UASM_L_LA(_nopage_tlbm) |
99 | UASM_L_LA(_smp_pgtable_change) | 99 | UASM_L_LA(_smp_pgtable_change) |
100 | UASM_L_LA(_r3000_write_probe_fail) | 100 | UASM_L_LA(_r3000_write_probe_fail) |
101 | #ifdef CONFIG_HUGETLB_PAGE | 101 | #ifdef CONFIG_HUGETLB_PAGE |
102 | UASM_L_LA(_tlb_huge_update) | 102 | UASM_L_LA(_tlb_huge_update) |
103 | #endif | 103 | #endif |
104 | 104 | ||
105 | /* | 105 | /* |
106 | * For debug purposes. | 106 | * For debug purposes. |
107 | */ | 107 | */ |
108 | static inline void dump_handler(const u32 *handler, int count) | 108 | static inline void dump_handler(const u32 *handler, int count) |
109 | { | 109 | { |
110 | int i; | 110 | int i; |
111 | 111 | ||
112 | pr_debug("\t.set push\n"); | 112 | pr_debug("\t.set push\n"); |
113 | pr_debug("\t.set noreorder\n"); | 113 | pr_debug("\t.set noreorder\n"); |
114 | 114 | ||
115 | for (i = 0; i < count; i++) | 115 | for (i = 0; i < count; i++) |
116 | pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]); | 116 | pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]); |
117 | 117 | ||
118 | pr_debug("\t.set pop\n"); | 118 | pr_debug("\t.set pop\n"); |
119 | } | 119 | } |
120 | 120 | ||
121 | /* The only general purpose registers allowed in TLB handlers. */ | 121 | /* The only general purpose registers allowed in TLB handlers. */ |
122 | #define K0 26 | 122 | #define K0 26 |
123 | #define K1 27 | 123 | #define K1 27 |
124 | 124 | ||
125 | /* Some CP0 registers */ | 125 | /* Some CP0 registers */ |
126 | #define C0_INDEX 0, 0 | 126 | #define C0_INDEX 0, 0 |
127 | #define C0_ENTRYLO0 2, 0 | 127 | #define C0_ENTRYLO0 2, 0 |
128 | #define C0_TCBIND 2, 2 | 128 | #define C0_TCBIND 2, 2 |
129 | #define C0_ENTRYLO1 3, 0 | 129 | #define C0_ENTRYLO1 3, 0 |
130 | #define C0_CONTEXT 4, 0 | 130 | #define C0_CONTEXT 4, 0 |
131 | #define C0_PAGEMASK 5, 0 | 131 | #define C0_PAGEMASK 5, 0 |
132 | #define C0_BADVADDR 8, 0 | 132 | #define C0_BADVADDR 8, 0 |
133 | #define C0_ENTRYHI 10, 0 | 133 | #define C0_ENTRYHI 10, 0 |
134 | #define C0_EPC 14, 0 | 134 | #define C0_EPC 14, 0 |
135 | #define C0_XCONTEXT 20, 0 | 135 | #define C0_XCONTEXT 20, 0 |
136 | 136 | ||
137 | #ifdef CONFIG_64BIT | 137 | #ifdef CONFIG_64BIT |
138 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) | 138 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) |
139 | #else | 139 | #else |
140 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) | 140 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) |
141 | #endif | 141 | #endif |
142 | 142 | ||
143 | /* The worst case length of the handler is around 18 instructions for | 143 | /* The worst case length of the handler is around 18 instructions for |
144 | * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. | 144 | * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. |
145 | * Maximum space available is 32 instructions for R3000 and 64 | 145 | * Maximum space available is 32 instructions for R3000 and 64 |
146 | * instructions for R4000. | 146 | * instructions for R4000. |
147 | * | 147 | * |
148 | * We deliberately chose a buffer size of 128, so we won't scribble | 148 | * We deliberately chose a buffer size of 128, so we won't scribble |
149 | * over anything important on overflow before we panic. | 149 | * over anything important on overflow before we panic. |
150 | */ | 150 | */ |
151 | static u32 tlb_handler[128] __cpuinitdata; | 151 | static u32 tlb_handler[128] __cpuinitdata; |
152 | 152 | ||
153 | /* simply assume worst case size for labels and relocs */ | 153 | /* simply assume worst case size for labels and relocs */ |
154 | static struct uasm_label labels[128] __cpuinitdata; | 154 | static struct uasm_label labels[128] __cpuinitdata; |
155 | static struct uasm_reloc relocs[128] __cpuinitdata; | 155 | static struct uasm_reloc relocs[128] __cpuinitdata; |
156 | 156 | ||
157 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 157 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
158 | /* | 158 | /* |
159 | * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, | 159 | * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, |
160 | * so we cannot do r3000 under these circumstances. | 160 | * so we cannot do r3000 under these circumstances. |
161 | */ | 161 | */ |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * The R3000 TLB handler is simple. | 164 | * The R3000 TLB handler is simple. |
165 | */ | 165 | */ |
166 | static void __cpuinit build_r3000_tlb_refill_handler(void) | 166 | static void __cpuinit build_r3000_tlb_refill_handler(void) |
167 | { | 167 | { |
168 | long pgdc = (long)pgd_current; | 168 | long pgdc = (long)pgd_current; |
169 | u32 *p; | 169 | u32 *p; |
170 | 170 | ||
171 | memset(tlb_handler, 0, sizeof(tlb_handler)); | 171 | memset(tlb_handler, 0, sizeof(tlb_handler)); |
172 | p = tlb_handler; | 172 | p = tlb_handler; |
173 | 173 | ||
174 | uasm_i_mfc0(&p, K0, C0_BADVADDR); | 174 | uasm_i_mfc0(&p, K0, C0_BADVADDR); |
175 | uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ | 175 | uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ |
176 | uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); | 176 | uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); |
177 | uasm_i_srl(&p, K0, K0, 22); /* load delay */ | 177 | uasm_i_srl(&p, K0, K0, 22); /* load delay */ |
178 | uasm_i_sll(&p, K0, K0, 2); | 178 | uasm_i_sll(&p, K0, K0, 2); |
179 | uasm_i_addu(&p, K1, K1, K0); | 179 | uasm_i_addu(&p, K1, K1, K0); |
180 | uasm_i_mfc0(&p, K0, C0_CONTEXT); | 180 | uasm_i_mfc0(&p, K0, C0_CONTEXT); |
181 | uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ | 181 | uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ |
182 | uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ | 182 | uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ |
183 | uasm_i_addu(&p, K1, K1, K0); | 183 | uasm_i_addu(&p, K1, K1, K0); |
184 | uasm_i_lw(&p, K0, 0, K1); | 184 | uasm_i_lw(&p, K0, 0, K1); |
185 | uasm_i_nop(&p); /* load delay */ | 185 | uasm_i_nop(&p); /* load delay */ |
186 | uasm_i_mtc0(&p, K0, C0_ENTRYLO0); | 186 | uasm_i_mtc0(&p, K0, C0_ENTRYLO0); |
187 | uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ | 187 | uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ |
188 | uasm_i_tlbwr(&p); /* cp0 delay */ | 188 | uasm_i_tlbwr(&p); /* cp0 delay */ |
189 | uasm_i_jr(&p, K1); | 189 | uasm_i_jr(&p, K1); |
190 | uasm_i_rfe(&p); /* branch delay */ | 190 | uasm_i_rfe(&p); /* branch delay */ |
191 | 191 | ||
192 | if (p > tlb_handler + 32) | 192 | if (p > tlb_handler + 32) |
193 | panic("TLB refill handler space exceeded"); | 193 | panic("TLB refill handler space exceeded"); |
194 | 194 | ||
195 | pr_debug("Wrote TLB refill handler (%u instructions).\n", | 195 | pr_debug("Wrote TLB refill handler (%u instructions).\n", |
196 | (unsigned int)(p - tlb_handler)); | 196 | (unsigned int)(p - tlb_handler)); |
197 | 197 | ||
198 | memcpy((void *)ebase, tlb_handler, 0x80); | 198 | memcpy((void *)ebase, tlb_handler, 0x80); |
199 | 199 | ||
200 | dump_handler((u32 *)ebase, 32); | 200 | dump_handler((u32 *)ebase, 32); |
201 | } | 201 | } |
202 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ | 202 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
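For reference, the uasm stream emitted by build_r3000_tlb_refill_handler() above amounts to a two-level software walk. A plain-C restatement (a sketch only: CP0 state becomes parameters, and the srl 22/andi 0xffc pair implies 4KB pages with 1024-entry word-sized tables):

    #include <stdio.h>

    /* Hypothetical C equivalent of the generated R3000 refill path. */
    static unsigned long r3000_refill(unsigned long *pgd,
                                      unsigned long badvaddr,
                                      unsigned long context)
    {
            unsigned long *pte_table = (unsigned long *)pgd[badvaddr >> 22];

            /* context already holds the pte index scaled by 4 */
            return pte_table[(context & 0xffc) >> 2];   /* -> EntryLo0 */
    }

    int main(void)
    {
            unsigned long pte[1024] = { 0 }, pgd[1024];

            pte[1] = 0xabc;             /* fake pte for va 0x1000 */
            pgd[0] = (unsigned long)pte;
            printf("%#lx\n", r3000_refill(pgd, 0x1000, 1 << 2));
            return 0;
    }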
203 | 203 | ||
204 | /* | 204 | /* |
205 | * The R4000 TLB handler is much more complicated. We have two | 205 | * The R4000 TLB handler is much more complicated. We have two |
206 | * consecutive handler areas with 32 instructions space each. | 206 | * consecutive handler areas with 32 instructions space each. |
207 | * Since they aren't used at the same time, we can overflow in the | 207 | * Since they aren't used at the same time, we can overflow in the |
208 | * other one.To keep things simple, we first assume linear space, | 208 | * other one.To keep things simple, we first assume linear space, |
209 | * then we relocate it to the final handler layout as needed. | 209 | * then we relocate it to the final handler layout as needed. |
210 | */ | 210 | */ |
211 | static u32 final_handler[64] __cpuinitdata; | 211 | static u32 final_handler[64] __cpuinitdata; |
212 | 212 | ||
213 | /* | 213 | /* |
214 | * Hazards | 214 | * Hazards |
215 | * | 215 | * |
216 | * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: | 216 | * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: |
217 | * 2. A timing hazard exists for the TLBP instruction. | 217 | * 2. A timing hazard exists for the TLBP instruction. |
218 | * | 218 | * |
219 | * stalling_instruction | 219 | * stalling_instruction |
220 | * TLBP | 220 | * TLBP |
221 | * | 221 | * |
222 | * The JTLB is being read for the TLBP throughout the stall generated by the | 222 | * The JTLB is being read for the TLBP throughout the stall generated by the |
223 | * previous instruction. This is not really correct as the stalling instruction | 223 | * previous instruction. This is not really correct as the stalling instruction |
224 | * can modify the address used to access the JTLB. The failure symptom is that | 224 | * can modify the address used to access the JTLB. The failure symptom is that |
225 | * the TLBP instruction will use an address created for the stalling instruction | 225 | * the TLBP instruction will use an address created for the stalling instruction |
226 | * and not the address held in C0_ENHI and thus report the wrong results. | 226 | * and not the address held in C0_ENHI and thus report the wrong results. |
227 | * | 227 | * |
228 | * The software work-around is to not allow the instruction preceding the TLBP | 228 | * The software work-around is to not allow the instruction preceding the TLBP |
229 | * to stall - make it an NOP or some other instruction guaranteed not to stall. | 229 | * to stall - make it an NOP or some other instruction guaranteed not to stall. |
230 | * | 230 | * |
231 | * Errata 2 will not be fixed. This errata is also on the R5000. | 231 | * Errata 2 will not be fixed. This errata is also on the R5000. |
232 | * | 232 | * |
233 | * As if we MIPS hackers wouldn't know how to nop pipelines happy ... | 233 | * As if we MIPS hackers wouldn't know how to nop pipelines happy ... |
234 | */ | 234 | */ |
235 | static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) | 235 | static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) |
236 | { | 236 | { |
237 | switch (current_cpu_type()) { | 237 | switch (current_cpu_type()) { |
238 | /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ | 238 | /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ |
239 | case CPU_R4600: | 239 | case CPU_R4600: |
240 | case CPU_R4700: | 240 | case CPU_R4700: |
241 | case CPU_R5000: | 241 | case CPU_R5000: |
242 | case CPU_R5000A: | 242 | case CPU_R5000A: |
243 | case CPU_NEVADA: | 243 | case CPU_NEVADA: |
244 | uasm_i_nop(p); | 244 | uasm_i_nop(p); |
245 | uasm_i_tlbp(p); | 245 | uasm_i_tlbp(p); |
246 | break; | 246 | break; |
247 | 247 | ||
248 | default: | 248 | default: |
249 | uasm_i_tlbp(p); | 249 | uasm_i_tlbp(p); |
250 | break; | 250 | break; |
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * Write random or indexed TLB entry, and care about the hazards from | 255 | * Write random or indexed TLB entry, and care about the hazards from |
256 | * the preceding mtc0 and for the following eret. | 256 | * the preceding mtc0 and for the following eret. |
257 | */ | 257 | */ |
258 | enum tlb_write_entry { tlb_random, tlb_indexed }; | 258 | enum tlb_write_entry { tlb_random, tlb_indexed }; |
259 | 259 | ||
260 | static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, | 260 | static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, |
261 | struct uasm_reloc **r, | 261 | struct uasm_reloc **r, |
262 | enum tlb_write_entry wmode) | 262 | enum tlb_write_entry wmode) |
263 | { | 263 | { |
264 | void(*tlbw)(u32 **) = NULL; | 264 | void(*tlbw)(u32 **) = NULL; |
265 | 265 | ||
266 | switch (wmode) { | 266 | switch (wmode) { |
267 | case tlb_random: tlbw = uasm_i_tlbwr; break; | 267 | case tlb_random: tlbw = uasm_i_tlbwr; break; |
268 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; | 268 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; |
269 | } | 269 | } |
270 | 270 | ||
271 | if (cpu_has_mips_r2) { | 271 | if (cpu_has_mips_r2) { |
272 | if (cpu_has_mips_r2_exec_hazard) | 272 | if (cpu_has_mips_r2_exec_hazard) |
273 | uasm_i_ehb(p); | 273 | uasm_i_ehb(p); |
274 | tlbw(p); | 274 | tlbw(p); |
275 | return; | 275 | return; |
276 | } | 276 | } |
277 | 277 | ||
278 | switch (current_cpu_type()) { | 278 | switch (current_cpu_type()) { |
279 | case CPU_R4000PC: | 279 | case CPU_R4000PC: |
280 | case CPU_R4000SC: | 280 | case CPU_R4000SC: |
281 | case CPU_R4000MC: | 281 | case CPU_R4000MC: |
282 | case CPU_R4400PC: | 282 | case CPU_R4400PC: |
283 | case CPU_R4400SC: | 283 | case CPU_R4400SC: |
284 | case CPU_R4400MC: | 284 | case CPU_R4400MC: |
285 | /* | 285 | /* |
286 | * This branch uses up a mtc0 hazard nop slot and saves | 286 | * This branch uses up a mtc0 hazard nop slot and saves |
287 | * two nops after the tlbw instruction. | 287 | * two nops after the tlbw instruction. |
288 | */ | 288 | */ |
289 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); | 289 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); |
290 | tlbw(p); | 290 | tlbw(p); |
291 | uasm_l_tlbw_hazard(l, *p); | 291 | uasm_l_tlbw_hazard(l, *p); |
292 | uasm_i_nop(p); | 292 | uasm_i_nop(p); |
293 | break; | 293 | break; |
294 | 294 | ||
295 | case CPU_R4600: | 295 | case CPU_R4600: |
296 | case CPU_R4700: | 296 | case CPU_R4700: |
297 | case CPU_R5000: | 297 | case CPU_R5000: |
298 | case CPU_R5000A: | 298 | case CPU_R5000A: |
299 | uasm_i_nop(p); | 299 | uasm_i_nop(p); |
300 | tlbw(p); | 300 | tlbw(p); |
301 | uasm_i_nop(p); | 301 | uasm_i_nop(p); |
302 | break; | 302 | break; |
303 | 303 | ||
304 | case CPU_R4300: | 304 | case CPU_R4300: |
305 | case CPU_5KC: | 305 | case CPU_5KC: |
306 | case CPU_TX49XX: | 306 | case CPU_TX49XX: |
307 | case CPU_PR4450: | 307 | case CPU_PR4450: |
308 | uasm_i_nop(p); | 308 | uasm_i_nop(p); |
309 | tlbw(p); | 309 | tlbw(p); |
310 | break; | 310 | break; |
311 | 311 | ||
312 | case CPU_R10000: | 312 | case CPU_R10000: |
313 | case CPU_R12000: | 313 | case CPU_R12000: |
314 | case CPU_R14000: | 314 | case CPU_R14000: |
315 | case CPU_4KC: | 315 | case CPU_4KC: |
316 | case CPU_4KEC: | 316 | case CPU_4KEC: |
317 | case CPU_SB1: | 317 | case CPU_SB1: |
318 | case CPU_SB1A: | 318 | case CPU_SB1A: |
319 | case CPU_4KSC: | 319 | case CPU_4KSC: |
320 | case CPU_20KC: | 320 | case CPU_20KC: |
321 | case CPU_25KF: | 321 | case CPU_25KF: |
322 | case CPU_BCM3302: | 322 | case CPU_BCM3302: |
323 | case CPU_BCM4710: | 323 | case CPU_BCM4710: |
324 | case CPU_LOONGSON2: | 324 | case CPU_LOONGSON2: |
325 | case CPU_BCM6338: | 325 | case CPU_BCM6338: |
326 | case CPU_BCM6345: | 326 | case CPU_BCM6345: |
327 | case CPU_BCM6348: | 327 | case CPU_BCM6348: |
328 | case CPU_BCM6358: | 328 | case CPU_BCM6358: |
329 | case CPU_R5500: | 329 | case CPU_R5500: |
330 | if (m4kc_tlbp_war()) | 330 | if (m4kc_tlbp_war()) |
331 | uasm_i_nop(p); | 331 | uasm_i_nop(p); |
332 | case CPU_ALCHEMY: | 332 | case CPU_ALCHEMY: |
333 | tlbw(p); | 333 | tlbw(p); |
334 | break; | 334 | break; |
335 | 335 | ||
336 | case CPU_NEVADA: | 336 | case CPU_NEVADA: |
337 | uasm_i_nop(p); /* QED specifies 2 nops hazard */ | 337 | uasm_i_nop(p); /* QED specifies 2 nops hazard */ |
338 | /* | 338 | /* |
339 | * This branch uses up a mtc0 hazard nop slot and saves | 339 | * This branch uses up a mtc0 hazard nop slot and saves |
340 | * a nop after the tlbw instruction. | 340 | * a nop after the tlbw instruction. |
341 | */ | 341 | */ |
342 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); | 342 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); |
343 | tlbw(p); | 343 | tlbw(p); |
344 | uasm_l_tlbw_hazard(l, *p); | 344 | uasm_l_tlbw_hazard(l, *p); |
345 | break; | 345 | break; |
346 | 346 | ||
347 | case CPU_RM7000: | 347 | case CPU_RM7000: |
348 | uasm_i_nop(p); | 348 | uasm_i_nop(p); |
349 | uasm_i_nop(p); | 349 | uasm_i_nop(p); |
350 | uasm_i_nop(p); | 350 | uasm_i_nop(p); |
351 | uasm_i_nop(p); | 351 | uasm_i_nop(p); |
352 | tlbw(p); | 352 | tlbw(p); |
353 | break; | 353 | break; |
354 | 354 | ||
355 | case CPU_RM9000: | 355 | case CPU_RM9000: |
356 | /* | 356 | /* |
357 | * When the JTLB is updated by tlbwi or tlbwr, a subsequent | 357 | * When the JTLB is updated by tlbwi or tlbwr, a subsequent |
358 | * use of the JTLB for instructions should not occur for 4 | 358 | * use of the JTLB for instructions should not occur for 4 |
359 | * cpu cycles and use for data translations should not occur | 359 | * cpu cycles and use for data translations should not occur |
360 | * for 3 cpu cycles. | 360 | * for 3 cpu cycles. |
361 | */ | 361 | */ |
362 | uasm_i_ssnop(p); | 362 | uasm_i_ssnop(p); |
363 | uasm_i_ssnop(p); | 363 | uasm_i_ssnop(p); |
364 | uasm_i_ssnop(p); | 364 | uasm_i_ssnop(p); |
365 | uasm_i_ssnop(p); | 365 | uasm_i_ssnop(p); |
366 | tlbw(p); | 366 | tlbw(p); |
367 | uasm_i_ssnop(p); | 367 | uasm_i_ssnop(p); |
368 | uasm_i_ssnop(p); | 368 | uasm_i_ssnop(p); |
369 | uasm_i_ssnop(p); | 369 | uasm_i_ssnop(p); |
370 | uasm_i_ssnop(p); | 370 | uasm_i_ssnop(p); |
371 | break; | 371 | break; |
372 | 372 | ||
373 | case CPU_VR4111: | 373 | case CPU_VR4111: |
374 | case CPU_VR4121: | 374 | case CPU_VR4121: |
375 | case CPU_VR4122: | 375 | case CPU_VR4122: |
376 | case CPU_VR4181: | 376 | case CPU_VR4181: |
377 | case CPU_VR4181A: | 377 | case CPU_VR4181A: |
378 | uasm_i_nop(p); | 378 | uasm_i_nop(p); |
379 | uasm_i_nop(p); | 379 | uasm_i_nop(p); |
380 | tlbw(p); | 380 | tlbw(p); |
381 | uasm_i_nop(p); | 381 | uasm_i_nop(p); |
382 | uasm_i_nop(p); | 382 | uasm_i_nop(p); |
383 | break; | 383 | break; |
384 | 384 | ||
385 | case CPU_VR4131: | 385 | case CPU_VR4131: |
386 | case CPU_VR4133: | 386 | case CPU_VR4133: |
387 | case CPU_R5432: | 387 | case CPU_R5432: |
388 | uasm_i_nop(p); | 388 | uasm_i_nop(p); |
389 | uasm_i_nop(p); | 389 | uasm_i_nop(p); |
390 | tlbw(p); | 390 | tlbw(p); |
391 | break; | 391 | break; |
392 | 392 | ||
393 | default: | 393 | default: |
394 | panic("No TLB refill handler yet (CPU type: %d)", | 394 | panic("No TLB refill handler yet (CPU type: %d)", |
395 | current_cpu_data.cputype); | 395 | current_cpu_data.cputype); |
396 | break; | 396 | break; |
397 | } | 397 | } |
398 | } | 398 | } |
399 | 399 | ||
400 | #ifdef CONFIG_HUGETLB_PAGE | 400 | #ifdef CONFIG_HUGETLB_PAGE |
401 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, | 401 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, |
402 | struct uasm_label **l, | 402 | struct uasm_label **l, |
403 | struct uasm_reloc **r, | 403 | struct uasm_reloc **r, |
404 | unsigned int tmp, | 404 | unsigned int tmp, |
405 | enum tlb_write_entry wmode) | 405 | enum tlb_write_entry wmode) |
406 | { | 406 | { |
407 | /* Set huge page tlb entry size */ | 407 | /* Set huge page tlb entry size */ |
408 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); | 408 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); |
409 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); | 409 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); |
410 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 410 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
411 | 411 | ||
412 | build_tlb_write_entry(p, l, r, wmode); | 412 | build_tlb_write_entry(p, l, r, wmode); |
413 | 413 | ||
414 | /* Reset default page size */ | 414 | /* Reset default page size */ |
415 | if (PM_DEFAULT_MASK >> 16) { | 415 | if (PM_DEFAULT_MASK >> 16) { |
416 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); | 416 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); |
417 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); | 417 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); |
418 | uasm_il_b(p, r, label_leave); | 418 | uasm_il_b(p, r, label_leave); |
419 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 419 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
420 | } else if (PM_DEFAULT_MASK) { | 420 | } else if (PM_DEFAULT_MASK) { |
421 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); | 421 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); |
422 | uasm_il_b(p, r, label_leave); | 422 | uasm_il_b(p, r, label_leave); |
423 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 423 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
424 | } else { | 424 | } else { |
425 | uasm_il_b(p, r, label_leave); | 425 | uasm_il_b(p, r, label_leave); |
426 | uasm_i_mtc0(p, 0, C0_PAGEMASK); | 426 | uasm_i_mtc0(p, 0, C0_PAGEMASK); |
427 | } | 427 | } |
428 | } | 428 | } |
429 | 429 | ||
430 | /* | 430 | /* |
431 | * Check if Huge PTE is present, if so then jump to LABEL. | 431 | * Check if Huge PTE is present, if so then jump to LABEL. |
432 | */ | 432 | */ |
433 | static void __cpuinit | 433 | static void __cpuinit |
434 | build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, | 434 | build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, |
435 | unsigned int pmd, int lid) | 435 | unsigned int pmd, int lid) |
436 | { | 436 | { |
437 | UASM_i_LW(p, tmp, 0, pmd); | 437 | UASM_i_LW(p, tmp, 0, pmd); |
438 | uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); | 438 | uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); |
439 | uasm_il_bnez(p, r, tmp, lid); | 439 | uasm_il_bnez(p, r, tmp, lid); |
440 | } | 440 | } |
441 | 441 | ||
442 | static __cpuinit void build_huge_update_entries(u32 **p, | 442 | static __cpuinit void build_huge_update_entries(u32 **p, |
443 | unsigned int pte, | 443 | unsigned int pte, |
444 | unsigned int tmp) | 444 | unsigned int tmp) |
445 | { | 445 | { |
446 | int small_sequence; | 446 | int small_sequence; |
447 | 447 | ||
448 | /* | 448 | /* |
449 | * A huge PTE describes an area the size of the | 449 | * A huge PTE describes an area the size of the |
450 | * configured huge page size. This is twice the | 450 | * configured huge page size. This is twice the |
451 | * size of the large TLB entry we intend to use. | 451 | * size of the large TLB entry we intend to use. |
452 | * A TLB entry half the size of the configured | 452 | * A TLB entry half the size of the configured |
453 | * huge page size is configured into entrylo0 | 453 | * huge page size is configured into entrylo0 |
454 | * and entrylo1 to cover the contiguous huge PTE | 454 | * and entrylo1 to cover the contiguous huge PTE |
455 | * address space. | 455 | * address space. |
456 | */ | 456 | */ |
457 | small_sequence = (HPAGE_SIZE >> 7) < 0x10000; | 457 | small_sequence = (HPAGE_SIZE >> 7) < 0x10000; |
458 | 458 | ||
459 | /* We can clobber tmp. It isn't used after this. */ | 459 | /* We can clobber tmp. It isn't used after this. */ |
460 | if (!small_sequence) | 460 | if (!small_sequence) |
461 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); | 461 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); |
462 | 462 | ||
463 | UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ | 463 | UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ |
464 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */ | 464 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */ |
465 | /* convert to entrylo1 */ | 465 | /* convert to entrylo1 */ |
466 | if (small_sequence) | 466 | if (small_sequence) |
467 | UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); | 467 | UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); |
468 | else | 468 | else |
469 | UASM_i_ADDU(p, pte, pte, tmp); | 469 | UASM_i_ADDU(p, pte, pte, tmp); |
470 | 470 | ||
471 | uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */ | 471 | uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */ |
472 | } | 472 | } |
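The HPAGE_SIZE >> 7 increment falls out of the entrylo encoding: entrylo1 maps the second half of the huge page, which sits HPAGE_SIZE/2 further on physically, and the PFN occupies entrylo starting at bit 6. A quick check, assuming 4KB base pages (so HPAGE_SHIFT = 21, i.e. 2MB huge pages):

    #include <stdio.h>

    int main(void)
    {
            unsigned long hpage_size = 1UL << 21;   /* assumed 2MB */

            /* (HPAGE_SIZE/2) >> 12 pages, shifted to the PFN field at bit 6 */
            unsigned long delta = ((hpage_size / 2) >> 12) << 6;

            printf("%d\n", delta == hpage_size >> 7);   /* prints 1 */
            return 0;
    }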
473 | 473 | ||
474 | static __cpuinit void build_huge_handler_tail(u32 **p, | 474 | static __cpuinit void build_huge_handler_tail(u32 **p, |
475 | struct uasm_reloc **r, | 475 | struct uasm_reloc **r, |
476 | struct uasm_label **l, | 476 | struct uasm_label **l, |
477 | unsigned int pte, | 477 | unsigned int pte, |
478 | unsigned int ptr) | 478 | unsigned int ptr) |
479 | { | 479 | { |
480 | #ifdef CONFIG_SMP | 480 | #ifdef CONFIG_SMP |
481 | UASM_i_SC(p, pte, 0, ptr); | 481 | UASM_i_SC(p, pte, 0, ptr); |
482 | uasm_il_beqz(p, r, pte, label_tlb_huge_update); | 482 | uasm_il_beqz(p, r, pte, label_tlb_huge_update); |
483 | UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ | 483 | UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ |
484 | #else | 484 | #else |
485 | UASM_i_SW(p, pte, 0, ptr); | 485 | UASM_i_SW(p, pte, 0, ptr); |
486 | #endif | 486 | #endif |
487 | build_huge_update_entries(p, pte, ptr); | 487 | build_huge_update_entries(p, pte, ptr); |
488 | build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed); | 488 | build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed); |
489 | } | 489 | } |
490 | #endif /* CONFIG_HUGETLB_PAGE */ | 490 | #endif /* CONFIG_HUGETLB_PAGE */ |
491 | 491 | ||
492 | #ifdef CONFIG_64BIT | 492 | #ifdef CONFIG_64BIT |
493 | /* | 493 | /* |
494 | * TMP and PTR are scratch. | 494 | * TMP and PTR are scratch. |
495 | * TMP will be clobbered, PTR will hold the pmd entry. | 495 | * TMP will be clobbered, PTR will hold the pmd entry. |
496 | */ | 496 | */ |
497 | static void __cpuinit | 497 | static void __cpuinit |
498 | build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | 498 | build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
499 | unsigned int tmp, unsigned int ptr) | 499 | unsigned int tmp, unsigned int ptr) |
500 | { | 500 | { |
501 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 501 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
502 | long pgdc = (long)pgd_current; | 502 | long pgdc = (long)pgd_current; |
503 | #endif | 503 | #endif |
504 | /* | 504 | /* |
505 | * The vmalloc handling is not in the hotpath. | 505 | * The vmalloc handling is not in the hotpath. |
506 | */ | 506 | */ |
507 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); | 507 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); |
508 | uasm_il_bltz(p, r, tmp, label_vmalloc); | 508 | uasm_il_bltz(p, r, tmp, label_vmalloc); |
509 | /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ | 509 | /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ |
510 | 510 | ||
511 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 511 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
512 | /* | 512 | /* |
513 | * &pgd << 11 stored in CONTEXT [23..63]. | 513 | * &pgd << 11 stored in CONTEXT [23..63]. |
514 | */ | 514 | */ |
515 | UASM_i_MFC0(p, ptr, C0_CONTEXT); | 515 | UASM_i_MFC0(p, ptr, C0_CONTEXT); |
516 | uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */ | 516 | uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */ |
517 | uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */ | 517 | uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */ |
518 | uasm_i_drotr(p, ptr, ptr, 11); | 518 | uasm_i_drotr(p, ptr, ptr, 11); |
519 | #elif defined(CONFIG_SMP) | 519 | #elif defined(CONFIG_SMP) |
520 | # ifdef CONFIG_MIPS_MT_SMTC | 520 | # ifdef CONFIG_MIPS_MT_SMTC |
521 | /* | 521 | /* |
522 | * SMTC uses TCBind value as "CPU" index | 522 | * SMTC uses TCBind value as "CPU" index |
523 | */ | 523 | */ |
524 | uasm_i_mfc0(p, ptr, C0_TCBIND); | 524 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
525 | uasm_i_dsrl(p, ptr, ptr, 19); | 525 | uasm_i_dsrl(p, ptr, ptr, 19); |
526 | # else | 526 | # else |
527 | /* | 527 | /* |
528 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 | 528 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 |
529 | * stored in CONTEXT. | 529 | * stored in CONTEXT. |
530 | */ | 530 | */ |
531 | uasm_i_dmfc0(p, ptr, C0_CONTEXT); | 531 | uasm_i_dmfc0(p, ptr, C0_CONTEXT); |
532 | uasm_i_dsrl(p, ptr, ptr, 23); | 532 | uasm_i_dsrl(p, ptr, ptr, 23); |
533 | # endif | 533 | # endif |
534 | UASM_i_LA_mostly(p, tmp, pgdc); | 534 | UASM_i_LA_mostly(p, tmp, pgdc); |
535 | uasm_i_daddu(p, ptr, ptr, tmp); | 535 | uasm_i_daddu(p, ptr, ptr, tmp); |
536 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); | 536 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); |
537 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | 537 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); |
538 | #else | 538 | #else |
539 | UASM_i_LA_mostly(p, ptr, pgdc); | 539 | UASM_i_LA_mostly(p, ptr, pgdc); |
540 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | 540 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); |
541 | #endif | 541 | #endif |
542 | 542 | ||
543 | uasm_l_vmalloc_done(l, *p); | 543 | uasm_l_vmalloc_done(l, *p); |
544 | 544 | ||
545 | if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */ | 545 | if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */ |
546 | uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); | 546 | uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); |
547 | else | 547 | else |
548 | uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32); | 548 | uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32); |
549 | 549 | ||
550 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); | 550 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); |
551 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ | 551 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ |
552 | #ifndef __PAGETABLE_PMD_FOLDED | ||
552 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ | 553 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
553 | uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ | 554 | uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ |
554 | uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ | 555 | uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ |
555 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); | 556 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); |
556 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ | 557 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ |
558 | #endif | ||
557 | } | 559 | } |
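For reference, the sequence emitted above boils down to a two-level table walk. A minimal C sketch of that arithmetic follows; the EX_* constants are assumed example values for a 64-bit kernel with 4KB pages, not taken from this file:

    #include <stdint.h>

    #define EX_PGDIR_SHIFT  30   /* assumed: 4KB pages, 3 levels */
    #define EX_PMD_SHIFT    21
    #define EX_PTRS_PER_PGD 512
    #define EX_PTRS_PER_PMD 512

    /* uasm_i_dsrl / uasm_i_andi / uasm_i_daddu, restated in C */
    static uint64_t *walk_to_pmd(uint64_t *pgd_base, uint64_t badvaddr)
    {
        uint64_t pgd_off = (badvaddr >> (EX_PGDIR_SHIFT - 3)) &
                           (uint64_t)((EX_PTRS_PER_PGD - 1) << 3);
        uint64_t *pmd_base = (uint64_t *)pgd_base[pgd_off / 8];
        uint64_t pmd_off = (badvaddr >> (EX_PMD_SHIFT - 3)) &
                           (uint64_t)((EX_PTRS_PER_PMD - 1) << 3);
        return &pmd_base[pmd_off / 8];
    }

Shifting by PGDIR_SHIFT - 3 instead of PGDIR_SHIFT folds the 8-byte entry scaling into a single shift, and the #ifndef __PAGETABLE_PMD_FOLDED guard introduced here simply drops the second half of the walk for two-level configurations.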
558 | 560 | ||
559 | /* | 561 | /* |
560 | * BVADDR is the faulting address, PTR is scratch. | 562 | * BVADDR is the faulting address, PTR is scratch. |
561 | * PTR will hold the pgd for vmalloc. | 563 | * PTR will hold the pgd for vmalloc. |
562 | */ | 564 | */ |
563 | static void __cpuinit | 565 | static void __cpuinit |
564 | build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | 566 | build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
565 | unsigned int bvaddr, unsigned int ptr) | 567 | unsigned int bvaddr, unsigned int ptr) |
566 | { | 568 | { |
567 | long swpd = (long)swapper_pg_dir; | 569 | long swpd = (long)swapper_pg_dir; |
568 | 570 | ||
569 | uasm_l_vmalloc(l, *p); | 571 | uasm_l_vmalloc(l, *p); |
570 | 572 | ||
571 | if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { | 573 | if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { |
572 | uasm_il_b(p, r, label_vmalloc_done); | 574 | uasm_il_b(p, r, label_vmalloc_done); |
573 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | 575 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); |
574 | } else { | 576 | } else { |
575 | UASM_i_LA_mostly(p, ptr, swpd); | 577 | UASM_i_LA_mostly(p, ptr, swpd); |
576 | uasm_il_b(p, r, label_vmalloc_done); | 578 | uasm_il_b(p, r, label_vmalloc_done); |
577 | if (uasm_in_compat_space_p(swpd)) | 579 | if (uasm_in_compat_space_p(swpd)) |
578 | uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); | 580 | uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); |
579 | else | 581 | else |
580 | uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); | 582 | uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); |
581 | } | 583 | } |
582 | } | 584 | } |
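The uasm_rel_hi()/uasm_rel_lo() pair used here splits an address into a lui immediate and a sign-extended 16-bit offset. A sketch of that split, under the assumption that it matches uasm's usual rounding rule:

    #include <stdint.h>

    static int32_t rel_hi(int64_t addr)   /* lui immediate */
    {
        return ((addr + 0x8000) >> 16) & 0xffff;
    }

    static int32_t rel_lo(int64_t addr)   /* addiu immediate, sign-extended */
    {
        return (int16_t)addr;
    }

Because rel_lo() is sign-extended, rel_hi() rounds up when the low half is negative, so (rel_hi(a) << 16) + rel_lo(a) == a for any a in the 32-bit compat space; the fast path above needs only the lui because rel_lo(swpd) is zero. Note also that the branch is emitted before the final address fixup so the fixup executes in the branch delay slot.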
583 | 585 | ||
584 | #else /* !CONFIG_64BIT */ | 586 | #else /* !CONFIG_64BIT */ |
585 | 587 | ||
586 | /* | 588 | /* |
587 | * TMP and PTR are scratch. | 589 | * TMP and PTR are scratch. |
588 | * TMP will be clobbered, PTR will hold the pgd entry. | 590 | * TMP will be clobbered, PTR will hold the pgd entry. |
589 | */ | 591 | */ |
590 | static void __cpuinit __maybe_unused | 592 | static void __cpuinit __maybe_unused |
591 | build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) | 593 | build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) |
592 | { | 594 | { |
593 | long pgdc = (long)pgd_current; | 595 | long pgdc = (long)pgd_current; |
594 | 596 | ||
595 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ | 597 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ |
596 | #ifdef CONFIG_SMP | 598 | #ifdef CONFIG_SMP |
597 | #ifdef CONFIG_MIPS_MT_SMTC | 599 | #ifdef CONFIG_MIPS_MT_SMTC |
598 | /* | 600 | /* |
599 | * SMTC uses TCBind value as "CPU" index | 601 | * SMTC uses TCBind value as "CPU" index |
600 | */ | 602 | */ |
601 | uasm_i_mfc0(p, ptr, C0_TCBIND); | 603 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
602 | UASM_i_LA_mostly(p, tmp, pgdc); | 604 | UASM_i_LA_mostly(p, tmp, pgdc); |
603 | uasm_i_srl(p, ptr, ptr, 19); | 605 | uasm_i_srl(p, ptr, ptr, 19); |
604 | #else | 606 | #else |
605 | /* | 607 | /* |
606 | * smp_processor_id() << 3 is stored in CONTEXT. | 608 | * smp_processor_id() << 3 is stored in CONTEXT. |
607 | */ | 609 | */ |
608 | uasm_i_mfc0(p, ptr, C0_CONTEXT); | 610 | uasm_i_mfc0(p, ptr, C0_CONTEXT); |
609 | UASM_i_LA_mostly(p, tmp, pgdc); | 611 | UASM_i_LA_mostly(p, tmp, pgdc); |
610 | uasm_i_srl(p, ptr, ptr, 23); | 612 | uasm_i_srl(p, ptr, ptr, 23); |
611 | #endif | 613 | #endif |
612 | uasm_i_addu(p, ptr, tmp, ptr); | 614 | uasm_i_addu(p, ptr, tmp, ptr); |
613 | #else | 615 | #else |
614 | UASM_i_LA_mostly(p, ptr, pgdc); | 616 | UASM_i_LA_mostly(p, ptr, pgdc); |
615 | #endif | 617 | #endif |
616 | uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ | 618 | uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
617 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | 619 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); |
618 | uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ | 620 | uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ |
619 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); | 621 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); |
620 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ | 622 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ |
621 | } | 623 | } |
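The 32-bit walk is the same idea with one level. A C restatement, with assumed example values (4KB pages, 4-byte pgd entries):

    #include <stdint.h>

    #define EX_PGDIR_SHIFT 22   /* assumed: 4KB pages, 2 levels */
    #define EX_PGD_T_LOG2  2    /* log2(sizeof(pgd_t)), 4-byte entries */

    static uint32_t *pgd_entry32(uint32_t *pgd_base, uint32_t badvaddr)
    {
        uint32_t off = (badvaddr >> EX_PGDIR_SHIFT) << EX_PGD_T_LOG2;
        return (uint32_t *)((char *)pgd_base + off);   /* srl; sll; addu */
    }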
622 | 624 | ||
623 | #endif /* !CONFIG_64BIT */ | 625 | #endif /* !CONFIG_64BIT */ |
624 | 626 | ||
625 | static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) | 627 | static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) |
626 | { | 628 | { |
627 | unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; | 629 | unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; |
628 | unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); | 630 | unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); |
629 | 631 | ||
630 | switch (current_cpu_type()) { | 632 | switch (current_cpu_type()) { |
631 | case CPU_VR41XX: | 633 | case CPU_VR41XX: |
632 | case CPU_VR4111: | 634 | case CPU_VR4111: |
633 | case CPU_VR4121: | 635 | case CPU_VR4121: |
634 | case CPU_VR4122: | 636 | case CPU_VR4122: |
635 | case CPU_VR4131: | 637 | case CPU_VR4131: |
636 | case CPU_VR4181: | 638 | case CPU_VR4181: |
637 | case CPU_VR4181A: | 639 | case CPU_VR4181A: |
638 | case CPU_VR4133: | 640 | case CPU_VR4133: |
639 | shift += 2; | 641 | shift += 2; |
640 | break; | 642 | break; |
641 | 643 | ||
642 | default: | 644 | default: |
643 | break; | 645 | break; |
644 | } | 646 | } |
645 | 647 | ||
646 | if (shift) | 648 | if (shift) |
647 | UASM_i_SRL(p, ctx, ctx, shift); | 649 | UASM_i_SRL(p, ctx, ctx, shift); |
648 | uasm_i_andi(p, ctx, ctx, mask); | 650 | uasm_i_andi(p, ctx, ctx, mask); |
649 | } | 651 | } |
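build_adjust_context() turns CP0 Context into the byte offset of an even/odd pte pair: BadVPN2 (the faulting VA >> (PAGE_SHIFT + 1)) sits at bit 4 of Context, so a small corrective shift plus a mask is enough. A worked sketch with assumed values (4KB pages, 4-byte ptes), where shift comes out as 1 and mask as 0xff8:

    #include <stdint.h>

    #define EX_PAGE_SHIFT   12
    #define EX_PTE_T_LOG2   2      /* 4-byte pte */
    #define EX_PTRS_PER_PTE 1024

    static uint32_t adjust_context(uint32_t ctx)
    {
        unsigned int shift = 4 - (EX_PTE_T_LOG2 + 1) + EX_PAGE_SHIFT - 12;
        uint32_t mask = (EX_PTRS_PER_PTE / 2 - 1) << (EX_PTE_T_LOG2 + 1);

        if (shift)
            ctx >>= shift;         /* UASM_i_SRL  */
        return ctx & mask;         /* uasm_i_andi */
    }

The VR41xx cases bump the shift by 2, which suggests those cores place BadVPN2 two bits higher in Context.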
650 | 652 | ||
651 | static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) | 653 | static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) |
652 | { | 654 | { |
653 | /* | 655 | /* |
654 | * Bug workaround for the Nevada. It seems that under certain | 656 | * Bug workaround for the Nevada. It seems that under certain |
655 | * circumstances the move from cp0_context might produce a | 657 | * circumstances the move from cp0_context might produce a |
656 | * bogus result when the mfc0 instruction and its consumer are | 658 | * bogus result when the mfc0 instruction and its consumer are |
657 | * in different cachelines, or when a load instruction (probably | 659 | * in different cachelines, or when a load instruction (probably |
658 | * any memory reference) comes between them. | 660 | * any memory reference) comes between them. |
659 | */ | 661 | */ |
660 | switch (current_cpu_type()) { | 662 | switch (current_cpu_type()) { |
661 | case CPU_NEVADA: | 663 | case CPU_NEVADA: |
662 | UASM_i_LW(p, ptr, 0, ptr); | 664 | UASM_i_LW(p, ptr, 0, ptr); |
663 | GET_CONTEXT(p, tmp); /* get context reg */ | 665 | GET_CONTEXT(p, tmp); /* get context reg */ |
664 | break; | 666 | break; |
665 | 667 | ||
666 | default: | 668 | default: |
667 | GET_CONTEXT(p, tmp); /* get context reg */ | 669 | GET_CONTEXT(p, tmp); /* get context reg */ |
668 | UASM_i_LW(p, ptr, 0, ptr); | 670 | UASM_i_LW(p, ptr, 0, ptr); |
669 | break; | 671 | break; |
670 | } | 672 | } |
671 | 673 | ||
672 | build_adjust_context(p, tmp); | 674 | build_adjust_context(p, tmp); |
673 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ | 675 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ |
674 | } | 676 | } |
675 | 677 | ||
676 | static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, | 678 | static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, |
677 | unsigned int ptep) | 679 | unsigned int ptep) |
678 | { | 680 | { |
679 | /* | 681 | /* |
680 | * 64bit address support (36bit on a 32bit CPU) in a 32bit | 682 | * 64bit address support (36bit on a 32bit CPU) in a 32bit |
681 | * Kernel is a special case. Only a few CPUs use it. | 683 | * Kernel is a special case. Only a few CPUs use it. |
682 | */ | 684 | */ |
683 | #ifdef CONFIG_64BIT_PHYS_ADDR | 685 | #ifdef CONFIG_64BIT_PHYS_ADDR |
684 | if (cpu_has_64bits) { | 686 | if (cpu_has_64bits) { |
685 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ | 687 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ |
686 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | 688 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ |
687 | uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ | 689 | uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ |
688 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ | 690 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ |
689 | uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ | 691 | uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ |
690 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ | 692 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ |
691 | } else { | 693 | } else { |
692 | int pte_off_even = sizeof(pte_t) / 2; | 694 | int pte_off_even = sizeof(pte_t) / 2; |
693 | int pte_off_odd = pte_off_even + sizeof(pte_t); | 695 | int pte_off_odd = pte_off_even + sizeof(pte_t); |
694 | 696 | ||
695 | /* The pte entries are pre-shifted */ | 697 | /* The pte entries are pre-shifted */ |
696 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ | 698 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ |
697 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ | 699 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ |
698 | uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ | 700 | uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ |
699 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ | 701 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ |
700 | } | 702 | } |
701 | #else | 703 | #else |
702 | UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ | 704 | UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ |
703 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | 705 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ |
704 | if (r45k_bvahwbug()) | 706 | if (r45k_bvahwbug()) |
705 | build_tlb_probe_entry(p); | 707 | build_tlb_probe_entry(p); |
706 | UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ | 708 | UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ |
707 | if (r4k_250MHZhwbug()) | 709 | if (r4k_250MHZhwbug()) |
708 | uasm_i_mtc0(p, 0, C0_ENTRYLO0); | 710 | uasm_i_mtc0(p, 0, C0_ENTRYLO0); |
709 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ | 711 | uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ |
710 | UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ | 712 | UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ |
711 | if (r45k_bvahwbug()) | 713 | if (r45k_bvahwbug()) |
712 | uasm_i_mfc0(p, tmp, C0_INDEX); | 714 | uasm_i_mfc0(p, tmp, C0_INDEX); |
713 | if (r4k_250MHZhwbug()) | 715 | if (r4k_250MHZhwbug()) |
714 | uasm_i_mtc0(p, 0, C0_ENTRYLO1); | 716 | uasm_i_mtc0(p, 0, C0_ENTRYLO1); |
715 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ | 717 | uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ |
716 | #endif | 718 | #endif |
717 | } | 719 | } |
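All of these paths end with the same conversion: the software pte is shifted right by 6 to produce the hardware EntryLo format, and the even/odd pte pair fills EntryLo0/EntryLo1 for the two pages covered by one TLB entry. In C:

    #include <stdint.h>

    struct entrylo_pair { uint64_t lo0, lo1; };

    /* sketch of the 64-bit branch above: two loads, two shifts */
    static struct entrylo_pair ptes_to_entrylo(const uint64_t *ptep)
    {
        struct entrylo_pair e;
        e.lo0 = ptep[0] >> 6;   /* even pte -> C0_ENTRYLO0 */
        e.lo1 = ptep[1] >> 6;   /* odd pte  -> C0_ENTRYLO1 */
        return e;
    }

The 32-bit-pte path under CONFIG_64BIT_PHYS_ADDR skips the shift because those ptes are stored pre-shifted, as the comment notes.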
718 | 720 | ||
719 | /* | 721 | /* |
720 | * For a 64-bit kernel, we are using the 64-bit XTLB refill exception | 722 | * For a 64-bit kernel, we are using the 64-bit XTLB refill exception |
721 | * because EXL == 0. If the handler wraps, we can also use the 32 | 723 | * because EXL == 0. If the handler wraps, we can also use the 32 |
722 | * instruction slots before the XTLB refill handler, which belong | 724 | * instruction slots before the XTLB refill handler, which belong |
723 | * to the otherwise unused 32-bit TLB refill exception. | 725 | * to the otherwise unused 32-bit TLB refill exception. |
724 | */ | 726 | */ |
725 | #define MIPS64_REFILL_INSNS 32 | 727 | #define MIPS64_REFILL_INSNS 32 |
726 | 728 | ||
727 | static void __cpuinit build_r4000_tlb_refill_handler(void) | 729 | static void __cpuinit build_r4000_tlb_refill_handler(void) |
728 | { | 730 | { |
729 | u32 *p = tlb_handler; | 731 | u32 *p = tlb_handler; |
730 | struct uasm_label *l = labels; | 732 | struct uasm_label *l = labels; |
731 | struct uasm_reloc *r = relocs; | 733 | struct uasm_reloc *r = relocs; |
732 | u32 *f; | 734 | u32 *f; |
733 | unsigned int final_len; | 735 | unsigned int final_len; |
734 | 736 | ||
735 | memset(tlb_handler, 0, sizeof(tlb_handler)); | 737 | memset(tlb_handler, 0, sizeof(tlb_handler)); |
736 | memset(labels, 0, sizeof(labels)); | 738 | memset(labels, 0, sizeof(labels)); |
737 | memset(relocs, 0, sizeof(relocs)); | 739 | memset(relocs, 0, sizeof(relocs)); |
738 | memset(final_handler, 0, sizeof(final_handler)); | 740 | memset(final_handler, 0, sizeof(final_handler)); |
739 | 741 | ||
740 | /* | 742 | /* |
741 | * create the plain linear handler | 743 | * create the plain linear handler |
742 | */ | 744 | */ |
743 | if (bcm1250_m3_war()) { | 745 | if (bcm1250_m3_war()) { |
744 | UASM_i_MFC0(&p, K0, C0_BADVADDR); | 746 | UASM_i_MFC0(&p, K0, C0_BADVADDR); |
745 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); | 747 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); |
746 | uasm_i_xor(&p, K0, K0, K1); | 748 | uasm_i_xor(&p, K0, K0, K1); |
747 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); | 749 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); |
748 | uasm_il_bnez(&p, &r, K0, label_leave); | 750 | uasm_il_bnez(&p, &r, K0, label_leave); |
749 | /* No need for uasm_i_nop */ | 751 | /* No need for uasm_i_nop */ |
750 | } | 752 | } |
751 | 753 | ||
752 | #ifdef CONFIG_64BIT | 754 | #ifdef CONFIG_64BIT |
753 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ | 755 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ |
754 | #else | 756 | #else |
755 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ | 757 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ |
756 | #endif | 758 | #endif |
757 | 759 | ||
758 | #ifdef CONFIG_HUGETLB_PAGE | 760 | #ifdef CONFIG_HUGETLB_PAGE |
759 | build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); | 761 | build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); |
760 | #endif | 762 | #endif |
761 | 763 | ||
762 | build_get_ptep(&p, K0, K1); | 764 | build_get_ptep(&p, K0, K1); |
763 | build_update_entries(&p, K0, K1); | 765 | build_update_entries(&p, K0, K1); |
764 | build_tlb_write_entry(&p, &l, &r, tlb_random); | 766 | build_tlb_write_entry(&p, &l, &r, tlb_random); |
765 | uasm_l_leave(&l, p); | 767 | uasm_l_leave(&l, p); |
766 | uasm_i_eret(&p); /* return from trap */ | 768 | uasm_i_eret(&p); /* return from trap */ |
767 | 769 | ||
768 | #ifdef CONFIG_HUGETLB_PAGE | 770 | #ifdef CONFIG_HUGETLB_PAGE |
769 | uasm_l_tlb_huge_update(&l, p); | 771 | uasm_l_tlb_huge_update(&l, p); |
770 | UASM_i_LW(&p, K0, 0, K1); | 772 | UASM_i_LW(&p, K0, 0, K1); |
771 | build_huge_update_entries(&p, K0, K1); | 773 | build_huge_update_entries(&p, K0, K1); |
772 | build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random); | 774 | build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random); |
773 | #endif | 775 | #endif |
774 | 776 | ||
775 | #ifdef CONFIG_64BIT | 777 | #ifdef CONFIG_64BIT |
776 | build_get_pgd_vmalloc64(&p, &l, &r, K0, K1); | 778 | build_get_pgd_vmalloc64(&p, &l, &r, K0, K1); |
777 | #endif | 779 | #endif |
778 | 780 | ||
779 | /* | 781 | /* |
780 | * Overflow check: For the 64bit handler, we need at least one | 782 | * Overflow check: For the 64bit handler, we need at least one |
781 | * free instruction slot for the wrap-around branch. In the | 783 | * free instruction slot for the wrap-around branch. In the |
782 | * worst case, if the intended insertion point is a delay slot, we | 784 | * worst case, if the intended insertion point is a delay slot, we |
783 | * need three, with the second nop'ed and the third being | 785 | * need three, with the second nop'ed and the third being |
784 | * unused. | 786 | * unused. |
785 | */ | 787 | */ |
787 | /* Loongson2 ebase is different from r4k, so we have more space */ | 789 | /* Loongson2 ebase is different from r4k, so we have more space */ |
787 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | 789 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) |
788 | if ((p - tlb_handler) > 64) | 790 | if ((p - tlb_handler) > 64) |
789 | panic("TLB refill handler space exceeded"); | 791 | panic("TLB refill handler space exceeded"); |
790 | #else | 792 | #else |
791 | if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) | 793 | if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) |
792 | || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) | 794 | || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) |
793 | && uasm_insn_has_bdelay(relocs, | 795 | && uasm_insn_has_bdelay(relocs, |
794 | tlb_handler + MIPS64_REFILL_INSNS - 3))) | 796 | tlb_handler + MIPS64_REFILL_INSNS - 3))) |
795 | panic("TLB refill handler space exceeded"); | 797 | panic("TLB refill handler space exceeded"); |
796 | #endif | 798 | #endif |
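Restating the 64-bit budget: with MIPS64_REFILL_INSNS == 32 the handler may spill into the 32 slots of the unused 32-bit refill vector, giving 64 slots total, minus one for the wrap-around branch, or minus three when the branch would otherwise land in a delay slot. A self-contained sketch of the check:

    #include <stdbool.h>

    #define REFILL_INSNS 32   /* MIPS64_REFILL_INSNS */

    static bool refill_fits(long len, bool branch_lands_in_bdelay)
    {
        long budget = 2 * REFILL_INSNS - (branch_lands_in_bdelay ? 3 : 1);
        return len <= budget;
    }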
797 | 799 | ||
798 | /* | 800 | /* |
799 | * Now fold the handler in the TLB refill handler space. | 801 | * Now fold the handler in the TLB refill handler space. |
800 | */ | 802 | */ |
801 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | 803 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) |
802 | f = final_handler; | 804 | f = final_handler; |
803 | /* Simplest case, just copy the handler. */ | 805 | /* Simplest case, just copy the handler. */ |
804 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); | 806 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
805 | final_len = p - tlb_handler; | 807 | final_len = p - tlb_handler; |
806 | #else /* CONFIG_64BIT */ | 808 | #else /* CONFIG_64BIT */ |
807 | f = final_handler + MIPS64_REFILL_INSNS; | 809 | f = final_handler + MIPS64_REFILL_INSNS; |
808 | if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { | 810 | if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { |
809 | /* Just copy the handler. */ | 811 | /* Just copy the handler. */ |
810 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); | 812 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
811 | final_len = p - tlb_handler; | 813 | final_len = p - tlb_handler; |
812 | } else { | 814 | } else { |
813 | #if defined(CONFIG_HUGETLB_PAGE) | 815 | #if defined(CONFIG_HUGETLB_PAGE) |
814 | const enum label_id ls = label_tlb_huge_update; | 816 | const enum label_id ls = label_tlb_huge_update; |
815 | #else | 817 | #else |
816 | const enum label_id ls = label_vmalloc; | 818 | const enum label_id ls = label_vmalloc; |
817 | #endif | 819 | #endif |
818 | u32 *split; | 820 | u32 *split; |
819 | int ov = 0; | 821 | int ov = 0; |
820 | int i; | 822 | int i; |
821 | 823 | ||
822 | for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) | 824 | for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) |
823 | ; | 825 | ; |
824 | BUG_ON(i == ARRAY_SIZE(labels)); | 826 | BUG_ON(i == ARRAY_SIZE(labels)); |
825 | split = labels[i].addr; | 827 | split = labels[i].addr; |
826 | 828 | ||
827 | /* | 829 | /* |
828 | * See if we have overflowed one way or the other. | 830 | * See if we have overflowed one way or the other. |
829 | */ | 831 | */ |
830 | if (split > tlb_handler + MIPS64_REFILL_INSNS || | 832 | if (split > tlb_handler + MIPS64_REFILL_INSNS || |
831 | split < p - MIPS64_REFILL_INSNS) | 833 | split < p - MIPS64_REFILL_INSNS) |
832 | ov = 1; | 834 | ov = 1; |
833 | 835 | ||
834 | if (ov) { | 836 | if (ov) { |
835 | /* | 837 | /* |
836 | * Split two instructions before the end. One | 838 | * Split two instructions before the end. One |
837 | * for the branch and one for the instruction | 839 | * for the branch and one for the instruction |
838 | * in the delay slot. | 840 | * in the delay slot. |
839 | */ | 841 | */ |
840 | split = tlb_handler + MIPS64_REFILL_INSNS - 2; | 842 | split = tlb_handler + MIPS64_REFILL_INSNS - 2; |
841 | 843 | ||
842 | /* | 844 | /* |
843 | * If the branch would fall in a delay slot, | 845 | * If the branch would fall in a delay slot, |
844 | * we must back up an additional instruction | 846 | * we must back up an additional instruction |
845 | * so that it is no longer in a delay slot. | 847 | * so that it is no longer in a delay slot. |
846 | */ | 848 | */ |
847 | if (uasm_insn_has_bdelay(relocs, split - 1)) | 849 | if (uasm_insn_has_bdelay(relocs, split - 1)) |
848 | split--; | 850 | split--; |
849 | } | 851 | } |
850 | /* Copy first part of the handler. */ | 852 | /* Copy first part of the handler. */ |
851 | uasm_copy_handler(relocs, labels, tlb_handler, split, f); | 853 | uasm_copy_handler(relocs, labels, tlb_handler, split, f); |
852 | f += split - tlb_handler; | 854 | f += split - tlb_handler; |
853 | 855 | ||
854 | if (ov) { | 856 | if (ov) { |
855 | /* Insert branch. */ | 857 | /* Insert branch. */ |
856 | uasm_l_split(&l, final_handler); | 858 | uasm_l_split(&l, final_handler); |
857 | uasm_il_b(&f, &r, label_split); | 859 | uasm_il_b(&f, &r, label_split); |
858 | if (uasm_insn_has_bdelay(relocs, split)) | 860 | if (uasm_insn_has_bdelay(relocs, split)) |
859 | uasm_i_nop(&f); | 861 | uasm_i_nop(&f); |
860 | else { | 862 | else { |
861 | uasm_copy_handler(relocs, labels, | 863 | uasm_copy_handler(relocs, labels, |
862 | split, split + 1, f); | 864 | split, split + 1, f); |
863 | uasm_move_labels(labels, f, f + 1, -1); | 865 | uasm_move_labels(labels, f, f + 1, -1); |
864 | f++; | 866 | f++; |
865 | split++; | 867 | split++; |
866 | } | 868 | } |
867 | } | 869 | } |
868 | 870 | ||
869 | /* Copy the rest of the handler. */ | 871 | /* Copy the rest of the handler. */ |
870 | uasm_copy_handler(relocs, labels, split, p, final_handler); | 872 | uasm_copy_handler(relocs, labels, split, p, final_handler); |
871 | final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + | 873 | final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + |
872 | (p - split); | 874 | (p - split); |
873 | } | 875 | } |
874 | #endif /* CONFIG_64BIT */ | 876 | #endif /* CONFIG_64BIT */ |
875 | 877 | ||
876 | uasm_resolve_relocs(relocs, labels); | 878 | uasm_resolve_relocs(relocs, labels); |
877 | pr_debug("Wrote TLB refill handler (%u instructions).\n", | 879 | pr_debug("Wrote TLB refill handler (%u instructions).\n", |
878 | final_len); | 880 | final_len); |
879 | 881 | ||
880 | memcpy((void *)ebase, final_handler, 0x100); | 882 | memcpy((void *)ebase, final_handler, 0x100); |
881 | 883 | ||
882 | dump_handler((u32 *)ebase, 64); | 884 | dump_handler((u32 *)ebase, 64); |
883 | } | 885 | } |
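The folding logic above splits the handler at an existing label when it fits around the boundary, and otherwise forces a split just before it. The overflow rule, isolated as a sketch (bdelay_at() is an assumed stand-in for uasm_insn_has_bdelay(), for illustration only):

    #define REFILL_INSNS 32

    /* Split two slots before the boundary: one for the branch,
     * one for its delay slot; back up once more if the chosen
     * slot is itself a branch delay slot. */
    static int pick_split(int (*bdelay_at)(int idx))
    {
        int split = REFILL_INSNS - 2;
        if (bdelay_at(split - 1))
            split--;
        return split;
    }

After the split, a branch (plus a nop, or one relocated instruction, for its delay slot) stitches the copied first half to the remainder placed at the vector itself.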
884 | 886 | ||
885 | /* | 887 | /* |
886 | * TLB load/store/modify handlers. | 888 | * TLB load/store/modify handlers. |
887 | * | 889 | * |
888 | * Only the fastpath gets synthesized at runtime; the slowpath for | 890 | * Only the fastpath gets synthesized at runtime; the slowpath for |
889 | * do_page_fault remains normal asm. | 891 | * do_page_fault remains normal asm. |
890 | */ | 892 | */ |
891 | extern void tlb_do_page_fault_0(void); | 893 | extern void tlb_do_page_fault_0(void); |
892 | extern void tlb_do_page_fault_1(void); | 894 | extern void tlb_do_page_fault_1(void); |
893 | 895 | ||
894 | /* | 896 | /* |
895 | * 128 instructions for the fastpath handler is generous and should | 897 | * 128 instructions for the fastpath handler is generous and should |
896 | * never be exceeded. | 898 | * never be exceeded. |
897 | */ | 899 | */ |
898 | #define FASTPATH_SIZE 128 | 900 | #define FASTPATH_SIZE 128 |
899 | 901 | ||
900 | u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; | 902 | u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; |
901 | u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; | 903 | u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; |
902 | u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; | 904 | u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; |
903 | 905 | ||
904 | static void __cpuinit | 906 | static void __cpuinit |
905 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) | 907 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) |
906 | { | 908 | { |
907 | #ifdef CONFIG_SMP | 909 | #ifdef CONFIG_SMP |
908 | # ifdef CONFIG_64BIT_PHYS_ADDR | 910 | # ifdef CONFIG_64BIT_PHYS_ADDR |
909 | if (cpu_has_64bits) | 911 | if (cpu_has_64bits) |
910 | uasm_i_lld(p, pte, 0, ptr); | 912 | uasm_i_lld(p, pte, 0, ptr); |
911 | else | 913 | else |
912 | # endif | 914 | # endif |
913 | UASM_i_LL(p, pte, 0, ptr); | 915 | UASM_i_LL(p, pte, 0, ptr); |
914 | #else | 916 | #else |
915 | # ifdef CONFIG_64BIT_PHYS_ADDR | 917 | # ifdef CONFIG_64BIT_PHYS_ADDR |
916 | if (cpu_has_64bits) | 918 | if (cpu_has_64bits) |
917 | uasm_i_ld(p, pte, 0, ptr); | 919 | uasm_i_ld(p, pte, 0, ptr); |
918 | else | 920 | else |
919 | # endif | 921 | # endif |
920 | UASM_i_LW(p, pte, 0, ptr); | 922 | UASM_i_LW(p, pte, 0, ptr); |
921 | #endif | 923 | #endif |
922 | } | 924 | } |
923 | 925 | ||
924 | static void __cpuinit | 926 | static void __cpuinit |
925 | iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, | 927 | iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, |
926 | unsigned int mode) | 928 | unsigned int mode) |
927 | { | 929 | { |
928 | #ifdef CONFIG_64BIT_PHYS_ADDR | 930 | #ifdef CONFIG_64BIT_PHYS_ADDR |
929 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | 931 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); |
930 | #endif | 932 | #endif |
931 | 933 | ||
932 | uasm_i_ori(p, pte, pte, mode); | 934 | uasm_i_ori(p, pte, pte, mode); |
933 | #ifdef CONFIG_SMP | 935 | #ifdef CONFIG_SMP |
934 | # ifdef CONFIG_64BIT_PHYS_ADDR | 936 | # ifdef CONFIG_64BIT_PHYS_ADDR |
935 | if (cpu_has_64bits) | 937 | if (cpu_has_64bits) |
936 | uasm_i_scd(p, pte, 0, ptr); | 938 | uasm_i_scd(p, pte, 0, ptr); |
937 | else | 939 | else |
938 | # endif | 940 | # endif |
939 | UASM_i_SC(p, pte, 0, ptr); | 941 | UASM_i_SC(p, pte, 0, ptr); |
940 | 942 | ||
941 | if (r10000_llsc_war()) | 943 | if (r10000_llsc_war()) |
942 | uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); | 944 | uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); |
943 | else | 945 | else |
944 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | 946 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); |
945 | 947 | ||
946 | # ifdef CONFIG_64BIT_PHYS_ADDR | 948 | # ifdef CONFIG_64BIT_PHYS_ADDR |
947 | if (!cpu_has_64bits) { | 949 | if (!cpu_has_64bits) { |
948 | /* no uasm_i_nop needed */ | 950 | /* no uasm_i_nop needed */ |
949 | uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); | 951 | uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); |
950 | uasm_i_ori(p, pte, pte, hwmode); | 952 | uasm_i_ori(p, pte, pte, hwmode); |
951 | uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); | 953 | uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); |
952 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | 954 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); |
953 | /* no uasm_i_nop needed */ | 955 | /* no uasm_i_nop needed */ |
954 | uasm_i_lw(p, pte, 0, ptr); | 956 | uasm_i_lw(p, pte, 0, ptr); |
955 | } else | 957 | } else |
956 | uasm_i_nop(p); | 958 | uasm_i_nop(p); |
957 | # else | 959 | # else |
958 | uasm_i_nop(p); | 960 | uasm_i_nop(p); |
959 | # endif | 961 | # endif |
960 | #else | 962 | #else |
961 | # ifdef CONFIG_64BIT_PHYS_ADDR | 963 | # ifdef CONFIG_64BIT_PHYS_ADDR |
962 | if (cpu_has_64bits) | 964 | if (cpu_has_64bits) |
963 | uasm_i_sd(p, pte, 0, ptr); | 965 | uasm_i_sd(p, pte, 0, ptr); |
964 | else | 966 | else |
965 | # endif | 967 | # endif |
966 | UASM_i_SW(p, pte, 0, ptr); | 968 | UASM_i_SW(p, pte, 0, ptr); |
967 | 969 | ||
968 | # ifdef CONFIG_64BIT_PHYS_ADDR | 970 | # ifdef CONFIG_64BIT_PHYS_ADDR |
969 | if (!cpu_has_64bits) { | 971 | if (!cpu_has_64bits) { |
970 | uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); | 972 | uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); |
971 | uasm_i_ori(p, pte, pte, hwmode); | 973 | uasm_i_ori(p, pte, pte, hwmode); |
972 | uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); | 974 | uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); |
973 | uasm_i_lw(p, pte, 0, ptr); | 975 | uasm_i_lw(p, pte, 0, ptr); |
974 | } | 976 | } |
975 | # endif | 977 | # endif |
976 | #endif | 978 | #endif |
977 | } | 979 | } |
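On SMP, iPTE_LW()/iPTE_SW() form a classic ll/sc read-modify-write: load-linked the pte, OR in the new bits, store-conditional, and branch back to smp_pgtable_change to retry on failure. A portable C analogue of those semantics (a sketch using a GCC builtin, not the emitted ll/sc):

    #include <stdint.h>

    static void pte_set_bits(uint32_t *ptep, uint32_t mode)
    {
        uint32_t old, new_pte;
        do {
            old = *ptep;            /* ll / iPTE_LW */
            new_pte = old | mode;   /* uasm_i_ori   */
        } while (!__sync_bool_compare_and_swap(ptep, old, new_pte)); /* sc */
    }

The beqzl variant is only a workaround for the R10000 ll/sc erratum; the logic is unchanged.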
978 | 980 | ||
979 | /* | 981 | /* |
980 | * Check if PTE is present; if not, jump to LABEL. PTR points to | 982 | * Check if PTE is present; if not, jump to LABEL. PTR points to |
981 | * the page table where this PTE is located; PTE will be reloaded | 983 | * the page table where this PTE is located; PTE will be reloaded |
982 | * with its original value. | 984 | * with its original value. |
983 | */ | 985 | */ |
984 | static void __cpuinit | 986 | static void __cpuinit |
985 | build_pte_present(u32 **p, struct uasm_reloc **r, | 987 | build_pte_present(u32 **p, struct uasm_reloc **r, |
986 | unsigned int pte, unsigned int ptr, enum label_id lid) | 988 | unsigned int pte, unsigned int ptr, enum label_id lid) |
987 | { | 989 | { |
988 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 990 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); |
989 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 991 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); |
990 | uasm_il_bnez(p, r, pte, lid); | 992 | uasm_il_bnez(p, r, pte, lid); |
991 | iPTE_LW(p, pte, ptr); | 993 | iPTE_LW(p, pte, ptr); |
992 | } | 994 | } |
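The two-instruction test reads compactly in C: AND isolates the required bits, and XOR against the same mask leaves zero only if both were set, so a single bnez catches "not present or not readable":

    #include <stdbool.h>
    #include <stdint.h>

    static bool pte_unusable(uint32_t pte, uint32_t required_mask)
    {
        return ((pte & required_mask) ^ required_mask) != 0;
    }

With required_mask = _PAGE_PRESENT | _PAGE_READ this is exactly the andi/xori/bnez sequence above; build_pte_writable() below reuses it with _PAGE_WRITE in place of _PAGE_READ.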
993 | 995 | ||
994 | /* Make PTE valid, store result in PTR. */ | 996 | /* Make PTE valid, store result in PTR. */ |
995 | static void __cpuinit | 997 | static void __cpuinit |
996 | build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, | 998 | build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, |
997 | unsigned int ptr) | 999 | unsigned int ptr) |
998 | { | 1000 | { |
999 | unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; | 1001 | unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; |
1000 | 1002 | ||
1001 | iPTE_SW(p, r, pte, ptr, mode); | 1003 | iPTE_SW(p, r, pte, ptr, mode); |
1002 | } | 1004 | } |
1003 | 1005 | ||
1004 | /* | 1006 | /* |
1005 | * Check if PTE can be written to; if not, branch to LABEL. Regardless, | 1007 | * Check if PTE can be written to; if not, branch to LABEL. Regardless, |
1006 | * restore PTE with the value from PTR when done. | 1008 | * restore PTE with the value from PTR when done. |
1007 | */ | 1009 | */ |
1008 | static void __cpuinit | 1010 | static void __cpuinit |
1009 | build_pte_writable(u32 **p, struct uasm_reloc **r, | 1011 | build_pte_writable(u32 **p, struct uasm_reloc **r, |
1010 | unsigned int pte, unsigned int ptr, enum label_id lid) | 1012 | unsigned int pte, unsigned int ptr, enum label_id lid) |
1011 | { | 1013 | { |
1012 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); | 1014 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); |
1013 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); | 1015 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); |
1014 | uasm_il_bnez(p, r, pte, lid); | 1016 | uasm_il_bnez(p, r, pte, lid); |
1015 | iPTE_LW(p, pte, ptr); | 1017 | iPTE_LW(p, pte, ptr); |
1016 | } | 1018 | } |
1017 | 1019 | ||
1018 | /* Make PTE writable, update software status bits as well, then store | 1020 | /* Make PTE writable, update software status bits as well, then store |
1019 | * at PTR. | 1021 | * at PTR. |
1020 | */ | 1022 | */ |
1021 | static void __cpuinit | 1023 | static void __cpuinit |
1022 | build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, | 1024 | build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, |
1023 | unsigned int ptr) | 1025 | unsigned int ptr) |
1024 | { | 1026 | { |
1025 | unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | 1027 | unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
1026 | | _PAGE_DIRTY); | 1028 | | _PAGE_DIRTY); |
1027 | 1029 | ||
1028 | iPTE_SW(p, r, pte, ptr, mode); | 1030 | iPTE_SW(p, r, pte, ptr, mode); |
1029 | } | 1031 | } |
1030 | 1032 | ||
1031 | /* | 1033 | /* |
1032 | * Check if PTE can be modified; if not, branch to LABEL. Regardless, | 1034 | * Check if PTE can be modified; if not, branch to LABEL. Regardless, |
1033 | * restore PTE with the value from PTR when done. | 1035 | * restore PTE with the value from PTR when done. |
1034 | */ | 1036 | */ |
1035 | static void __cpuinit | 1037 | static void __cpuinit |
1036 | build_pte_modifiable(u32 **p, struct uasm_reloc **r, | 1038 | build_pte_modifiable(u32 **p, struct uasm_reloc **r, |
1037 | unsigned int pte, unsigned int ptr, enum label_id lid) | 1039 | unsigned int pte, unsigned int ptr, enum label_id lid) |
1038 | { | 1040 | { |
1039 | uasm_i_andi(p, pte, pte, _PAGE_WRITE); | 1041 | uasm_i_andi(p, pte, pte, _PAGE_WRITE); |
1040 | uasm_il_beqz(p, r, pte, lid); | 1042 | uasm_il_beqz(p, r, pte, lid); |
1041 | iPTE_LW(p, pte, ptr); | 1043 | iPTE_LW(p, pte, ptr); |
1042 | } | 1044 | } |
1043 | 1045 | ||
1044 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 1046 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1045 | /* | 1047 | /* |
1046 | * R3000 style TLB load/store/modify handlers. | 1048 | * R3000 style TLB load/store/modify handlers. |
1047 | */ | 1049 | */ |
1048 | 1050 | ||
1049 | /* | 1051 | /* |
1050 | * This places the pte into ENTRYLO0 and writes it with tlbwi. | 1052 | * This places the pte into ENTRYLO0 and writes it with tlbwi. |
1051 | * Then it returns. | 1053 | * Then it returns. |
1052 | */ | 1054 | */ |
1053 | static void __cpuinit | 1055 | static void __cpuinit |
1054 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) | 1056 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) |
1055 | { | 1057 | { |
1056 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | 1058 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1057 | uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ | 1059 | uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ |
1058 | uasm_i_tlbwi(p); | 1060 | uasm_i_tlbwi(p); |
1059 | uasm_i_jr(p, tmp); | 1061 | uasm_i_jr(p, tmp); |
1060 | uasm_i_rfe(p); /* branch delay */ | 1062 | uasm_i_rfe(p); /* branch delay */ |
1061 | } | 1063 | } |
1062 | 1064 | ||
1063 | /* | 1065 | /* |
1064 | * This places the pte into ENTRYLO0 and writes it with tlbwi | 1066 | * This places the pte into ENTRYLO0 and writes it with tlbwi |
1065 | * or tlbwr as appropriate. This is because the index register | 1067 | * or tlbwr as appropriate. This is because the index register |
1066 | * may have the probe fail bit set as a result of a trap on a | 1068 | * may have the probe fail bit set as a result of a trap on a |
1067 | * kseg2 access, i.e. without refill. Then it returns. | 1069 | * kseg2 access, i.e. without refill. Then it returns. |
1068 | */ | 1070 | */ |
1069 | static void __cpuinit | 1071 | static void __cpuinit |
1070 | build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, | 1072 | build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, |
1071 | struct uasm_reloc **r, unsigned int pte, | 1073 | struct uasm_reloc **r, unsigned int pte, |
1072 | unsigned int tmp) | 1074 | unsigned int tmp) |
1073 | { | 1075 | { |
1074 | uasm_i_mfc0(p, tmp, C0_INDEX); | 1076 | uasm_i_mfc0(p, tmp, C0_INDEX); |
1075 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | 1077 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1076 | uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ | 1078 | uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ |
1077 | uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ | 1079 | uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ |
1078 | uasm_i_tlbwi(p); /* cp0 delay */ | 1080 | uasm_i_tlbwi(p); /* cp0 delay */ |
1079 | uasm_i_jr(p, tmp); | 1081 | uasm_i_jr(p, tmp); |
1080 | uasm_i_rfe(p); /* branch delay */ | 1082 | uasm_i_rfe(p); /* branch delay */ |
1081 | uasm_l_r3000_write_probe_fail(l, *p); | 1083 | uasm_l_r3000_write_probe_fail(l, *p); |
1082 | uasm_i_tlbwr(p); /* cp0 delay */ | 1084 | uasm_i_tlbwr(p); /* cp0 delay */ |
1083 | uasm_i_jr(p, tmp); | 1085 | uasm_i_jr(p, tmp); |
1084 | uasm_i_rfe(p); /* branch delay */ | 1086 | uasm_i_rfe(p); /* branch delay */ |
1085 | } | 1087 | } |
1086 | 1088 | ||
1087 | static void __cpuinit | 1089 | static void __cpuinit |
1088 | build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, | 1090 | build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, |
1089 | unsigned int ptr) | 1091 | unsigned int ptr) |
1090 | { | 1092 | { |
1091 | long pgdc = (long)pgd_current; | 1093 | long pgdc = (long)pgd_current; |
1092 | 1094 | ||
1093 | uasm_i_mfc0(p, pte, C0_BADVADDR); | 1095 | uasm_i_mfc0(p, pte, C0_BADVADDR); |
1094 | uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ | 1096 | uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ |
1095 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | 1097 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); |
1096 | uasm_i_srl(p, pte, pte, 22); /* load delay */ | 1098 | uasm_i_srl(p, pte, pte, 22); /* load delay */ |
1097 | uasm_i_sll(p, pte, pte, 2); | 1099 | uasm_i_sll(p, pte, pte, 2); |
1098 | uasm_i_addu(p, ptr, ptr, pte); | 1100 | uasm_i_addu(p, ptr, ptr, pte); |
1099 | uasm_i_mfc0(p, pte, C0_CONTEXT); | 1101 | uasm_i_mfc0(p, pte, C0_CONTEXT); |
1100 | uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ | 1102 | uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ |
1101 | uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ | 1103 | uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ |
1102 | uasm_i_addu(p, ptr, ptr, pte); | 1104 | uasm_i_addu(p, ptr, ptr, pte); |
1103 | uasm_i_lw(p, pte, 0, ptr); | 1105 | uasm_i_lw(p, pte, 0, ptr); |
1104 | uasm_i_tlbp(p); /* load delay */ | 1106 | uasm_i_tlbp(p); /* load delay */ |
1105 | } | 1107 | } |
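A C restatement of the R3000 walk: 4KB pages with a two-level table mean BadVAddr >> 22 indexes the pgd (scaled to 4-byte entries), and CP0 Context, masked with 0xffc, already carries the pte byte offset:

    #include <stdint.h>

    static uint32_t *r3000_pte_slot(uint32_t *pgd_base,
                                    uint32_t badvaddr, uint32_t context)
    {
        uint32_t pgd_off = (badvaddr >> 22) << 2;       /* srl; sll  */
        uint32_t *pte_base = (uint32_t *)
            *(uint32_t *)((char *)pgd_base + pgd_off);  /* lw 0(ptr) */
        return (uint32_t *)((char *)pte_base + (context & 0xffc));
    }

The interleaving with mfc0/lw in the emitted code exists only to fill the R3000's cp0 and load delay slots with useful work.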
1106 | 1108 | ||
1107 | static void __cpuinit build_r3000_tlb_load_handler(void) | 1109 | static void __cpuinit build_r3000_tlb_load_handler(void) |
1108 | { | 1110 | { |
1109 | u32 *p = handle_tlbl; | 1111 | u32 *p = handle_tlbl; |
1110 | struct uasm_label *l = labels; | 1112 | struct uasm_label *l = labels; |
1111 | struct uasm_reloc *r = relocs; | 1113 | struct uasm_reloc *r = relocs; |
1112 | 1114 | ||
1113 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | 1115 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); |
1114 | memset(labels, 0, sizeof(labels)); | 1116 | memset(labels, 0, sizeof(labels)); |
1115 | memset(relocs, 0, sizeof(relocs)); | 1117 | memset(relocs, 0, sizeof(relocs)); |
1116 | 1118 | ||
1117 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1119 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1118 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | 1120 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
1119 | uasm_i_nop(&p); /* load delay */ | 1121 | uasm_i_nop(&p); /* load delay */ |
1120 | build_make_valid(&p, &r, K0, K1); | 1122 | build_make_valid(&p, &r, K0, K1); |
1121 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); | 1123 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1122 | 1124 | ||
1123 | uasm_l_nopage_tlbl(&l, p); | 1125 | uasm_l_nopage_tlbl(&l, p); |
1124 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | 1126 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); |
1125 | uasm_i_nop(&p); | 1127 | uasm_i_nop(&p); |
1126 | 1128 | ||
1127 | if ((p - handle_tlbl) > FASTPATH_SIZE) | 1129 | if ((p - handle_tlbl) > FASTPATH_SIZE) |
1128 | panic("TLB load handler fastpath space exceeded"); | 1130 | panic("TLB load handler fastpath space exceeded"); |
1129 | 1131 | ||
1130 | uasm_resolve_relocs(relocs, labels); | 1132 | uasm_resolve_relocs(relocs, labels); |
1131 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | 1133 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", |
1132 | (unsigned int)(p - handle_tlbl)); | 1134 | (unsigned int)(p - handle_tlbl)); |
1133 | 1135 | ||
1134 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); | 1136 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | static void __cpuinit build_r3000_tlb_store_handler(void) | 1139 | static void __cpuinit build_r3000_tlb_store_handler(void) |
1138 | { | 1140 | { |
1139 | u32 *p = handle_tlbs; | 1141 | u32 *p = handle_tlbs; |
1140 | struct uasm_label *l = labels; | 1142 | struct uasm_label *l = labels; |
1141 | struct uasm_reloc *r = relocs; | 1143 | struct uasm_reloc *r = relocs; |
1142 | 1144 | ||
1143 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | 1145 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); |
1144 | memset(labels, 0, sizeof(labels)); | 1146 | memset(labels, 0, sizeof(labels)); |
1145 | memset(relocs, 0, sizeof(relocs)); | 1147 | memset(relocs, 0, sizeof(relocs)); |
1146 | 1148 | ||
1147 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1149 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1148 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); | 1150 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); |
1149 | uasm_i_nop(&p); /* load delay */ | 1151 | uasm_i_nop(&p); /* load delay */ |
1150 | build_make_write(&p, &r, K0, K1); | 1152 | build_make_write(&p, &r, K0, K1); |
1151 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); | 1153 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1152 | 1154 | ||
1153 | uasm_l_nopage_tlbs(&l, p); | 1155 | uasm_l_nopage_tlbs(&l, p); |
1154 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1156 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
1155 | uasm_i_nop(&p); | 1157 | uasm_i_nop(&p); |
1156 | 1158 | ||
1157 | if ((p - handle_tlbs) > FASTPATH_SIZE) | 1159 | if ((p - handle_tlbs) > FASTPATH_SIZE) |
1158 | panic("TLB store handler fastpath space exceeded"); | 1160 | panic("TLB store handler fastpath space exceeded"); |
1159 | 1161 | ||
1160 | uasm_resolve_relocs(relocs, labels); | 1162 | uasm_resolve_relocs(relocs, labels); |
1161 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | 1163 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", |
1162 | (unsigned int)(p - handle_tlbs)); | 1164 | (unsigned int)(p - handle_tlbs)); |
1163 | 1165 | ||
1164 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); | 1166 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1165 | } | 1167 | } |
1166 | 1168 | ||
1167 | static void __cpuinit build_r3000_tlb_modify_handler(void) | 1169 | static void __cpuinit build_r3000_tlb_modify_handler(void) |
1168 | { | 1170 | { |
1169 | u32 *p = handle_tlbm; | 1171 | u32 *p = handle_tlbm; |
1170 | struct uasm_label *l = labels; | 1172 | struct uasm_label *l = labels; |
1171 | struct uasm_reloc *r = relocs; | 1173 | struct uasm_reloc *r = relocs; |
1172 | 1174 | ||
1173 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | 1175 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); |
1174 | memset(labels, 0, sizeof(labels)); | 1176 | memset(labels, 0, sizeof(labels)); |
1175 | memset(relocs, 0, sizeof(relocs)); | 1177 | memset(relocs, 0, sizeof(relocs)); |
1176 | 1178 | ||
1177 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1179 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1178 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); | 1180 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); |
1179 | uasm_i_nop(&p); /* load delay */ | 1181 | uasm_i_nop(&p); /* load delay */ |
1180 | build_make_write(&p, &r, K0, K1); | 1182 | build_make_write(&p, &r, K0, K1); |
1181 | build_r3000_pte_reload_tlbwi(&p, K0, K1); | 1183 | build_r3000_pte_reload_tlbwi(&p, K0, K1); |
1182 | 1184 | ||
1183 | uasm_l_nopage_tlbm(&l, p); | 1185 | uasm_l_nopage_tlbm(&l, p); |
1184 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1186 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
1185 | uasm_i_nop(&p); | 1187 | uasm_i_nop(&p); |
1186 | 1188 | ||
1187 | if ((p - handle_tlbm) > FASTPATH_SIZE) | 1189 | if ((p - handle_tlbm) > FASTPATH_SIZE) |
1188 | panic("TLB modify handler fastpath space exceeded"); | 1190 | panic("TLB modify handler fastpath space exceeded"); |
1189 | 1191 | ||
1190 | uasm_resolve_relocs(relocs, labels); | 1192 | uasm_resolve_relocs(relocs, labels); |
1191 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | 1193 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", |
1192 | (unsigned int)(p - handle_tlbm)); | 1194 | (unsigned int)(p - handle_tlbm)); |
1193 | 1195 | ||
1194 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); | 1196 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1195 | } | 1197 | } |
1196 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ | 1198 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
1197 | 1199 | ||
1198 | /* | 1200 | /* |
1199 | * R4000 style TLB load/store/modify handlers. | 1201 | * R4000 style TLB load/store/modify handlers. |
1200 | */ | 1202 | */ |
1201 | static void __cpuinit | 1203 | static void __cpuinit |
1202 | build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, | 1204 | build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, |
1203 | struct uasm_reloc **r, unsigned int pte, | 1205 | struct uasm_reloc **r, unsigned int pte, |
1204 | unsigned int ptr) | 1206 | unsigned int ptr) |
1205 | { | 1207 | { |
1206 | #ifdef CONFIG_64BIT | 1208 | #ifdef CONFIG_64BIT |
1207 | build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */ | 1209 | build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */ |
1208 | #else | 1210 | #else |
1209 | build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ | 1211 | build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ |
1210 | #endif | 1212 | #endif |
1211 | 1213 | ||
1212 | #ifdef CONFIG_HUGETLB_PAGE | 1214 | #ifdef CONFIG_HUGETLB_PAGE |
1213 | /* | 1215 | /* |
1214 | * For huge tlb entries, pmd doesn't contain an address but | 1216 | * For huge tlb entries, pmd doesn't contain an address but |
1215 | * instead contains the tlb pte. Check the PAGE_HUGE bit and | 1217 | * instead contains the tlb pte. Check the PAGE_HUGE bit and |
1216 | * see if we need to jump to huge tlb processing. | 1218 | * see if we need to jump to huge tlb processing. |
1217 | */ | 1219 | */ |
1218 | build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); | 1220 | build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); |
1219 | #endif | 1221 | #endif |
1220 | 1222 | ||
1221 | UASM_i_MFC0(p, pte, C0_BADVADDR); | 1223 | UASM_i_MFC0(p, pte, C0_BADVADDR); |
1222 | UASM_i_LW(p, ptr, 0, ptr); | 1224 | UASM_i_LW(p, ptr, 0, ptr); |
1223 | UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); | 1225 | UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); |
1224 | uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); | 1226 | uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); |
1225 | UASM_i_ADDU(p, ptr, ptr, pte); | 1227 | UASM_i_ADDU(p, ptr, ptr, pte); |
1226 | 1228 | ||
1227 | #ifdef CONFIG_SMP | 1229 | #ifdef CONFIG_SMP |
1228 | uasm_l_smp_pgtable_change(l, *p); | 1230 | uasm_l_smp_pgtable_change(l, *p); |
1229 | #endif | 1231 | #endif |
1230 | iPTE_LW(p, pte, ptr); /* get even pte */ | 1232 | iPTE_LW(p, pte, ptr); /* get even pte */ |
1231 | if (!m4kc_tlbp_war()) | 1233 | if (!m4kc_tlbp_war()) |
1232 | build_tlb_probe_entry(p); | 1234 | build_tlb_probe_entry(p); |
1233 | } | 1235 | } |
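The pte lookup at the end of the head is the last level of the walk started in build_get_pmde64()/build_get_pgde32(). As C, with assumed example values (8-byte ptes, PTE_ORDER == 0):

    #include <stdint.h>

    #define EX_PAGE_SHIFT   12
    #define EX_PTE_ORDER    0    /* one page of ptes */
    #define EX_PTE_T_LOG2   3    /* 8-byte pte */
    #define EX_PTRS_PER_PTE 512

    static uint64_t *pte_slot(uint64_t *pte_base, uint64_t badvaddr)
    {
        uint64_t off = (badvaddr >>
                (EX_PAGE_SHIFT + EX_PTE_ORDER - EX_PTE_T_LOG2)) &
                (uint64_t)((EX_PTRS_PER_PTE - 1) << EX_PTE_T_LOG2);
        return (uint64_t *)((char *)pte_base + off);
    }

The resulting pointer may land on either pte of an even/odd pair; the tail below aligns it back down before loading both.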
1234 | 1236 | ||
1235 | static void __cpuinit | 1237 | static void __cpuinit |
1236 | build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, | 1238 | build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, |
1237 | struct uasm_reloc **r, unsigned int tmp, | 1239 | struct uasm_reloc **r, unsigned int tmp, |
1238 | unsigned int ptr) | 1240 | unsigned int ptr) |
1239 | { | 1241 | { |
1240 | uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); | 1242 | uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); |
1241 | uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); | 1243 | uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); |
1242 | build_update_entries(p, tmp, ptr); | 1244 | build_update_entries(p, tmp, ptr); |
1243 | build_tlb_write_entry(p, l, r, tlb_indexed); | 1245 | build_tlb_write_entry(p, l, r, tlb_indexed); |
1244 | uasm_l_leave(l, *p); | 1246 | uasm_l_leave(l, *p); |
1245 | uasm_i_eret(p); /* return from trap */ | 1247 | uasm_i_eret(p); /* return from trap */ |
1246 | 1248 | ||
1247 | #ifdef CONFIG_64BIT | 1249 | #ifdef CONFIG_64BIT |
1248 | build_get_pgd_vmalloc64(p, l, r, tmp, ptr); | 1250 | build_get_pgd_vmalloc64(p, l, r, tmp, ptr); |
1249 | #endif | 1251 | #endif |
1250 | } | 1252 | } |
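The ori/xori pair at the top of the tail rounds ptr down to the start of the even/odd pte pair without needing a scratch register or a wide mask: OR forces the sizeof(pte_t) bit on, XOR then clears it. In C:

    #include <stddef.h>
    #include <stdint.h>

    static uintptr_t align_to_pte_pair(uintptr_t ptr, size_t pte_size)
    {
        ptr |= pte_size;   /* uasm_i_ori  */
        ptr ^= pte_size;   /* uasm_i_xori */
        return ptr;        /* == ptr & ~pte_size for a power-of-two size */
    }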
1251 | 1253 | ||
1252 | static void __cpuinit build_r4000_tlb_load_handler(void) | 1254 | static void __cpuinit build_r4000_tlb_load_handler(void) |
1253 | { | 1255 | { |
1254 | u32 *p = handle_tlbl; | 1256 | u32 *p = handle_tlbl; |
1255 | struct uasm_label *l = labels; | 1257 | struct uasm_label *l = labels; |
1256 | struct uasm_reloc *r = relocs; | 1258 | struct uasm_reloc *r = relocs; |
1257 | 1259 | ||
1258 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | 1260 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); |
1259 | memset(labels, 0, sizeof(labels)); | 1261 | memset(labels, 0, sizeof(labels)); |
1260 | memset(relocs, 0, sizeof(relocs)); | 1262 | memset(relocs, 0, sizeof(relocs)); |
1261 | 1263 | ||
1262 | if (bcm1250_m3_war()) { | 1264 | if (bcm1250_m3_war()) { |
1263 | UASM_i_MFC0(&p, K0, C0_BADVADDR); | 1265 | UASM_i_MFC0(&p, K0, C0_BADVADDR); |
1264 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); | 1266 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); |
1265 | uasm_i_xor(&p, K0, K0, K1); | 1267 | uasm_i_xor(&p, K0, K0, K1); |
1266 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); | 1268 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); |
1267 | uasm_il_bnez(&p, &r, K0, label_leave); | 1269 | uasm_il_bnez(&p, &r, K0, label_leave); |
1268 | /* No need for uasm_i_nop */ | 1270 | /* No need for uasm_i_nop */ |
1269 | } | 1271 | } |
1270 | 1272 | ||
1271 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | 1273 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); |
1272 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | 1274 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
1273 | if (m4kc_tlbp_war()) | 1275 | if (m4kc_tlbp_war()) |
1274 | build_tlb_probe_entry(&p); | 1276 | build_tlb_probe_entry(&p); |
1275 | build_make_valid(&p, &r, K0, K1); | 1277 | build_make_valid(&p, &r, K0, K1); |
1276 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | 1278 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); |
1277 | 1279 | ||
1278 | #ifdef CONFIG_HUGETLB_PAGE | 1280 | #ifdef CONFIG_HUGETLB_PAGE |
1279 | /* | 1281 | /* |
1280 | * This is the entry point when build_r4000_tlbchange_handler_head | 1282 | * This is the entry point when build_r4000_tlbchange_handler_head |
1281 | * spots a huge page. | 1283 | * spots a huge page. |
1282 | */ | 1284 | */ |
1283 | uasm_l_tlb_huge_update(&l, p); | 1285 | uasm_l_tlb_huge_update(&l, p); |
1284 | iPTE_LW(&p, K0, K1); | 1286 | iPTE_LW(&p, K0, K1); |
1285 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | 1287 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
1286 | build_tlb_probe_entry(&p); | 1288 | build_tlb_probe_entry(&p); |
1287 | uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); | 1289 | uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); |
1288 | build_huge_handler_tail(&p, &r, &l, K0, K1); | 1290 | build_huge_handler_tail(&p, &r, &l, K0, K1); |
1289 | #endif | 1291 | #endif |
1290 | 1292 | ||
1291 | uasm_l_nopage_tlbl(&l, p); | 1293 | uasm_l_nopage_tlbl(&l, p); |
1292 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | 1294 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); |
1293 | uasm_i_nop(&p); | 1295 | uasm_i_nop(&p); |
1294 | 1296 | ||
1295 | if ((p - handle_tlbl) > FASTPATH_SIZE) | 1297 | if ((p - handle_tlbl) > FASTPATH_SIZE) |
1296 | panic("TLB load handler fastpath space exceeded"); | 1298 | panic("TLB load handler fastpath space exceeded"); |
1297 | 1299 | ||
1298 | uasm_resolve_relocs(relocs, labels); | 1300 | uasm_resolve_relocs(relocs, labels); |
1299 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | 1301 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", |
1300 | (unsigned int)(p - handle_tlbl)); | 1302 | (unsigned int)(p - handle_tlbl)); |
1301 | 1303 | ||
1302 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); | 1304 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1303 | } | 1305 | } |
1304 | 1306 | ||
1305 | static void __cpuinit build_r4000_tlb_store_handler(void) | 1307 | static void __cpuinit build_r4000_tlb_store_handler(void) |
1306 | { | 1308 | { |
1307 | u32 *p = handle_tlbs; | 1309 | u32 *p = handle_tlbs; |
1308 | struct uasm_label *l = labels; | 1310 | struct uasm_label *l = labels; |
1309 | struct uasm_reloc *r = relocs; | 1311 | struct uasm_reloc *r = relocs; |
1310 | 1312 | ||
1311 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | 1313 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); |
1312 | memset(labels, 0, sizeof(labels)); | 1314 | memset(labels, 0, sizeof(labels)); |
1313 | memset(relocs, 0, sizeof(relocs)); | 1315 | memset(relocs, 0, sizeof(relocs)); |
1314 | 1316 | ||
1315 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | 1317 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); |
1316 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); | 1318 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); |
1317 | if (m4kc_tlbp_war()) | 1319 | if (m4kc_tlbp_war()) |
1318 | build_tlb_probe_entry(&p); | 1320 | build_tlb_probe_entry(&p); |
1319 | build_make_write(&p, &r, K0, K1); | 1321 | build_make_write(&p, &r, K0, K1); |
1320 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | 1322 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); |
1321 | 1323 | ||
1322 | #ifdef CONFIG_HUGETLB_PAGE | 1324 | #ifdef CONFIG_HUGETLB_PAGE |
1323 | /* | 1325 | /* |
1324 | * This is the entry point when | 1326 | * This is the entry point when |
1325 | * build_r4000_tlbchange_handler_head spots a huge page. | 1327 | * build_r4000_tlbchange_handler_head spots a huge page. |
1326 | */ | 1328 | */ |
1327 | uasm_l_tlb_huge_update(&l, p); | 1329 | uasm_l_tlb_huge_update(&l, p); |
1328 | iPTE_LW(&p, K0, K1); | 1330 | iPTE_LW(&p, K0, K1); |
1329 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); | 1331 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); |
1330 | build_tlb_probe_entry(&p); | 1332 | build_tlb_probe_entry(&p); |
1331 | uasm_i_ori(&p, K0, K0, | 1333 | uasm_i_ori(&p, K0, K0, |
1332 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | 1334 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); |
1333 | build_huge_handler_tail(&p, &r, &l, K0, K1); | 1335 | build_huge_handler_tail(&p, &r, &l, K0, K1); |
1334 | #endif | 1336 | #endif |
1335 | 1337 | ||
1336 | uasm_l_nopage_tlbs(&l, p); | 1338 | uasm_l_nopage_tlbs(&l, p); |
1337 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1339 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
1338 | uasm_i_nop(&p); | 1340 | uasm_i_nop(&p); |
1339 | 1341 | ||
1340 | if ((p - handle_tlbs) > FASTPATH_SIZE) | 1342 | if ((p - handle_tlbs) > FASTPATH_SIZE) |
1341 | panic("TLB store handler fastpath space exceeded"); | 1343 | panic("TLB store handler fastpath space exceeded"); |
1342 | 1344 | ||
1343 | uasm_resolve_relocs(relocs, labels); | 1345 | uasm_resolve_relocs(relocs, labels); |
1344 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | 1346 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", |
1345 | (unsigned int)(p - handle_tlbs)); | 1347 | (unsigned int)(p - handle_tlbs)); |
1346 | 1348 | ||
1347 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); | 1349 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1348 | } | 1350 | } |
1349 | 1351 | ||
1350 | static void __cpuinit build_r4000_tlb_modify_handler(void) | 1352 | static void __cpuinit build_r4000_tlb_modify_handler(void) |
1351 | { | 1353 | { |
1352 | u32 *p = handle_tlbm; | 1354 | u32 *p = handle_tlbm; |
1353 | struct uasm_label *l = labels; | 1355 | struct uasm_label *l = labels; |
1354 | struct uasm_reloc *r = relocs; | 1356 | struct uasm_reloc *r = relocs; |
1355 | 1357 | ||
1356 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | 1358 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); |
1357 | memset(labels, 0, sizeof(labels)); | 1359 | memset(labels, 0, sizeof(labels)); |
1358 | memset(relocs, 0, sizeof(relocs)); | 1360 | memset(relocs, 0, sizeof(relocs)); |
1359 | 1361 | ||
1360 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | 1362 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); |
1361 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); | 1363 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); |
1362 | if (m4kc_tlbp_war()) | 1364 | if (m4kc_tlbp_war()) |
1363 | build_tlb_probe_entry(&p); | 1365 | build_tlb_probe_entry(&p); |
1364 | /* Present and writable bits set, set accessed and dirty bits. */ | 1366 | /* Present and writable bits set, set accessed and dirty bits. */ |
1365 | build_make_write(&p, &r, K0, K1); | 1367 | build_make_write(&p, &r, K0, K1); |
1366 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | 1368 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); |
1367 | 1369 | ||
1368 | #ifdef CONFIG_HUGETLB_PAGE | 1370 | #ifdef CONFIG_HUGETLB_PAGE |
1369 | /* | 1371 | /* |
1370 | * This is the entry point when | 1372 | * This is the entry point when |
1371 | * build_r4000_tlbchange_handler_head spots a huge page. | 1373 | * build_r4000_tlbchange_handler_head spots a huge page. |
1372 | */ | 1374 | */ |
1373 | uasm_l_tlb_huge_update(&l, p); | 1375 | uasm_l_tlb_huge_update(&l, p); |
1374 | iPTE_LW(&p, K0, K1); | 1376 | iPTE_LW(&p, K0, K1); |
1375 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); | 1377 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); |
1376 | build_tlb_probe_entry(&p); | 1378 | build_tlb_probe_entry(&p); |
1377 | uasm_i_ori(&p, K0, K0, | 1379 | uasm_i_ori(&p, K0, K0, |
1378 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | 1380 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); |
1379 | build_huge_handler_tail(&p, &r, &l, K0, K1); | 1381 | build_huge_handler_tail(&p, &r, &l, K0, K1); |
1380 | #endif | 1382 | #endif |
1381 | 1383 | ||
1382 | uasm_l_nopage_tlbm(&l, p); | 1384 | uasm_l_nopage_tlbm(&l, p); |
1383 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1385 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
1384 | uasm_i_nop(&p); | 1386 | uasm_i_nop(&p); |
1385 | 1387 | ||
1386 | if ((p - handle_tlbm) > FASTPATH_SIZE) | 1388 | if ((p - handle_tlbm) > FASTPATH_SIZE) |
1387 | panic("TLB modify handler fastpath space exceeded"); | 1389 | panic("TLB modify handler fastpath space exceeded"); |
1388 | 1390 | ||
1389 | uasm_resolve_relocs(relocs, labels); | 1391 | uasm_resolve_relocs(relocs, labels); |
1390 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | 1392 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", |
1391 | (unsigned int)(p - handle_tlbm)); | 1393 | (unsigned int)(p - handle_tlbm)); |
1392 | 1394 | ||
1393 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); | 1395 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1394 | } | 1396 | } |
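
Both huge-page slow paths above mark the PTE with a single ori covering all four status bits before handing off to build_huge_handler_tail. The sketch below shows the effect of that instruction on a loaded PTE value; the bit positions are illustrative assumptions only, since the real layout comes from arch/mips/include/asm/pgtable-bits.h and varies with kernel configuration:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative bit positions only; the real definitions live in
 * arch/mips/include/asm/pgtable-bits.h and depend on configuration.
 */
#define _PAGE_ACCESSED	(1u << 0)
#define _PAGE_MODIFIED	(1u << 1)
#define _PAGE_VALID	(1u << 2)
#define _PAGE_DIRTY	(1u << 3)

int main(void)
{
	uint32_t pte = 0x12340000;	/* hypothetical PFN bits */

	/* Effect of the generated "ori k0, k0, ..." on the loaded PTE. */
	pte |= _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY;

	printf("pte = 0x%08x\n", pte);
	return 0;
}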
1395 | 1397 | ||
1396 | void __cpuinit build_tlb_refill_handler(void) | 1398 | void __cpuinit build_tlb_refill_handler(void) |
1397 | { | 1399 | { |
1398 | /* | 1400 | /* |
1399 | * The refill handler is generated per-CPU; multi-node systems | 1401 | * The refill handler is generated per-CPU; multi-node systems |
1400 | * may have local storage for it. The other handlers are only | 1402 | * may have local storage for it. The other handlers are only |
1401 | * needed once. | 1403 | * needed once. |
1402 | */ | 1404 | */ |
1403 | static int run_once = 0; | 1405 | static int run_once = 0; |
1404 | 1406 | ||
1405 | switch (current_cpu_type()) { | 1407 | switch (current_cpu_type()) { |
1406 | case CPU_R2000: | 1408 | case CPU_R2000: |
1407 | case CPU_R3000: | 1409 | case CPU_R3000: |
1408 | case CPU_R3000A: | 1410 | case CPU_R3000A: |
1409 | case CPU_R3081E: | 1411 | case CPU_R3081E: |
1410 | case CPU_TX3912: | 1412 | case CPU_TX3912: |
1411 | case CPU_TX3922: | 1413 | case CPU_TX3922: |
1412 | case CPU_TX3927: | 1414 | case CPU_TX3927: |
1413 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 1415 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1414 | build_r3000_tlb_refill_handler(); | 1416 | build_r3000_tlb_refill_handler(); |
1415 | if (!run_once) { | 1417 | if (!run_once) { |
1416 | build_r3000_tlb_load_handler(); | 1418 | build_r3000_tlb_load_handler(); |
1417 | build_r3000_tlb_store_handler(); | 1419 | build_r3000_tlb_store_handler(); |
1418 | build_r3000_tlb_modify_handler(); | 1420 | build_r3000_tlb_modify_handler(); |
1419 | run_once++; | 1421 | run_once++; |
1420 | } | 1422 | } |
1421 | #else | 1423 | #else |
1422 | panic("No R3000 TLB refill handler"); | 1424 | panic("No R3000 TLB refill handler"); |
1423 | #endif | 1425 | #endif |
1424 | break; | 1426 | break; |
1425 | 1427 | ||
1426 | case CPU_R6000: | 1428 | case CPU_R6000: |
1427 | case CPU_R6000A: | 1429 | case CPU_R6000A: |
1428 | panic("No R6000 TLB refill handler yet"); | 1430 | panic("No R6000 TLB refill handler yet"); |
1429 | break; | 1431 | break; |
1430 | 1432 | ||
1431 | case CPU_R8000: | 1433 | case CPU_R8000: |
1432 | panic("No R8000 TLB refill handler yet"); | 1434 | panic("No R8000 TLB refill handler yet"); |
1433 | break; | 1435 | break; |
1434 | 1436 | ||
1435 | default: | 1437 | default: |
1436 | build_r4000_tlb_refill_handler(); | 1438 | build_r4000_tlb_refill_handler(); |
1437 | if (!run_once) { | 1439 | if (!run_once) { |
1438 | build_r4000_tlb_load_handler(); | 1440 | build_r4000_tlb_load_handler(); |
1439 | build_r4000_tlb_store_handler(); | 1441 | build_r4000_tlb_store_handler(); |
1440 | build_r4000_tlb_modify_handler(); | 1442 | build_r4000_tlb_modify_handler(); |
1441 | run_once++; | 1443 | run_once++; |
1442 | } | 1444 | } |
1443 | } | 1445 | } |
1444 | } | 1446 | } |
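
build_tlb_refill_handler() is called once for every CPU that comes up, which is why the refill handler is rebuilt each time while the static run_once flag guards the shared load/store/modify fastpaths. A stripped-down sketch of that dispatch shape (the function names here are hypothetical, not the kernel's):

#include <stdio.h>

static void build_refill(void) { puts("refill handler (per CPU)"); }
static void build_shared(void) { puts("load/store/modify handlers (once)"); }

/* Called once per CPU during bring-up, mirroring the shape of
 * build_tlb_refill_handler() above. */
static void per_cpu_setup(void)
{
	static int run_once;	/* persists across calls, starts at 0 */

	build_refill();		/* regenerated for every CPU */
	if (!run_once) {
		build_shared();	/* only the first caller gets here */
		run_once++;
	}
}

int main(void)
{
	per_cpu_setup();	/* CPU 0: builds everything */
	per_cpu_setup();	/* CPU 1: refill handler only */
	return 0;
}

Keeping run_once as a function-local static mirrors the kernel code: the flag persists across calls without widening its scope beyond the one function that needs it.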
1445 | 1447 | ||
1446 | void __cpuinit flush_tlb_handlers(void) | 1448 | void __cpuinit flush_tlb_handlers(void) |
1447 | { | 1449 | { |
1448 | local_flush_icache_range((unsigned long)handle_tlbl, | 1450 | local_flush_icache_range((unsigned long)handle_tlbl, |
1449 | (unsigned long)handle_tlbl + sizeof(handle_tlbl)); | 1451 | (unsigned long)handle_tlbl + sizeof(handle_tlbl)); |
1450 | local_flush_icache_range((unsigned long)handle_tlbs, | 1452 | local_flush_icache_range((unsigned long)handle_tlbs, |
1451 | (unsigned long)handle_tlbs + sizeof(handle_tlbs)); | 1453 | (unsigned long)handle_tlbs + sizeof(handle_tlbs)); |
1452 | local_flush_icache_range((unsigned long)handle_tlbm, | 1454 | local_flush_icache_range((unsigned long)handle_tlbm, |
1453 | (unsigned long)handle_tlbm + sizeof(handle_tlbm)); | 1455 | (unsigned long)handle_tlbm + sizeof(handle_tlbm)); |
1454 | } | 1456 | } |
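
flush_tlb_handlers() is needed because the three fastpaths were written through ordinary data accesses; before any CPU may execute them, each buffer has to be made visible to instruction fetch, hence one ranged I-cache flush per array. Below is a user-space approximation of the same write-then-sync step, using the GCC/Clang builtin __builtin___clear_cache as a stand-in for the kernel's local_flush_icache_range() (the buffer and its contents are hypothetical):

#include <stddef.h>
#include <stdint.h>

static uint32_t handler_buf[64];	/* hypothetical generated-code buffer */

/*
 * After patching instructions into a buffer as data, flush the range so
 * instruction fetch observes the new bytes.  __builtin___clear_cache is
 * the GCC/Clang user-space counterpart of local_flush_icache_range().
 */
static void sync_icache(void *start, size_t len)
{
	__builtin___clear_cache((char *)start, (char *)start + len);
}

int main(void)
{
	handler_buf[0] = 0x03e00008;	/* MIPS "jr ra" */
	handler_buf[1] = 0x00000000;	/* branch delay slot: nop */
	sync_icache(handler_buf, sizeof(handler_buf));
	return 0;
}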
1455 | 1457 | ||