Commit 5e9c8ac5699f2a830fab2c224b6f57bd7da338b8
1 parent: 5286031693
Exists in master and in 7 other branches.
sh: Fix up set_fixmap_nocache() for SH-5.
This needs a PAGE_KERNEL_NOCACHE definition, as provided by pgtable_32.h.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
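For context: set_fixmap_nocache() expands to a __set_fixmap() call that passes PAGE_KERNEL_NOCACHE as the protection, so an SH-5 build that uses pgtable_64.h breaks as soon as the fixmap helpers are compiled, while the 32-bit header already carried the definition. A minimal sketch of that consumer, following the generic fixmap pattern (the exact asm-sh/fixmap.h wording is assumed here, not quoted from this tree):

/* Sketch only: the usual fixmap helpers that consume these pgprots. */
#define set_fixmap(idx, phys) \
                __set_fixmap(idx, phys, PAGE_KERNEL)

/* Some hardware wants its fixmap slots mapped uncached; without a
 * PAGE_KERNEL_NOCACHE definition this does not compile on SH-5. */
#define set_fixmap_nocache(idx, phys) \
                __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)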
Showing 1 changed file with 5 additions and 0 deletions.
include/asm-sh/pgtable_64.h
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
        unsigned long long x = ((unsigned long long) pteval.pte_low);
        unsigned long long *xp = (unsigned long long *) pteptr;
        /*
         * Sign-extend based on NPHYS.
         */
        *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
{
        pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY 0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
        ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
        (virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
        ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

#ifndef __ASSEMBLY__
#define IOBASE_VADDR 0xff000000
#define IOBASE_END 0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT       0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE   0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT  0x004  /* software: page referenced */
#define _PAGE_FILE     0x004  /* software: only when !present */
#define _PAGE_SIZE0    0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1    0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED   0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ     0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE  0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE    0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER     0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY    0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED 0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE (_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE (_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE (0)
#endif

/*
 * Default flags for a Kernel page.
 * This is fundametally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules
 *
 */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                       _PAGE_EXECUTE | \
                       _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
                       _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * We have full permissions (Read/Write/Execute/Shared).
 */
#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
                      _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE      __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED    __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
                                _PAGE_SHARED)
#define PAGE_EXECREAD  __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY      PAGE_EXECREAD

#define PAGE_READONLY  __pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX       __pgprot(_PAGE_COMMON | _PAGE_READ | \
                                _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL    __pgprot(_KERNPG_TABLE)

+#define PAGE_KERNEL_NOCACHE \
+               __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+                        _PAGE_EXECUTE | _PAGE_ACCESSED | \
+                        _PAGE_DIRTY | _PAGE_SHARED)
+
/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note1:
 * It's the tree walk leaf. This is physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:

   We must choose a bit pattern that cannot be valid, whether or not the page
   is present. bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
   left for us to select.  If we force bit[7]==0 when swapped out, we could use
   the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
   we force bit[7]==1 when swapped out, we can use all zeroes to indicate
   empty.  This is convenient, because the page tables get cleared to zero
   when they are allocated.

 */
#define _PTE_EMPTY 0x0
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x) (pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE. Get the absolute physical address, make
 * a relative physical address and translate it to an index.
 */
#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
                        __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x) (mem_map + pte_pagenr(x))

/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))


/*
 * The following have defined behavior only work if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }


/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)                                             \
({                                                                      \
        pte_t __pte;                                                    \
                                                                        \
        set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |        \
                __MEMORY_START | pgprot_val((pgprot))));                \
        __pte;                                                          \
})

/*
 * This takes a (absolute) physical page address that is used
 * by the remapping functions
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

/* Encode and decode a swap entry */
#define __swp_type(x)             (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)           ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)   ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)     ((pte_t) { (x).val })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte))
#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })

#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */
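One note on the new mask: it is exactly PAGE_KERNEL (_KERNPG_TABLE) with _PAGE_CACHABLE dropped, which is also what pgprot_noncached() would produce apart from the _PAGE_DEVICE bit. A small host-side sanity check, not kernel code, with the flag values copied from the header above:

/* Standalone sketch: re-derive PAGE_KERNEL_NOCACHE from the flag values
 * defined in pgtable_64.h and check it is PAGE_KERNEL minus the cache bit. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_CACHABLE 0x002
#define _PAGE_PRESENT  0x004
#define _PAGE_SHARED   0x020
#define _PAGE_READ     0x040
#define _PAGE_EXECUTE  0x080
#define _PAGE_WRITE    0x100
#define _PAGE_DIRTY    0x400
#define _PAGE_ACCESSED 0x800

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                       _PAGE_EXECUTE | _PAGE_CACHABLE | _PAGE_ACCESSED | \
                       _PAGE_DIRTY | _PAGE_SHARED)

#define PAGE_KERNEL_NOCACHE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                             _PAGE_EXECUTE | _PAGE_ACCESSED | \
                             _PAGE_DIRTY | _PAGE_SHARED)

int main(void)
{
        /* The NOCACHE variant is PAGE_KERNEL with the cacheable bit cleared. */
        assert(PAGE_KERNEL_NOCACHE == (_KERNPG_TABLE & ~_PAGE_CACHABLE));
        printf("PAGE_KERNEL         = 0x%03x\n", _KERNPG_TABLE);       /* 0xde6 */
        printf("PAGE_KERNEL_NOCACHE = 0x%03x\n", PAGE_KERNEL_NOCACHE); /* 0xde4 */
        return 0;
}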