Commit cdcc970829e81da3445346cb71b2ea264c9952b9
1 parent: 9085fa1255
Exists in: master and 7 other branches
sh: Move in the SH-5 mmu_context headers.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Showing 5 changed files with 173 additions and 271 deletions
include/asm-sh/cpu-sh5/mmu_context.h
+#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
+#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
+
+/* Common defines */
+#define TLB_STEP	0x00000010
+#define TLB_PTEH	0x00000000
+#define TLB_PTEL	0x00000008
+
+/* PTEH defines */
+#define PTEH_ASID_SHIFT	2
+#define PTEH_VALID	0x0000000000000001
+#define PTEH_SHARED	0x0000000000000002
+#define PTEH_MATCH_ASID	0x00000000000003ff
+
+#ifndef __ASSEMBLY__
+/* This has to be a common function because the next location to fill
+ * information is shared. */
+extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
+
+/* Profiling counter. */
+#ifdef CONFIG_SH64_PROC_TLB
+extern unsigned long long calls_to_do_fast_page_fault;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
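Aside (editor's sketch, not part of the commit): per the defines above, PTEH_MATCH_ASID (0x3ff) covers the valid bit, the shared bit, and the 8-bit ASID starting at bit 2, so the match portion of a TLB entry can be composed as below. The variable names are hypothetical; the real composition happens in the assembly refill path around __do_tlb_refill().

	unsigned long long pteh_match;

	/* ASID in bits 9:2, entry marked valid; both fields fall
	 * inside PTEH_MATCH_ASID (0x3ff). */
	pteh_match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;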
include/asm-sh/mmu_context.h
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2007 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
 #ifndef __ASM_SH_MMU_CONTEXT_H
 #define __ASM_SH_MMU_CONTEXT_H
-#ifdef __KERNEL__
 
+#ifdef __KERNEL__
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -19,7 +19,6 @@
  * (a) TLB cache version (or round, cycle whatever expression you like)
  * (b) ASID (Address Space IDentifier)
  */
-
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100
@@ -28,17 +27,24 @@
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100
 
-#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
-				 MMU_CONTEXT_ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
 
+#define cpu_asid(cpu, mm)	\
+	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
+
 /*
  * Virtual Page Number mask
  */
 #define MMU_VPN_MASK	0xfffff000
 
 #ifdef CONFIG_MMU
+#if defined(CONFIG_SUPERH32)
+#include "mmu_context_32.h"
+#else
+#include "mmu_context_64.h"
+#endif
+
 /*
  * Get MMU context if needed.
  */
@@ -59,7 +65,15 @@
 	 */
 	flush_tlb_all();
 
+#ifdef CONFIG_SUPERH64
 	/*
+	 * The SH-5 cache uses the ASIDs, requiring both the I and D
+	 * cache to be flushed when the ASID is exhausted. Weak.
+	 */
+	flush_cache_all();
+#endif
+
+	/*
 	 * Fix version; Note that we avoid version #0
 	 * to distinguish NO_CONTEXT.
 	 */
@@ -86,39 +100,6 @@
 }
 
 /*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	/* Do nothing */
-}
-
-static inline void set_asid(unsigned long asid)
-{
-	unsigned long __dummy;
-
-	__asm__ __volatile__ ("mov.l	%2, %0\n\t"
-			      "and	%3, %0\n\t"
-			      "or	%1, %0\n\t"
-			      "mov.l	%0, %2"
-			      : "=&r" (__dummy)
-			      : "r" (asid), "m" (__m(MMU_PTEH)),
-			        "r" (0xffffff00));
-}
-
-static inline unsigned long get_asid(void)
-{
-	unsigned long asid;
-
-	__asm__ __volatile__ ("mov.l	%1, %0"
-			      : "=r" (asid)
-			      : "m" (__m(MMU_PTEH)));
-	asid &= MMU_CONTEXT_ASID_MASK;
-	return asid;
-}
-
-/*
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
@@ -128,17 +109,6 @@
 	set_asid(cpu_asid(cpu, mm));
 }
 
-/* MMU_TTB is used for optimizing the fault handling. */
-static inline void set_TTB(pgd_t *pgd)
-{
-	ctrl_outl((unsigned long)pgd, MMU_TTB);
-}
-
-static inline pgd_t *get_TTB(void)
-{
-	return (pgd_t *)ctrl_inl(MMU_TTB);
-}
-
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
@@ -153,17 +123,7 @@
 	if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
 		activate_context(next, cpu);
 }
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
-
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-#else /* !CONFIG_MMU */
+#else
 #define get_mmu_context(mm)		do { } while (0)
 #define init_new_context(tsk,mm)	(0)
 #define destroy_context(mm)		do { } while (0)
@@ -173,10 +133,11 @@
 #define get_TTB()			(0)
 #define activate_context(mm,cpu)	do { } while (0)
 #define switch_mm(prev,next,tsk)	do { } while (0)
+#endif /* CONFIG_MMU */
+
+#define activate_mm(prev, next)		switch_mm((prev),(next),NULL)
 #define deactivate_mm(tsk,mm)		do { } while (0)
-#define activate_mm(prev,next)		do { } while (0)
 #define enter_lazy_tlb(mm,tsk)		do { } while (0)
-#endif /* CONFIG_MMU */
 
 #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 /*
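For reference (editor's illustration, values hypothetical): the context word handled above packs a 24-bit generation ("version") over an 8-bit ASID, and a context goes stale once the per-CPU asid_cache moves to a newer version. This mirrors the version check visible in the sh64 get_mmu_context() removed further below.

	unsigned long ctx = 0x00000203;	/* say: version 0x2, ASID 0x03 */

	unsigned long asid = ctx & MMU_CONTEXT_ASID_MASK;	/* 0x03 */
	unsigned long ver  = ctx & MMU_CONTEXT_VERSION_MASK;	/* 0x00000200 */

	/* Stale if the generations differ, forcing a new context: */
	int stale = ((ctx ^ asid_cache(cpu)) & MMU_CONTEXT_VERSION_MASK) != 0;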
include/asm-sh/mmu_context_32.h
+#ifndef __ASM_SH_MMU_CONTEXT_32_H
+#define __ASM_SH_MMU_CONTEXT_32_H
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	/* Do nothing */
+}
+
+static inline void set_asid(unsigned long asid)
+{
+	unsigned long __dummy;
+
+	__asm__ __volatile__ ("mov.l	%2, %0\n\t"
+			      "and	%3, %0\n\t"
+			      "or	%1, %0\n\t"
+			      "mov.l	%0, %2"
+			      : "=&r" (__dummy)
+			      : "r" (asid), "m" (__m(MMU_PTEH)),
+			        "r" (0xffffff00));
+}
+
+static inline unsigned long get_asid(void)
+{
+	unsigned long asid;
+
+	__asm__ __volatile__ ("mov.l	%1, %0"
+			      : "=r" (asid)
+			      : "m" (__m(MMU_PTEH)));
+	asid &= MMU_CONTEXT_ASID_MASK;
+	return asid;
+}
+
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
+{
+	ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
+
+static inline pgd_t *get_TTB(void)
+{
+	return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+#endif /* __ASM_SH_MMU_CONTEXT_32_H */
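What the four-instruction sequence in set_asid() above does, restated in plain C (a sketch only; set_asid_c is an invented name, and the header keeps the operation in inline assembly so the access pattern on MMU_PTEH is exact):

	static inline void set_asid_c(unsigned long asid)
	{
		unsigned long pteh = ctrl_inl(MMU_PTEH);	/* mov.l %2, %0 */

		pteh &= 0xffffff00;	/* and: clear the old ASID (low byte) */
		pteh |= asid;		/* or:  install the new one */
		ctrl_outl(pteh, MMU_PTEH);	/* mov.l %0, %2 */
	}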
include/asm-sh/mmu_context_64.h
+#ifndef __ASM_SH_MMU_CONTEXT_64_H
+#define __ASM_SH_MMU_CONTEXT_64_H
+
+/*
+ * sh64-specific mmu_context interface.
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003 - 2007  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/cpu/registers.h>
+#include <asm/cacheflush.h>
+
+#define SR_ASID_MASK		0xffffffffff00ffffULL
+#define SR_ASID_SHIFT		16
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	/* Well, at least free TLB entries */
+	flush_tlb_mm(mm);
+}
+
+static inline unsigned long get_asid(void)
+{
+	unsigned long long sr;
+
+	asm volatile ("getcon	" __SR ", %0\n\t"
+		      : "=r" (sr));
+
+	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
+	return (unsigned long) sr;
+}
+
+/* Set ASID into SR */
+static inline void set_asid(unsigned long asid)
+{
+	unsigned long long sr, pc;
+
+	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));
+
+	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
+
+	/*
+	 * It is possible that this function may be inlined and so to avoid
+	 * the assembler reporting duplicate symbols we make use of the
+	 * gas trick of generating symbols using numerics and forward
+	 * reference.
+	 */
+	asm volatile ("movi	1, %1\n\t"
+		      "shlli	%1, 28, %1\n\t"
+		      "or	%0, %1, %1\n\t"
+		      "putcon	%1, " __SR "\n\t"
+		      "putcon	%0, " __SSR "\n\t"
+		      "movi	1f, %1\n\t"
+		      "ori	%1, 1 , %1\n\t"
+		      "putcon	%1, " __SPC "\n\t"
+		      "rte\n"
+		      "1:\n\t"
+		      : "=r" (sr), "=r" (pc) : "0" (sr));
+}
+
+/* No spare register to twiddle, so use a software cache */
+extern pgd_t *mmu_pdtp_cache;
+
+#define set_TTB(pgd)	(mmu_pdtp_cache = (pgd))
+#define get_TTB()	(mmu_pdtp_cache)
+
+#endif /* __ASM_SH_MMU_CONTEXT_64_H */
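The SR update in set_asid() above is ordinary bitfield insertion; only committing the new SR needs the rte dance (stage the new SR in SSR and a return address in SPC, then rte), with the numeric "1:"/"1f" labels keeping the asm safe to inline more than once, as the comment notes. The insertion step on its own, with hypothetical values (editor's sketch, not part of the commit):

	unsigned long long sr = 0x0000000000420000ULL;	/* old ASID 0x42 */
	unsigned long asid = 0x17;			/* new ASID */

	sr = (sr & SR_ASID_MASK) | ((unsigned long long)asid << SR_ASID_SHIFT);
	/* sr == 0x0000000000170000: only the ASID field (bits 23:16) changed */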
include/asm-sh64/mmu_context.h
-#ifndef __ASM_SH64_MMU_CONTEXT_H
-#define __ASM_SH64_MMU_CONTEXT_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * include/asm-sh64/mmu_context.h
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- *
- * ASID handling idea taken from MIPS implementation.
- *
- */
-
-#ifndef __ASSEMBLY__
-
-/*
- * Cache of MMU context last used.
- *
- * The MMU "context" consists of two things:
- *    (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
- *    (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
- */
-extern unsigned long mmu_context_cache;
-
-#include <asm/page.h>
-#include <asm-generic/mm_hooks.h>
-
-/* Current mm's pgd */
-extern pgd_t *mmu_pdtp_cache;
-
-#define SR_ASID_MASK		0xffffffffff00ffffULL
-#define SR_ASID_SHIFT		16
-
-#define MMU_CONTEXT_ASID_MASK		0x000000ff
-#define MMU_CONTEXT_VERSION_MASK	0xffffff00
-#define MMU_CONTEXT_FIRST_VERSION	0x00000100
-#define NO_CONTEXT			0
-
-/* ASID is 8-bit value, so it can't be 0x100 */
-#define MMU_NO_ASID			0x100
-
-
-/*
- * Virtual Page Number mask
- */
-#define MMU_VPN_MASK	0xfffff000
-
-static inline void
-get_new_mmu_context(struct mm_struct *mm)
-{
-	extern void flush_tlb_all(void);
-	extern void flush_cache_all(void);
-
-	unsigned long mc = ++mmu_context_cache;
-
-	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
-		/* We exhaust ASID of this version.
-		   Flush all TLB and start new cycle. */
-		flush_tlb_all();
-		/* We have to flush all caches as ASIDs are
-		   used in cache */
-		flush_cache_all();
-		/* Fix version if needed.
-		   Note that we avoid version #0/asid #0 to distingush NO_CONTEXT. */
-		if (!mc)
-			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
-	}
-	mm->context = mc;
-}
-
-/*
- * Get MMU context if needed.
- */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
-{
-	if (mm) {
-		unsigned long mc = mmu_context_cache;
-		/* Check if we have old version of context.
-		   If it's old, we need to get new context with new version. */
-		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
-			get_new_mmu_context(mm);
-	}
-}
-
-/*
- * Initialize the context related info for a new mm_struct
- * instance.
- */
-static inline int init_new_context(struct task_struct *tsk,
-				   struct mm_struct *mm)
-{
-	mm->context = NO_CONTEXT;
-
-	return 0;
-}
-
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	extern void flush_tlb_mm(struct mm_struct *mm);
-
-	/* Well, at least free TLB entries */
-	flush_tlb_mm(mm);
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* Common defines */
-#define TLB_STEP	0x00000010
-#define TLB_PTEH	0x00000000
-#define TLB_PTEL	0x00000008
-
-/* PTEH defines */
-#define PTEH_ASID_SHIFT	2
-#define PTEH_VALID	0x0000000000000001
-#define PTEH_SHARED	0x0000000000000002
-#define PTEH_MATCH_ASID	0x00000000000003ff
-
-#ifndef __ASSEMBLY__
-/* This has to be a common function because the next location to fill
- * information is shared. */
-extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
-
-/* Profiling counter. */
-#ifdef CONFIG_SH64_PROC_TLB
-extern unsigned long long calls_to_do_fast_page_fault;
-#endif
-
-static inline unsigned long get_asid(void)
-{
-	unsigned long long sr;
-
-	asm volatile ("getcon	" __SR ", %0\n\t"
-		      : "=r" (sr));
-
-	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
-	return (unsigned long) sr;
-}
-
-/* Set ASID into SR */
-static inline void set_asid(unsigned long asid)
-{
-	unsigned long long sr, pc;
-
-	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));
-
-	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
-
-	/*
-	 * It is possible that this function may be inlined and so to avoid
-	 * the assembler reporting duplicate symbols we make use of the gas trick
-	 * of generating symbols using numerics and forward reference.
-	 */
-	asm volatile ("movi	1, %1\n\t"
-		      "shlli	%1, 28, %1\n\t"
-		      "or	%0, %1, %1\n\t"
-		      "putcon	%1, " __SR "\n\t"
-		      "putcon	%0, " __SSR "\n\t"
-		      "movi	1f, %1\n\t"
-		      "ori	%1, 1 , %1\n\t"
-		      "putcon	%1, " __SPC "\n\t"
-		      "rte\n"
-		      "1:\n\t"
-		      : "=r" (sr), "=r" (pc) : "0" (sr));
-}
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static __inline__ void activate_context(struct mm_struct *mm)
-{
-	get_mmu_context(mm);
-	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
-}
-
-
-static __inline__ void switch_mm(struct mm_struct *prev,
-				 struct mm_struct *next,
-				 struct task_struct *tsk)
-{
-	if (prev != next) {
-		mmu_pdtp_cache = next->pgd;
-		activate_context(next);
-	}
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
-
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_SH64_MMU_CONTEXT_H */