Blame view
include/asm-sh64/mmu_context.h
4.87 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 |
#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 *
 */

#ifndef __ASSEMBLY__

/*
 * Cache of MMU context last used.
 *
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
 *    (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
 *
 * Incremented each time a new context is handed out; see
 * get_new_mmu_context() below for the wrap-around handling.
 */
extern unsigned long mmu_context_cache;
1da177e4c
|
28 |
#include <asm/page.h> |
d6dd61c83
|
29 |
#include <asm-generic/mm_hooks.h> |
1da177e4c
|
30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;

/*
 * The ASID lives in bits [23:16] of the SR control register:
 * SR_ASID_MASK clears that field, SR_ASID_SHIFT positions a new one.
 */
#define SR_ASID_MASK		0xffffffffff00ffffULL
#define SR_ASID_SHIFT		16

/*
 * Layout of mmu_context_cache (and of mm->context): the low 8 bits are
 * the ASID, the upper 24 bits the version/cycle counter.
 */
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID			0x100

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000
ca5ed2f5c
|
50 |
/*
 * Hand out a fresh (version | ASID) context value for @mm by bumping the
 * global mmu_context_cache counter.  When the 8-bit ASID space wraps we
 * must invalidate every stale translation: flush the whole TLB and, since
 * ASIDs also tag cache lines on this CPU, all caches as well.
 */
static inline void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* We exhaust ASID of this version.
		   Flush all TLB and start new cycle. */
		flush_tlb_all();
		/* We have to flush all caches as ASIDs are
		   used in cache */
		flush_cache_all();
		/* Fix version if needed.
		   Note that we avoid version #0/asid #0 so that NO_CONTEXT
		   (== 0) can never collide with a live context value. */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}

/*
 * Get MMU context if needed.
 *
 * Cheap fast path: if @mm's context carries the current version (top 24
 * bits match mmu_context_cache) its ASID is still valid and nothing is
 * done; otherwise a new context is allocated.
 */
static __inline__ void get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/* Check if we have old version of context.
		   If it's old, we need to get new context with new version. */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.  NO_CONTEXT forces a real allocation on first use.
 */
static inline int init_new_context(struct task_struct *tsk,
					struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;

	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free TLB entries */
	flush_tlb_mm(mm);
}

#endif	/* __ASSEMBLY__ */

/* Common defines */
/* Byte stride between consecutive TLB entry register pairs, and the
   offsets of the PTEH/PTEL halves within one entry. */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008

/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff

#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to fill
 * information is shared. */
extern void __do_tlb_refill(unsigned long address,
                            unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter.
 */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif

/*
 * Return the ASID currently programmed into SR: read SR via getcon and
 * extract the 8-bit ASID field (bits [SR_ASID_SHIFT+7 : SR_ASID_SHIFT]).
 *
 * NOTE(review): the asm line separators in this file look garbled by the
 * scrape — upstream sh64 used "\n\t" between instructions; confirm against
 * the original tree before building.
 */
static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	asm volatile ("getcon " __SR ", %0 \t"
		      : "=r" (sr));

	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}

/* Set ASID into SR */
/*
 * The new SR value (with @asid spliced into the ASID field) cannot simply
 * be putcon'd: it is staged in SSR, the return address of the local label
 * is placed in SPC, and an rte installs SSR->SR while jumping to "1:".
 * Bit 28 is OR'd into the interim SR first — presumably SR.BL to block
 * exceptions across the transition; TODO confirm against the SH-5 manual.
 */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon " __SR ", %0" : "=r" (sr));

	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

	/*
	 * It is possible that this function may be inlined and so to avoid
	 * the assembler reporting duplicate symbols we make use of the gas trick
	 * of generating symbols using numerics and forward reference.
	 */
	asm volatile ("movi 1, %1 \t"
		      "shlli %1, 28, %1 \t"
		      "or %0, %1, %1 \t"
		      "putcon %1, " __SR " \t"
		      "putcon %0, " __SSR " \t"
		      "movi 1f, %1 \t"
		      "ori %1, 1 , %1 \t"
		      "putcon %1, " __SPC " \t"
		      "rte "
		      "1: \t"
		      : "=r" (sr), "=r" (pc) : "0" (sr));
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}

/*
 * Switch address spaces: publish the new mm's pgd for the TLB refill
 * path and install its ASID.  A no-op when prev == next (same mm).
 */
static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk)
{
	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
	}
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

/* Nothing to do when a kernel thread borrows this mm lazily. */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif	/* __ASSEMBLY__ */

#endif /* __ASM_SH64_MMU_CONTEXT_H */