Blame view
include/asm-xtensa/mmu_context.h
2.91 KB
9a8fd5589
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 |
/* * include/asm-xtensa/mmu_context.h * * Switch an MMU context. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. */ #ifndef _XTENSA_MMU_CONTEXT_H #define _XTENSA_MMU_CONTEXT_H |
9a8fd5589
|
15 |
#include <linux/stringify.h> |
de4f6e5b4
|
16 |
#include <linux/sched.h> |
9a8fd5589
|
17 18 |
#include <asm/pgtable.h> |
9a8fd5589
|
19 20 |
#include <asm/cacheflush.h> #include <asm/tlbflush.h> |
d6dd61c83
|
21 |
#include <asm-generic/mm_hooks.h> |
9a8fd5589
|
22 |
|
173d66813
|
23 |
/* Xtensa MMU ASIDs are 8 bits wide (values 0..255). */
#define XCHAL_MMU_ASID_BITS 8

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

/* Current ASID generation counter; presumably defined in the arch mm
 * code — the low bits are the next ASID to hand out, the high bits
 * count TLB-flush generations (see __get_new_mmu_context). */
extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT 0
#define ASID_USER_FIRST 4
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
/* Build the 32-bit RASID register image: the user ASID goes into byte 1;
 * bytes 0, 2 and 3 hold the fixed ring ASIDs 1 (kernel), 2 and 3
 * (reserved), giving the constant 0x03020001 backdrop. */
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
9a8fd5589
|
44 |
|
d99cf715a
|
45 |
static inline void set_rasid_register (unsigned long val) |
9a8fd5589
|
46 47 48 49 50 51 |
{ __asm__ __volatile__ (" wsr %0, "__stringify(RASID)" \t" " isync " : : "a" (val)); } |
d99cf715a
|
52 |
static inline unsigned long get_rasid_register (void) |
9a8fd5589
|
53 54 |
{ unsigned long tmp; |
173d66813
|
55 56 |
__asm__ __volatile__ (" rsr %0,"__stringify(RASID)" \t" : "=a" (tmp)); |
9a8fd5589
|
57 58 |
return tmp; } |
d99cf715a
|
59 |
static inline void |
173d66813
|
60 |
__get_new_mmu_context(struct mm_struct *mm) |
9a8fd5589
|
61 62 |
{ extern void flush_tlb_all(void); |
173d66813
|
63 |
if (! (++asid_cache & ASID_MASK) ) { |
9a8fd5589
|
64 |
flush_tlb_all(); /* start new asid cycle */ |
173d66813
|
65 |
asid_cache += ASID_USER_FIRST; |
9a8fd5589
|
66 |
} |
173d66813
|
67 |
mm->context = asid_cache; |
9a8fd5589
|
68 |
} |
d99cf715a
|
69 |
static inline void |
173d66813
|
70 |
__load_mmu_context(struct mm_struct *mm) |
9a8fd5589
|
71 |
{ |
173d66813
|
72 73 |
set_rasid_register(ASID_INSERT(mm->context)); invalidate_page_directory(); |
9a8fd5589
|
74 |
} |
9a8fd5589
|
75 76 77 78 |
/* * Initialize the context related info for a new mm_struct * instance. */ |
d99cf715a
|
79 |
static inline int |
9a8fd5589
|
80 81 82 83 84 |
init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context = NO_CONTEXT; return 0; } |
173d66813
|
85 86 87 88 89 90 91 92 93 94 95 96 |
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */
	__get_new_mmu_context(next);
	/* Program the new ASID into hardware and invalidate cached
	 * page-directory state. */
	__load_mmu_context(next);
}
d99cf715a
|
97 |
/*
 * Switch from 'prev' to 'next'.  Reuse next's ASID if it is still from
 * the current generation; otherwise allocate a new one first.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid.
	 * The bits above ASID_MASK are the generation number: if they
	 * differ from asid_cache's, a TLB flush has happened since
	 * next->context was assigned. */

	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
		__get_new_mmu_context(next);

	__load_mmu_context(next);
}

/* Nothing to do when an mm is deactivated on this architecture. */
#define deactivate_mm(tsk, mm) do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
d99cf715a
|
115 |
static inline void destroy_context(struct mm_struct *mm)
{
	/* Drop any cached page-directory state for the dying mm. */
	invalidate_page_directory();
}

/* Called when the kernel runs in a borrowed mm (lazy TLB mode). */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* _XTENSA_MMU_CONTEXT_H */