Commit 3372f5a7d005dd42e754490fed6a0171c4a018c6

Authored by Alexander Stein
Committed by Greg Ungerer
1 parent 57e00098cc

m68k: Do not set global share for non-kernel shared pages

If the SG bit is set in MMUTR, the page is accessible to all
userspace processes (the ASID is ignored). So a process could
access a page belonging to a different process which had a shared
page (from shared memory) in its context.

Signed-off-by: Alexander Stein <alexander.stein@systec-electronic.com>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
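
Background on the bug: SG is the ColdFire MMUTR "shared global" bit, and a TLB
entry with SG set matches irrespective of the current ASID. A minimal toy model
of that lookup rule (not kernel code; the struct and function names below are
invented for illustration, only the SG/ASID semantics come from the commit
message above):

/*
 * Toy model only -- not kernel code. Types and names are invented;
 * the point is the MMUTR_SG semantics: a shared-global entry matches
 * no matter which ASID is current, so one process can hit another
 * process's shared page.
 */
#include <stdbool.h>
#include <stdint.h>

struct tlb_entry {
	uint32_t vpn;           /* virtual page number */
	uint8_t  asid;          /* address space ID (MMUTR IDN field) */
	bool     valid;         /* MMUTR_V */
	bool     shared_global; /* MMUTR_SG */
};

/* Would this entry translate (vpn, cur_asid)? */
static bool tlb_hit(const struct tlb_entry *e, uint32_t vpn, uint8_t cur_asid)
{
	if (!e->valid || e->vpn != vpn)
		return false;
	/* SG short-circuits the ASID comparison -- the source of the leak. */
	return e->shared_global || e->asid == cur_asid;
}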

Showing 1 changed file with 5 additions and 4 deletions

arch/m68k/mm/mcfmmu.c
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -1,199 +1,200 @@
 /*
  * Based upon linux/arch/m68k/mm/sun3mmu.c
  * Based upon linux/arch/ppc/mm/mmu_context.c
  *
  * Implementations of mm routines specific to the Coldfire MMU.
  *
  * Copyright (c) 2008 Freescale Semiconductor, Inc.
  */
 
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/mcf_pgalloc.h>
 #include <asm/tlbflush.h>
 
 #define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
 
 mm_context_t next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 atomic_t nr_free_contexts;
 struct mm_struct *context_mm[LAST_CONTEXT+1];
 extern unsigned long num_pages;
 
 void free_initmem(void)
 {
 }
 
 /*
  * ColdFire paging_init derived from sun3.
  */
 void __init paging_init(void)
 {
 	pgd_t *pg_dir;
 	pte_t *pg_table;
 	unsigned long address, size;
 	unsigned long next_pgtable, bootmem_end;
 	unsigned long zones_size[MAX_NR_ZONES];
 	enum zone_type zone;
 	int i;
 
 	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 	next_pgtable = (unsigned long) alloc_bootmem_pages(size);
 
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
 
 	address = PAGE_OFFSET;
 	while (address < (unsigned long)high_memory) {
 		pg_table = (pte_t *) next_pgtable;
 		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
 		pgd_val(*pg_dir) = (unsigned long) pg_table;
 		pg_dir++;
 
 		/* now change pg_table to kernel virtual addresses */
 		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
 			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
 			if (address >= (unsigned long) high_memory)
 				pte_val(pte) = 0;
 
 			set_pte(pg_table, pte);
 			address += PAGE_SIZE;
 		}
 	}
 
 	current->mm = NULL;
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++)
 		zones_size[zone] = 0x0;
 	zones_size[ZONE_DMA] = num_pages;
 	free_area_init(zones_size);
 }
 
 int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 {
-	unsigned long flags, mmuar;
+	unsigned long flags, mmuar, mmutr;
 	struct mm_struct *mm;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	int asid;
 
 	local_irq_save(flags);
 
 	mmuar = (dtlb) ? mmu_read(MMUAR) :
 		regs->pc + (extension_word * sizeof(long));
 
 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
 	if (!mm) {
 		local_irq_restore(flags);
 		return -1;
 	}
 
 	pgd = pgd_offset(mm, mmuar);
 	if (pgd_none(*pgd)) {
 		local_irq_restore(flags);
 		return -1;
 	}
 
 	pmd = pmd_offset(pgd, mmuar);
 	if (pmd_none(*pmd)) {
 		local_irq_restore(flags);
 		return -1;
 	}
 
 	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
 				: pte_offset_map(pmd, mmuar);
 	if (pte_none(*pte) || !pte_present(*pte)) {
 		local_irq_restore(flags);
 		return -1;
 	}
 
 	if (write) {
 		if (!pte_write(*pte)) {
 			local_irq_restore(flags);
 			return -1;
 		}
 		set_pte(pte, pte_mkdirty(*pte));
 	}
 
 	set_pte(pte, pte_mkyoung(*pte));
 	asid = mm->context & 0xff;
 	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
 		set_pte(pte, pte_wrprotect(*pte));
 
-	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
-		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
-		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
+	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
+		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
+	mmu_write(MMUTR, mmutr);
 
 	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
 		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
 
 	if (dtlb)
 		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
 	else
 		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
 
 	local_irq_restore(flags);
 	return 0;
 }
 
 /*
  * Initialize the context management stuff.
  * The following was taken from arch/ppc/mmu_context.c
  */
 void __init mmu_context_init(void)
 {
 	/*
 	 * Some processors have too few contexts to reserve one for
 	 * init_mm, and require using context 0 for a normal task.
 	 * Other processors reserve the use of context zero for the kernel.
 	 * This code assumes FIRST_CONTEXT < 32.
 	 */
 	context_map[0] = (1 << FIRST_CONTEXT) - 1;
 	next_mmu_context = FIRST_CONTEXT;
 	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
 }
 
 /*
  * Steal a context from a task that has one at the moment.
  * This is only used on 8xx and 4xx and we presently assume that
  * they don't do SMP. If they do then this will have to check
  * whether the MM we steal is in use.
  * We also assume that this is only used on systems that don't
  * use an MMU hash table - this is true for 8xx and 4xx.
  * This isn't an LRU system, it just frees up each context in
  * turn (sort-of pseudo-random replacement :). This would be the
  * place to implement an LRU scheme if anyone was motivated to do it.
  *  -- paulus
  */
 void steal_context(void)
 {
 	struct mm_struct *mm;
 	/*
 	 * free up context `next_mmu_context'
 	 * if we shouldn't free context 0, don't...
 	 */
 	if (next_mmu_context < FIRST_CONTEXT)
 		next_mmu_context = FIRST_CONTEXT;
 	mm = context_mm[next_mmu_context];
 	flush_tlb_mm(mm);
 	destroy_context(mm);
 }
 
 
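
What the change does: instead of always copying the page's MMUTR attribute bits
(which include the SG bit for shared pages) into the TLB entry, cf_tlb_miss()
now copies them only when the faulting address falls outside the user mmap
range, i.e. below TASK_UNMAPPED_BASE or at/above TASK_SIZE. A sketch of that
predicate in isolation (the helper cf_pte_mmutr_bits is invented for this note
and is not part of the patch; the macros are the kernel's own):

/*
 * Sketch only -- restates the new condition from cf_tlb_miss() above.
 * Shared-memory mappings land in [TASK_UNMAPPED_BASE, TASK_SIZE), so
 * for those addresses the per-page MMUTR bits (including MMUTR_SG) are
 * dropped and the TLB entry stays private to its ASID.
 */
static inline unsigned long cf_pte_mmutr_bits(unsigned long mmuar,
					      unsigned long pte_bits)
{
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		return (pte_bits & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	return 0; /* user mmap range: never mark shared-global */
}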