Commit 37743487704322e0d5bbf7e003d28c143fcc2a2b
Committed by David S. Miller
1 parent 34d4accfe0
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
sparc32: drop btfixup for check_pgt_cache
It is a noop for srmmu - so use a define as sparc64 does. And drop all sparc callers - no need to confuse ourselves by calling a noop function.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 4 changed files with 1 addition and 27 deletions
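For context: btfixup is sparc32's boot-time call-site patching mechanism, which lets each MMU type route calls such as check_pgt_cache() to its own implementation. On srmmu the registered implementation does nothing, so the commit replaces the indirection with a compile-time no-op define, as sparc64 already does. Below is a minimal standalone sketch of the before/after shapes, with hypothetical names and a plain function pointer standing in for the real boot-time patching:

#include <stdio.h>

/* Before: an indirect, per-MMU hook (sketch only; the real BTFIXUP
 * macros patch the call sites themselves at boot).  The srmmu
 * implementation is empty, so every call paid for an indirect
 * branch that did nothing. */
static void srmmu_check_pgt_cache(int low, int high)
{
	(void)low;
	(void)high;	/* no-op on srmmu */
}

static void (*check_pgt_cache_hook)(int, int) = srmmu_check_pgt_cache;
#define do_check_pgt_cache(low, high) check_pgt_cache_hook(low, high)

/* After: a compile-time no-op, matching what sparc64 does. */
#define check_pgt_cache() do { } while (0)

int main(void)
{
	do_check_pgt_cache(0, 0);	/* indirect call that does nothing */
	check_pgt_cache();		/* expands to nothing at all */
	printf("both are no-ops; only the second is free\n");
	return 0;
}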
arch/sparc/include/asm/pgalloc_32.h
1 | #ifndef _SPARC_PGALLOC_H | 1 | #ifndef _SPARC_PGALLOC_H |
2 | #define _SPARC_PGALLOC_H | 2 | #define _SPARC_PGALLOC_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
6 | 6 | ||
7 | #include <asm/page.h> | 7 | #include <asm/page.h> |
8 | #include <asm/btfixup.h> | 8 | #include <asm/btfixup.h> |
9 | 9 | ||
10 | struct page; | 10 | struct page; |
11 | 11 | ||
12 | extern struct pgtable_cache_struct { | 12 | extern struct pgtable_cache_struct { |
13 | unsigned long *pgd_cache; | 13 | unsigned long *pgd_cache; |
14 | unsigned long *pte_cache; | 14 | unsigned long *pte_cache; |
15 | unsigned long pgtable_cache_sz; | 15 | unsigned long pgtable_cache_sz; |
16 | unsigned long pgd_cache_sz; | 16 | unsigned long pgd_cache_sz; |
17 | } pgt_quicklists; | 17 | } pgt_quicklists; |
18 | #define pgd_quicklist (pgt_quicklists.pgd_cache) | 18 | #define pgd_quicklist (pgt_quicklists.pgd_cache) |
19 | #define pmd_quicklist ((unsigned long *)0) | 19 | #define pmd_quicklist ((unsigned long *)0) |
20 | #define pte_quicklist (pgt_quicklists.pte_cache) | 20 | #define pte_quicklist (pgt_quicklists.pte_cache) |
21 | #define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz) | 21 | #define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz) |
22 | #define pgd_cache_size (pgt_quicklists.pgd_cache_sz) | 22 | #define pgd_cache_size (pgt_quicklists.pgd_cache_sz) |
23 | 23 | ||
24 | extern void check_pgt_cache(void); | 24 | #define check_pgt_cache() do { } while (0) |
25 | BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int) | ||
26 | #define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high) | ||
27 | 25 | ||
28 | BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void) | 26 | BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void) |
29 | #define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)() | 27 | #define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)() |
30 | 28 | ||
31 | BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *) | 29 | BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *) |
32 | #define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd) | 30 | #define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd) |
33 | 31 | ||
34 | #define pgd_free(mm, pgd) free_pgd_fast(pgd) | 32 | #define pgd_free(mm, pgd) free_pgd_fast(pgd) |
35 | #define pgd_alloc(mm) get_pgd_fast() | 33 | #define pgd_alloc(mm) get_pgd_fast() |
36 | 34 | ||
37 | BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *) | 35 | BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *) |
38 | #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp) | 36 | #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp) |
39 | #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) | 37 | #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) |
40 | 38 | ||
41 | BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long) | 39 | BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long) |
42 | #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address) | 40 | #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address) |
43 | 41 | ||
44 | BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) | 42 | BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) |
45 | #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) | 43 | #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) |
46 | 44 | ||
47 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) | 45 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) |
48 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) | 46 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) |
49 | 47 | ||
50 | BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) | 48 | BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) |
51 | #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) | 49 | #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) |
52 | #define pmd_pgtable(pmd) pmd_page(pmd) | 50 | #define pmd_pgtable(pmd) pmd_page(pmd) |
53 | BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *) | 51 | BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *) |
54 | #define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE) | 52 | #define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE) |
55 | 53 | ||
56 | BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long) | 54 | BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long) |
57 | #define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address) | 55 | #define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address) |
58 | BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long) | 56 | BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long) |
59 | #define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr) | 57 | #define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr) |
60 | 58 | ||
61 | BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *) | 59 | BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *) |
62 | #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) | 60 | #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) |
63 | 61 | ||
64 | BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) | 62 | BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) |
65 | #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) | 63 | #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) |
66 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) | 64 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) |
67 | 65 | ||
68 | #endif /* _SPARC_PGALLOC_H */ | 66 | #endif /* _SPARC_PGALLOC_H */ |
69 | 67 |
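A note on the replacement in pgalloc_32.h above: the define expands to do { } while (0) rather than to nothing, so the macro is one complete statement that still requires its trailing semicolon. Existing call sites keep function-call syntax, nest safely in unbraced if/else, and compile to no code. A small sketch:

#include <stdio.h>

#define check_pgt_cache() do { } while (0)

int main(void)
{
	int need_work = 0;

	/* Parses as a single statement, so it behaves exactly like a
	 * real function call in an unbraced if/else, while the
	 * compiler emits nothing for it. */
	if (need_work)
		check_pgt_cache();
	else
		printf("nothing to do\n");
	return 0;
}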
arch/sparc/kernel/process_32.c
1 | /* linux/arch/sparc/kernel/process.c | 1 | /* linux/arch/sparc/kernel/process.c |
2 | * | 2 | * |
3 | * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) |
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) |
5 | */ | 5 | */ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * This file handles the architecture-dependent parts of process handling.. | 8 | * This file handles the architecture-dependent parts of process handling.. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <stdarg.h> | 11 | #include <stdarg.h> |
12 | 12 | ||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/reboot.h> | 22 | #include <linux/reboot.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/pm.h> | 24 | #include <linux/pm.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | 27 | ||
28 | #include <asm/auxio.h> | 28 | #include <asm/auxio.h> |
29 | #include <asm/oplib.h> | 29 | #include <asm/oplib.h> |
30 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
31 | #include <asm/page.h> | 31 | #include <asm/page.h> |
32 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
33 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
34 | #include <asm/delay.h> | 34 | #include <asm/delay.h> |
35 | #include <asm/processor.h> | 35 | #include <asm/processor.h> |
36 | #include <asm/psr.h> | 36 | #include <asm/psr.h> |
37 | #include <asm/elf.h> | 37 | #include <asm/elf.h> |
38 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
39 | #include <asm/unistd.h> | 39 | #include <asm/unistd.h> |
40 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Power management idle function | 43 | * Power management idle function |
44 | * Set in pm platform drivers (apc.c and pmc.c) | 44 | * Set in pm platform drivers (apc.c and pmc.c) |
45 | */ | 45 | */ |
46 | void (*pm_idle)(void); | 46 | void (*pm_idle)(void); |
47 | EXPORT_SYMBOL(pm_idle); | 47 | EXPORT_SYMBOL(pm_idle); |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Power-off handler instantiation for pm.h compliance | 50 | * Power-off handler instantiation for pm.h compliance |
51 | * This is done via auxio, but could be used as a fallback | 51 | * This is done via auxio, but could be used as a fallback |
52 | * handler when auxio is not present-- unused for now... | 52 | * handler when auxio is not present-- unused for now... |
53 | */ | 53 | */ |
54 | void (*pm_power_off)(void) = machine_power_off; | 54 | void (*pm_power_off)(void) = machine_power_off; |
55 | EXPORT_SYMBOL(pm_power_off); | 55 | EXPORT_SYMBOL(pm_power_off); |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * sysctl - toggle power-off restriction for serial console | 58 | * sysctl - toggle power-off restriction for serial console |
59 | * systems in machine_power_off() | 59 | * systems in machine_power_off() |
60 | */ | 60 | */ |
61 | int scons_pwroff = 1; | 61 | int scons_pwroff = 1; |
62 | 62 | ||
63 | extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); | 63 | extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); |
64 | 64 | ||
65 | struct task_struct *last_task_used_math = NULL; | 65 | struct task_struct *last_task_used_math = NULL; |
66 | struct thread_info *current_set[NR_CPUS]; | 66 | struct thread_info *current_set[NR_CPUS]; |
67 | 67 | ||
68 | #ifndef CONFIG_SMP | 68 | #ifndef CONFIG_SMP |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * the idle loop on a Sparc... ;) | 71 | * the idle loop on a Sparc... ;) |
72 | */ | 72 | */ |
73 | void cpu_idle(void) | 73 | void cpu_idle(void) |
74 | { | 74 | { |
75 | /* endless idle loop with no priority at all */ | 75 | /* endless idle loop with no priority at all */ |
76 | for (;;) { | 76 | for (;;) { |
77 | if (pm_idle) { | 77 | if (pm_idle) { |
78 | while (!need_resched()) | 78 | while (!need_resched()) |
79 | (*pm_idle)(); | 79 | (*pm_idle)(); |
80 | } else { | 80 | } else { |
81 | while (!need_resched()) | 81 | while (!need_resched()) |
82 | cpu_relax(); | 82 | cpu_relax(); |
83 | } | 83 | } |
84 | schedule_preempt_disabled(); | 84 | schedule_preempt_disabled(); |
85 | check_pgt_cache(); | ||
86 | } | 85 | } |
87 | } | 86 | } |
88 | 87 | ||
89 | #else | 88 | #else |
90 | 89 | ||
91 | /* This is being executed in task 0 'user space'. */ | 90 | /* This is being executed in task 0 'user space'. */ |
92 | void cpu_idle(void) | 91 | void cpu_idle(void) |
93 | { | 92 | { |
94 | set_thread_flag(TIF_POLLING_NRFLAG); | 93 | set_thread_flag(TIF_POLLING_NRFLAG); |
95 | /* endless idle loop with no priority at all */ | 94 | /* endless idle loop with no priority at all */ |
96 | while(1) { | 95 | while(1) { |
97 | #ifdef CONFIG_SPARC_LEON | 96 | #ifdef CONFIG_SPARC_LEON |
98 | if (pm_idle) { | 97 | if (pm_idle) { |
99 | while (!need_resched()) | 98 | while (!need_resched()) |
100 | (*pm_idle)(); | 99 | (*pm_idle)(); |
101 | } else | 100 | } else |
102 | #endif | 101 | #endif |
103 | { | 102 | { |
104 | while (!need_resched()) | 103 | while (!need_resched()) |
105 | cpu_relax(); | 104 | cpu_relax(); |
106 | } | 105 | } |
107 | schedule_preempt_disabled(); | 106 | schedule_preempt_disabled(); |
108 | check_pgt_cache(); | ||
109 | } | 107 | } |
110 | } | 108 | } |
111 | 109 | ||
112 | #endif | 110 | #endif |
113 | 111 | ||
114 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ | 112 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ |
115 | void machine_halt(void) | 113 | void machine_halt(void) |
116 | { | 114 | { |
117 | local_irq_enable(); | 115 | local_irq_enable(); |
118 | mdelay(8); | 116 | mdelay(8); |
119 | local_irq_disable(); | 117 | local_irq_disable(); |
120 | prom_halt(); | 118 | prom_halt(); |
121 | panic("Halt failed!"); | 119 | panic("Halt failed!"); |
122 | } | 120 | } |
123 | 121 | ||
124 | void machine_restart(char * cmd) | 122 | void machine_restart(char * cmd) |
125 | { | 123 | { |
126 | char *p; | 124 | char *p; |
127 | 125 | ||
128 | local_irq_enable(); | 126 | local_irq_enable(); |
129 | mdelay(8); | 127 | mdelay(8); |
130 | local_irq_disable(); | 128 | local_irq_disable(); |
131 | 129 | ||
132 | p = strchr (reboot_command, '\n'); | 130 | p = strchr (reboot_command, '\n'); |
133 | if (p) *p = 0; | 131 | if (p) *p = 0; |
134 | if (cmd) | 132 | if (cmd) |
135 | prom_reboot(cmd); | 133 | prom_reboot(cmd); |
136 | if (*reboot_command) | 134 | if (*reboot_command) |
137 | prom_reboot(reboot_command); | 135 | prom_reboot(reboot_command); |
138 | prom_feval ("reset"); | 136 | prom_feval ("reset"); |
139 | panic("Reboot failed!"); | 137 | panic("Reboot failed!"); |
140 | } | 138 | } |
141 | 139 | ||
142 | void machine_power_off(void) | 140 | void machine_power_off(void) |
143 | { | 141 | { |
144 | if (auxio_power_register && | 142 | if (auxio_power_register && |
145 | (strcmp(of_console_device->type, "serial") || scons_pwroff)) | 143 | (strcmp(of_console_device->type, "serial") || scons_pwroff)) |
146 | *auxio_power_register |= AUXIO_POWER_OFF; | 144 | *auxio_power_register |= AUXIO_POWER_OFF; |
147 | machine_halt(); | 145 | machine_halt(); |
148 | } | 146 | } |
149 | 147 | ||
150 | #if 0 | 148 | #if 0 |
151 | 149 | ||
152 | static DEFINE_SPINLOCK(sparc_backtrace_lock); | 150 | static DEFINE_SPINLOCK(sparc_backtrace_lock); |
153 | 151 | ||
154 | void __show_backtrace(unsigned long fp) | 152 | void __show_backtrace(unsigned long fp) |
155 | { | 153 | { |
156 | struct reg_window32 *rw; | 154 | struct reg_window32 *rw; |
157 | unsigned long flags; | 155 | unsigned long flags; |
158 | int cpu = smp_processor_id(); | 156 | int cpu = smp_processor_id(); |
159 | 157 | ||
160 | spin_lock_irqsave(&sparc_backtrace_lock, flags); | 158 | spin_lock_irqsave(&sparc_backtrace_lock, flags); |
161 | 159 | ||
162 | rw = (struct reg_window32 *)fp; | 160 | rw = (struct reg_window32 *)fp; |
163 | while(rw && (((unsigned long) rw) >= PAGE_OFFSET) && | 161 | while(rw && (((unsigned long) rw) >= PAGE_OFFSET) && |
164 | !(((unsigned long) rw) & 0x7)) { | 162 | !(((unsigned long) rw) & 0x7)) { |
165 | printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] " | 163 | printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] " |
166 | "FP[%08lx] CALLER[%08lx]: ", cpu, | 164 | "FP[%08lx] CALLER[%08lx]: ", cpu, |
167 | rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], | 165 | rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], |
168 | rw->ins[4], rw->ins[5], | 166 | rw->ins[4], rw->ins[5], |
169 | rw->ins[6], | 167 | rw->ins[6], |
170 | rw->ins[7]); | 168 | rw->ins[7]); |
171 | printk("%pS\n", (void *) rw->ins[7]); | 169 | printk("%pS\n", (void *) rw->ins[7]); |
172 | rw = (struct reg_window32 *) rw->ins[6]; | 170 | rw = (struct reg_window32 *) rw->ins[6]; |
173 | } | 171 | } |
174 | spin_unlock_irqrestore(&sparc_backtrace_lock, flags); | 172 | spin_unlock_irqrestore(&sparc_backtrace_lock, flags); |
175 | } | 173 | } |
176 | 174 | ||
177 | #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") | 175 | #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") |
178 | #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") | 176 | #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") |
179 | #define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp)) | 177 | #define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp)) |
180 | 178 | ||
181 | void show_backtrace(void) | 179 | void show_backtrace(void) |
182 | { | 180 | { |
183 | unsigned long fp; | 181 | unsigned long fp; |
184 | 182 | ||
185 | __SAVE; __SAVE; __SAVE; __SAVE; | 183 | __SAVE; __SAVE; __SAVE; __SAVE; |
186 | __SAVE; __SAVE; __SAVE; __SAVE; | 184 | __SAVE; __SAVE; __SAVE; __SAVE; |
187 | __RESTORE; __RESTORE; __RESTORE; __RESTORE; | 185 | __RESTORE; __RESTORE; __RESTORE; __RESTORE; |
188 | __RESTORE; __RESTORE; __RESTORE; __RESTORE; | 186 | __RESTORE; __RESTORE; __RESTORE; __RESTORE; |
189 | 187 | ||
190 | __GET_FP(fp); | 188 | __GET_FP(fp); |
191 | 189 | ||
192 | __show_backtrace(fp); | 190 | __show_backtrace(fp); |
193 | } | 191 | } |
194 | 192 | ||
195 | #ifdef CONFIG_SMP | 193 | #ifdef CONFIG_SMP |
196 | void smp_show_backtrace_all_cpus(void) | 194 | void smp_show_backtrace_all_cpus(void) |
197 | { | 195 | { |
198 | xc0((smpfunc_t) show_backtrace); | 196 | xc0((smpfunc_t) show_backtrace); |
199 | show_backtrace(); | 197 | show_backtrace(); |
200 | } | 198 | } |
201 | #endif | 199 | #endif |
202 | 200 | ||
203 | void show_stackframe(struct sparc_stackf *sf) | 201 | void show_stackframe(struct sparc_stackf *sf) |
204 | { | 202 | { |
205 | unsigned long size; | 203 | unsigned long size; |
206 | unsigned long *stk; | 204 | unsigned long *stk; |
207 | int i; | 205 | int i; |
208 | 206 | ||
209 | printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx " | 207 | printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx " |
210 | "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n", | 208 | "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n", |
211 | sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3], | 209 | sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3], |
212 | sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]); | 210 | sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]); |
213 | printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx " | 211 | printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx " |
214 | "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n", | 212 | "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n", |
215 | sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3], | 213 | sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3], |
216 | sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc); | 214 | sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc); |
217 | printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx " | 215 | printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx " |
218 | "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n", | 216 | "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n", |
219 | (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1], | 217 | (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1], |
220 | sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5], | 218 | sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5], |
221 | sf->xxargs[0]); | 219 | sf->xxargs[0]); |
222 | size = ((unsigned long)sf->fp) - ((unsigned long)sf); | 220 | size = ((unsigned long)sf->fp) - ((unsigned long)sf); |
223 | size -= STACKFRAME_SZ; | 221 | size -= STACKFRAME_SZ; |
224 | stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ); | 222 | stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ); |
225 | i = 0; | 223 | i = 0; |
226 | do { | 224 | do { |
227 | printk("s%d: %08lx\n", i++, *stk++); | 225 | printk("s%d: %08lx\n", i++, *stk++); |
228 | } while ((size -= sizeof(unsigned long))); | 226 | } while ((size -= sizeof(unsigned long))); |
229 | } | 227 | } |
230 | #endif | 228 | #endif |
231 | 229 | ||
232 | void show_regs(struct pt_regs *r) | 230 | void show_regs(struct pt_regs *r) |
233 | { | 231 | { |
234 | struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; | 232 | struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; |
235 | 233 | ||
236 | printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", | 234 | printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", |
237 | r->psr, r->pc, r->npc, r->y, print_tainted()); | 235 | r->psr, r->pc, r->npc, r->y, print_tainted()); |
238 | printk("PC: <%pS>\n", (void *) r->pc); | 236 | printk("PC: <%pS>\n", (void *) r->pc); |
239 | printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 237 | printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
240 | r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], | 238 | r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], |
241 | r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); | 239 | r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); |
242 | printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 240 | printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
243 | r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], | 241 | r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], |
244 | r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); | 242 | r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); |
245 | printk("RPC: <%pS>\n", (void *) r->u_regs[15]); | 243 | printk("RPC: <%pS>\n", (void *) r->u_regs[15]); |
246 | 244 | ||
247 | printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 245 | printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
248 | rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], | 246 | rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], |
249 | rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]); | 247 | rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]); |
250 | printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 248 | printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
251 | rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], | 249 | rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], |
252 | rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); | 250 | rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); |
253 | } | 251 | } |
254 | 252 | ||
255 | /* | 253 | /* |
256 | * The show_stack is an external API which we do not use ourselves. | 254 | * The show_stack is an external API which we do not use ourselves. |
257 | * The oops is printed in die_if_kernel. | 255 | * The oops is printed in die_if_kernel. |
258 | */ | 256 | */ |
259 | void show_stack(struct task_struct *tsk, unsigned long *_ksp) | 257 | void show_stack(struct task_struct *tsk, unsigned long *_ksp) |
260 | { | 258 | { |
261 | unsigned long pc, fp; | 259 | unsigned long pc, fp; |
262 | unsigned long task_base; | 260 | unsigned long task_base; |
263 | struct reg_window32 *rw; | 261 | struct reg_window32 *rw; |
264 | int count = 0; | 262 | int count = 0; |
265 | 263 | ||
266 | if (tsk != NULL) | 264 | if (tsk != NULL) |
267 | task_base = (unsigned long) task_stack_page(tsk); | 265 | task_base = (unsigned long) task_stack_page(tsk); |
268 | else | 266 | else |
269 | task_base = (unsigned long) current_thread_info(); | 267 | task_base = (unsigned long) current_thread_info(); |
270 | 268 | ||
271 | fp = (unsigned long) _ksp; | 269 | fp = (unsigned long) _ksp; |
272 | do { | 270 | do { |
273 | /* Bogus frame pointer? */ | 271 | /* Bogus frame pointer? */ |
274 | if (fp < (task_base + sizeof(struct thread_info)) || | 272 | if (fp < (task_base + sizeof(struct thread_info)) || |
275 | fp >= (task_base + (PAGE_SIZE << 1))) | 273 | fp >= (task_base + (PAGE_SIZE << 1))) |
276 | break; | 274 | break; |
277 | rw = (struct reg_window32 *) fp; | 275 | rw = (struct reg_window32 *) fp; |
278 | pc = rw->ins[7]; | 276 | pc = rw->ins[7]; |
279 | printk("[%08lx : ", pc); | 277 | printk("[%08lx : ", pc); |
280 | printk("%pS ] ", (void *) pc); | 278 | printk("%pS ] ", (void *) pc); |
281 | fp = rw->ins[6]; | 279 | fp = rw->ins[6]; |
282 | } while (++count < 16); | 280 | } while (++count < 16); |
283 | printk("\n"); | 281 | printk("\n"); |
284 | } | 282 | } |
285 | 283 | ||
286 | void dump_stack(void) | 284 | void dump_stack(void) |
287 | { | 285 | { |
288 | unsigned long *ksp; | 286 | unsigned long *ksp; |
289 | 287 | ||
290 | __asm__ __volatile__("mov %%fp, %0" | 288 | __asm__ __volatile__("mov %%fp, %0" |
291 | : "=r" (ksp)); | 289 | : "=r" (ksp)); |
292 | show_stack(current, ksp); | 290 | show_stack(current, ksp); |
293 | } | 291 | } |
294 | 292 | ||
295 | EXPORT_SYMBOL(dump_stack); | 293 | EXPORT_SYMBOL(dump_stack); |
296 | 294 | ||
297 | /* | 295 | /* |
298 | * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. | 296 | * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. |
299 | */ | 297 | */ |
300 | unsigned long thread_saved_pc(struct task_struct *tsk) | 298 | unsigned long thread_saved_pc(struct task_struct *tsk) |
301 | { | 299 | { |
302 | return task_thread_info(tsk)->kpc; | 300 | return task_thread_info(tsk)->kpc; |
303 | } | 301 | } |
304 | 302 | ||
305 | /* | 303 | /* |
306 | * Free current thread data structures etc.. | 304 | * Free current thread data structures etc.. |
307 | */ | 305 | */ |
308 | void exit_thread(void) | 306 | void exit_thread(void) |
309 | { | 307 | { |
310 | #ifndef CONFIG_SMP | 308 | #ifndef CONFIG_SMP |
311 | if(last_task_used_math == current) { | 309 | if(last_task_used_math == current) { |
312 | #else | 310 | #else |
313 | if (test_thread_flag(TIF_USEDFPU)) { | 311 | if (test_thread_flag(TIF_USEDFPU)) { |
314 | #endif | 312 | #endif |
315 | /* Keep process from leaving FPU in a bogon state. */ | 313 | /* Keep process from leaving FPU in a bogon state. */ |
316 | put_psr(get_psr() | PSR_EF); | 314 | put_psr(get_psr() | PSR_EF); |
317 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | 315 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, |
318 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | 316 | &current->thread.fpqueue[0], &current->thread.fpqdepth); |
319 | #ifndef CONFIG_SMP | 317 | #ifndef CONFIG_SMP |
320 | last_task_used_math = NULL; | 318 | last_task_used_math = NULL; |
321 | #else | 319 | #else |
322 | clear_thread_flag(TIF_USEDFPU); | 320 | clear_thread_flag(TIF_USEDFPU); |
323 | #endif | 321 | #endif |
324 | } | 322 | } |
325 | } | 323 | } |
326 | 324 | ||
327 | void flush_thread(void) | 325 | void flush_thread(void) |
328 | { | 326 | { |
329 | current_thread_info()->w_saved = 0; | 327 | current_thread_info()->w_saved = 0; |
330 | 328 | ||
331 | #ifndef CONFIG_SMP | 329 | #ifndef CONFIG_SMP |
332 | if(last_task_used_math == current) { | 330 | if(last_task_used_math == current) { |
333 | #else | 331 | #else |
334 | if (test_thread_flag(TIF_USEDFPU)) { | 332 | if (test_thread_flag(TIF_USEDFPU)) { |
335 | #endif | 333 | #endif |
336 | /* Clean the fpu. */ | 334 | /* Clean the fpu. */ |
337 | put_psr(get_psr() | PSR_EF); | 335 | put_psr(get_psr() | PSR_EF); |
338 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | 336 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, |
339 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | 337 | &current->thread.fpqueue[0], &current->thread.fpqdepth); |
340 | #ifndef CONFIG_SMP | 338 | #ifndef CONFIG_SMP |
341 | last_task_used_math = NULL; | 339 | last_task_used_math = NULL; |
342 | #else | 340 | #else |
343 | clear_thread_flag(TIF_USEDFPU); | 341 | clear_thread_flag(TIF_USEDFPU); |
344 | #endif | 342 | #endif |
345 | } | 343 | } |
346 | 344 | ||
347 | /* This task is no longer a kernel thread. */ | 345 | /* This task is no longer a kernel thread. */ |
348 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { | 346 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { |
349 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; | 347 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; |
350 | 348 | ||
351 | /* We must fixup kregs as well. */ | 349 | /* We must fixup kregs as well. */ |
352 | /* XXX This was not fixed for ti for a while, worked. Unused? */ | 350 | /* XXX This was not fixed for ti for a while, worked. Unused? */ |
353 | current->thread.kregs = (struct pt_regs *) | 351 | current->thread.kregs = (struct pt_regs *) |
354 | (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ)); | 352 | (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ)); |
355 | } | 353 | } |
356 | } | 354 | } |
357 | 355 | ||
358 | static inline struct sparc_stackf __user * | 356 | static inline struct sparc_stackf __user * |
359 | clone_stackframe(struct sparc_stackf __user *dst, | 357 | clone_stackframe(struct sparc_stackf __user *dst, |
360 | struct sparc_stackf __user *src) | 358 | struct sparc_stackf __user *src) |
361 | { | 359 | { |
362 | unsigned long size, fp; | 360 | unsigned long size, fp; |
363 | struct sparc_stackf *tmp; | 361 | struct sparc_stackf *tmp; |
364 | struct sparc_stackf __user *sp; | 362 | struct sparc_stackf __user *sp; |
365 | 363 | ||
366 | if (get_user(tmp, &src->fp)) | 364 | if (get_user(tmp, &src->fp)) |
367 | return NULL; | 365 | return NULL; |
368 | 366 | ||
369 | fp = (unsigned long) tmp; | 367 | fp = (unsigned long) tmp; |
370 | size = (fp - ((unsigned long) src)); | 368 | size = (fp - ((unsigned long) src)); |
371 | fp = (unsigned long) dst; | 369 | fp = (unsigned long) dst; |
372 | sp = (struct sparc_stackf __user *)(fp - size); | 370 | sp = (struct sparc_stackf __user *)(fp - size); |
373 | 371 | ||
374 | /* do_fork() grabs the parent semaphore, we must release it | 372 | /* do_fork() grabs the parent semaphore, we must release it |
375 | * temporarily so we can build the child clone stack frame | 373 | * temporarily so we can build the child clone stack frame |
376 | * without deadlocking. | 374 | * without deadlocking. |
377 | */ | 375 | */ |
378 | if (__copy_user(sp, src, size)) | 376 | if (__copy_user(sp, src, size)) |
379 | sp = NULL; | 377 | sp = NULL; |
380 | else if (put_user(fp, &sp->fp)) | 378 | else if (put_user(fp, &sp->fp)) |
381 | sp = NULL; | 379 | sp = NULL; |
382 | 380 | ||
383 | return sp; | 381 | return sp; |
384 | } | 382 | } |
385 | 383 | ||
386 | asmlinkage int sparc_do_fork(unsigned long clone_flags, | 384 | asmlinkage int sparc_do_fork(unsigned long clone_flags, |
387 | unsigned long stack_start, | 385 | unsigned long stack_start, |
388 | struct pt_regs *regs, | 386 | struct pt_regs *regs, |
389 | unsigned long stack_size) | 387 | unsigned long stack_size) |
390 | { | 388 | { |
391 | unsigned long parent_tid_ptr, child_tid_ptr; | 389 | unsigned long parent_tid_ptr, child_tid_ptr; |
392 | unsigned long orig_i1 = regs->u_regs[UREG_I1]; | 390 | unsigned long orig_i1 = regs->u_regs[UREG_I1]; |
393 | long ret; | 391 | long ret; |
394 | 392 | ||
395 | parent_tid_ptr = regs->u_regs[UREG_I2]; | 393 | parent_tid_ptr = regs->u_regs[UREG_I2]; |
396 | child_tid_ptr = regs->u_regs[UREG_I4]; | 394 | child_tid_ptr = regs->u_regs[UREG_I4]; |
397 | 395 | ||
398 | ret = do_fork(clone_flags, stack_start, | 396 | ret = do_fork(clone_flags, stack_start, |
399 | regs, stack_size, | 397 | regs, stack_size, |
400 | (int __user *) parent_tid_ptr, | 398 | (int __user *) parent_tid_ptr, |
401 | (int __user *) child_tid_ptr); | 399 | (int __user *) child_tid_ptr); |
402 | 400 | ||
403 | /* If we get an error and potentially restart the system | 401 | /* If we get an error and potentially restart the system |
404 | * call, we're screwed because copy_thread() clobbered | 402 | * call, we're screwed because copy_thread() clobbered |
405 | * the parent's %o1. So detect that case and restore it | 403 | * the parent's %o1. So detect that case and restore it |
406 | * here. | 404 | * here. |
407 | */ | 405 | */ |
408 | if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) | 406 | if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) |
409 | regs->u_regs[UREG_I1] = orig_i1; | 407 | regs->u_regs[UREG_I1] = orig_i1; |
410 | 408 | ||
411 | return ret; | 409 | return ret; |
412 | } | 410 | } |
413 | 411 | ||
414 | /* Copy a Sparc thread. The fork() return value conventions | 412 | /* Copy a Sparc thread. The fork() return value conventions |
415 | * under SunOS are nothing short of bletcherous: | 413 | * under SunOS are nothing short of bletcherous: |
416 | * Parent --> %o0 == childs pid, %o1 == 0 | 414 | * Parent --> %o0 == childs pid, %o1 == 0 |
417 | * Child --> %o0 == parents pid, %o1 == 1 | 415 | * Child --> %o0 == parents pid, %o1 == 1 |
418 | * | 416 | * |
419 | * NOTE: We have a separate fork kpsr/kwim because | 417 | * NOTE: We have a separate fork kpsr/kwim because |
420 | * the parent could change these values between | 418 | * the parent could change these values between |
421 | * sys_fork invocation and when we reach here | 419 | * sys_fork invocation and when we reach here |
422 | * if the parent should sleep while trying to | 420 | * if the parent should sleep while trying to |
423 | * allocate the task_struct and kernel stack in | 421 | * allocate the task_struct and kernel stack in |
424 | * do_fork(). | 422 | * do_fork(). |
425 | * XXX See comment above sys_vfork in sparc64. todo. | 423 | * XXX See comment above sys_vfork in sparc64. todo. |
426 | */ | 424 | */ |
427 | extern void ret_from_fork(void); | 425 | extern void ret_from_fork(void); |
428 | 426 | ||
429 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 427 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
430 | unsigned long unused, | 428 | unsigned long unused, |
431 | struct task_struct *p, struct pt_regs *regs) | 429 | struct task_struct *p, struct pt_regs *regs) |
432 | { | 430 | { |
433 | struct thread_info *ti = task_thread_info(p); | 431 | struct thread_info *ti = task_thread_info(p); |
434 | struct pt_regs *childregs; | 432 | struct pt_regs *childregs; |
435 | char *new_stack; | 433 | char *new_stack; |
436 | 434 | ||
437 | #ifndef CONFIG_SMP | 435 | #ifndef CONFIG_SMP |
438 | if(last_task_used_math == current) { | 436 | if(last_task_used_math == current) { |
439 | #else | 437 | #else |
440 | if (test_thread_flag(TIF_USEDFPU)) { | 438 | if (test_thread_flag(TIF_USEDFPU)) { |
441 | #endif | 439 | #endif |
442 | put_psr(get_psr() | PSR_EF); | 440 | put_psr(get_psr() | PSR_EF); |
443 | fpsave(&p->thread.float_regs[0], &p->thread.fsr, | 441 | fpsave(&p->thread.float_regs[0], &p->thread.fsr, |
444 | &p->thread.fpqueue[0], &p->thread.fpqdepth); | 442 | &p->thread.fpqueue[0], &p->thread.fpqdepth); |
445 | #ifdef CONFIG_SMP | 443 | #ifdef CONFIG_SMP |
446 | clear_thread_flag(TIF_USEDFPU); | 444 | clear_thread_flag(TIF_USEDFPU); |
447 | #endif | 445 | #endif |
448 | } | 446 | } |
449 | 447 | ||
450 | /* | 448 | /* |
451 | * p->thread_info new_stack childregs | 449 | * p->thread_info new_stack childregs |
452 | * ! ! ! {if(PSR_PS) } | 450 | * ! ! ! {if(PSR_PS) } |
453 | * V V (stk.fr.) V (pt_regs) { (stk.fr.) } | 451 | * V V (stk.fr.) V (pt_regs) { (stk.fr.) } |
454 | * +----- - - - - - ------+===========+============={+==========}+ | 452 | * +----- - - - - - ------+===========+============={+==========}+ |
455 | */ | 453 | */ |
456 | new_stack = task_stack_page(p) + THREAD_SIZE; | 454 | new_stack = task_stack_page(p) + THREAD_SIZE; |
457 | if (regs->psr & PSR_PS) | 455 | if (regs->psr & PSR_PS) |
458 | new_stack -= STACKFRAME_SZ; | 456 | new_stack -= STACKFRAME_SZ; |
459 | new_stack -= STACKFRAME_SZ + TRACEREG_SZ; | 457 | new_stack -= STACKFRAME_SZ + TRACEREG_SZ; |
460 | memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); | 458 | memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); |
461 | childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); | 459 | childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); |
462 | 460 | ||
463 | /* | 461 | /* |
464 | * A new process must start with interrupts closed in 2.5, | 462 | * A new process must start with interrupts closed in 2.5, |
465 | * because this is how Mingo's scheduler works (see schedule_tail | 463 | * because this is how Mingo's scheduler works (see schedule_tail |
466 | * and finish_arch_switch). If we do not do it, a timer interrupt hits | 464 | * and finish_arch_switch). If we do not do it, a timer interrupt hits |
467 | * before we unlock, attempts to re-take the rq->lock, and then we die. | 465 | * before we unlock, attempts to re-take the rq->lock, and then we die. |
468 | * Thus, kpsr|=PSR_PIL. | 466 | * Thus, kpsr|=PSR_PIL. |
469 | */ | 467 | */ |
470 | ti->ksp = (unsigned long) new_stack; | 468 | ti->ksp = (unsigned long) new_stack; |
471 | ti->kpc = (((unsigned long) ret_from_fork) - 0x8); | 469 | ti->kpc = (((unsigned long) ret_from_fork) - 0x8); |
472 | ti->kpsr = current->thread.fork_kpsr | PSR_PIL; | 470 | ti->kpsr = current->thread.fork_kpsr | PSR_PIL; |
473 | ti->kwim = current->thread.fork_kwim; | 471 | ti->kwim = current->thread.fork_kwim; |
474 | 472 | ||
475 | if(regs->psr & PSR_PS) { | 473 | if(regs->psr & PSR_PS) { |
476 | extern struct pt_regs fake_swapper_regs; | 474 | extern struct pt_regs fake_swapper_regs; |
477 | 475 | ||
478 | p->thread.kregs = &fake_swapper_regs; | 476 | p->thread.kregs = &fake_swapper_regs; |
479 | new_stack += STACKFRAME_SZ + TRACEREG_SZ; | 477 | new_stack += STACKFRAME_SZ + TRACEREG_SZ; |
480 | childregs->u_regs[UREG_FP] = (unsigned long) new_stack; | 478 | childregs->u_regs[UREG_FP] = (unsigned long) new_stack; |
481 | p->thread.flags |= SPARC_FLAG_KTHREAD; | 479 | p->thread.flags |= SPARC_FLAG_KTHREAD; |
482 | p->thread.current_ds = KERNEL_DS; | 480 | p->thread.current_ds = KERNEL_DS; |
483 | memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ); | 481 | memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ); |
484 | childregs->u_regs[UREG_G6] = (unsigned long) ti; | 482 | childregs->u_regs[UREG_G6] = (unsigned long) ti; |
485 | } else { | 483 | } else { |
486 | p->thread.kregs = childregs; | 484 | p->thread.kregs = childregs; |
487 | childregs->u_regs[UREG_FP] = sp; | 485 | childregs->u_regs[UREG_FP] = sp; |
488 | p->thread.flags &= ~SPARC_FLAG_KTHREAD; | 486 | p->thread.flags &= ~SPARC_FLAG_KTHREAD; |
489 | p->thread.current_ds = USER_DS; | 487 | p->thread.current_ds = USER_DS; |
490 | 488 | ||
491 | if (sp != regs->u_regs[UREG_FP]) { | 489 | if (sp != regs->u_regs[UREG_FP]) { |
492 | struct sparc_stackf __user *childstack; | 490 | struct sparc_stackf __user *childstack; |
493 | struct sparc_stackf __user *parentstack; | 491 | struct sparc_stackf __user *parentstack; |
494 | 492 | ||
495 | /* | 493 | /* |
496 | * This is a clone() call with supplied user stack. | 494 | * This is a clone() call with supplied user stack. |
497 | * Set some valid stack frames to give to the child. | 495 | * Set some valid stack frames to give to the child. |
498 | */ | 496 | */ |
499 | childstack = (struct sparc_stackf __user *) | 497 | childstack = (struct sparc_stackf __user *) |
500 | (sp & ~0xfUL); | 498 | (sp & ~0xfUL); |
501 | parentstack = (struct sparc_stackf __user *) | 499 | parentstack = (struct sparc_stackf __user *) |
502 | regs->u_regs[UREG_FP]; | 500 | regs->u_regs[UREG_FP]; |
503 | 501 | ||
504 | #if 0 | 502 | #if 0 |
505 | printk("clone: parent stack:\n"); | 503 | printk("clone: parent stack:\n"); |
506 | show_stackframe(parentstack); | 504 | show_stackframe(parentstack); |
507 | #endif | 505 | #endif |
508 | 506 | ||
509 | childstack = clone_stackframe(childstack, parentstack); | 507 | childstack = clone_stackframe(childstack, parentstack); |
510 | if (!childstack) | 508 | if (!childstack) |
511 | return -EFAULT; | 509 | return -EFAULT; |
512 | 510 | ||
513 | #if 0 | 511 | #if 0 |
514 | printk("clone: child stack:\n"); | 512 | printk("clone: child stack:\n"); |
515 | show_stackframe(childstack); | 513 | show_stackframe(childstack); |
516 | #endif | 514 | #endif |
517 | 515 | ||
518 | childregs->u_regs[UREG_FP] = (unsigned long)childstack; | 516 | childregs->u_regs[UREG_FP] = (unsigned long)childstack; |
519 | } | 517 | } |
520 | } | 518 | } |
521 | 519 | ||
522 | #ifdef CONFIG_SMP | 520 | #ifdef CONFIG_SMP |
523 | /* FPU must be disabled on SMP. */ | 521 | /* FPU must be disabled on SMP. */ |
524 | childregs->psr &= ~PSR_EF; | 522 | childregs->psr &= ~PSR_EF; |
525 | #endif | 523 | #endif |
526 | 524 | ||
527 | /* Set the return value for the child. */ | 525 | /* Set the return value for the child. */ |
528 | childregs->u_regs[UREG_I0] = current->pid; | 526 | childregs->u_regs[UREG_I0] = current->pid; |
529 | childregs->u_regs[UREG_I1] = 1; | 527 | childregs->u_regs[UREG_I1] = 1; |
530 | 528 | ||
531 | /* Set the return value for the parent. */ | 529 | /* Set the return value for the parent. */ |
532 | regs->u_regs[UREG_I1] = 0; | 530 | regs->u_regs[UREG_I1] = 0; |
533 | 531 | ||
534 | if (clone_flags & CLONE_SETTLS) | 532 | if (clone_flags & CLONE_SETTLS) |
535 | childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3]; | 533 | childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3]; |
536 | 534 | ||
537 | return 0; | 535 | return 0; |
538 | } | 536 | } |
539 | 537 | ||
540 | /* | 538 | /* |
541 | * fill in the fpu structure for a core dump. | 539 | * fill in the fpu structure for a core dump. |
542 | */ | 540 | */ |
543 | int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) | 541 | int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) |
544 | { | 542 | { |
545 | if (used_math()) { | 543 | if (used_math()) { |
546 | memset(fpregs, 0, sizeof(*fpregs)); | 544 | memset(fpregs, 0, sizeof(*fpregs)); |
547 | fpregs->pr_q_entrysize = 8; | 545 | fpregs->pr_q_entrysize = 8; |
548 | return 1; | 546 | return 1; |
549 | } | 547 | } |
550 | #ifdef CONFIG_SMP | 548 | #ifdef CONFIG_SMP |
551 | if (test_thread_flag(TIF_USEDFPU)) { | 549 | if (test_thread_flag(TIF_USEDFPU)) { |
552 | put_psr(get_psr() | PSR_EF); | 550 | put_psr(get_psr() | PSR_EF); |
553 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | 551 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, |
554 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | 552 | &current->thread.fpqueue[0], &current->thread.fpqdepth); |
555 | if (regs != NULL) { | 553 | if (regs != NULL) { |
556 | regs->psr &= ~(PSR_EF); | 554 | regs->psr &= ~(PSR_EF); |
557 | clear_thread_flag(TIF_USEDFPU); | 555 | clear_thread_flag(TIF_USEDFPU); |
558 | } | 556 | } |
559 | } | 557 | } |
560 | #else | 558 | #else |
561 | if (current == last_task_used_math) { | 559 | if (current == last_task_used_math) { |
562 | put_psr(get_psr() | PSR_EF); | 560 | put_psr(get_psr() | PSR_EF); |
563 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, | 561 | fpsave(&current->thread.float_regs[0], &current->thread.fsr, |
564 | &current->thread.fpqueue[0], &current->thread.fpqdepth); | 562 | &current->thread.fpqueue[0], &current->thread.fpqdepth); |
565 | if (regs != NULL) { | 563 | if (regs != NULL) { |
566 | regs->psr &= ~(PSR_EF); | 564 | regs->psr &= ~(PSR_EF); |
567 | last_task_used_math = NULL; | 565 | last_task_used_math = NULL; |
568 | } | 566 | } |
569 | } | 567 | } |
570 | #endif | 568 | #endif |
571 | memcpy(&fpregs->pr_fr.pr_regs[0], | 569 | memcpy(&fpregs->pr_fr.pr_regs[0], |
572 | &current->thread.float_regs[0], | 570 | &current->thread.float_regs[0], |
573 | (sizeof(unsigned long) * 32)); | 571 | (sizeof(unsigned long) * 32)); |
574 | fpregs->pr_fsr = current->thread.fsr; | 572 | fpregs->pr_fsr = current->thread.fsr; |
575 | fpregs->pr_qcnt = current->thread.fpqdepth; | 573 | fpregs->pr_qcnt = current->thread.fpqdepth; |
576 | fpregs->pr_q_entrysize = 8; | 574 | fpregs->pr_q_entrysize = 8; |
577 | fpregs->pr_en = 1; | 575 | fpregs->pr_en = 1; |
578 | if(fpregs->pr_qcnt != 0) { | 576 | if(fpregs->pr_qcnt != 0) { |
579 | memcpy(&fpregs->pr_q[0], | 577 | memcpy(&fpregs->pr_q[0], |
580 | &current->thread.fpqueue[0], | 578 | &current->thread.fpqueue[0], |
581 | sizeof(struct fpq) * fpregs->pr_qcnt); | 579 | sizeof(struct fpq) * fpregs->pr_qcnt); |
582 | } | 580 | } |
583 | /* Zero out the rest. */ | 581 | /* Zero out the rest. */ |
584 | memset(&fpregs->pr_q[fpregs->pr_qcnt], 0, | 582 | memset(&fpregs->pr_q[fpregs->pr_qcnt], 0, |
585 | sizeof(struct fpq) * (32 - fpregs->pr_qcnt)); | 583 | sizeof(struct fpq) * (32 - fpregs->pr_qcnt)); |
586 | return 1; | 584 | return 1; |
587 | } | 585 | } |
588 | 586 | ||
589 | /* | 587 | /* |
590 | * sparc_execve() executes a new program after the asm stub has set | 588 | * sparc_execve() executes a new program after the asm stub has set |
591 | * things up for us. This should basically do what I want it to. | 589 | * things up for us. This should basically do what I want it to. |
592 | */ | 590 | */ |
593 | asmlinkage int sparc_execve(struct pt_regs *regs) | 591 | asmlinkage int sparc_execve(struct pt_regs *regs) |
594 | { | 592 | { |
595 | int error, base = 0; | 593 | int error, base = 0; |
596 | char *filename; | 594 | char *filename; |
597 | 595 | ||
598 | /* Check for indirect call. */ | 596 | /* Check for indirect call. */ |
599 | if(regs->u_regs[UREG_G1] == 0) | 597 | if(regs->u_regs[UREG_G1] == 0) |
600 | base = 1; | 598 | base = 1; |
601 | 599 | ||
602 | filename = getname((char __user *)regs->u_regs[base + UREG_I0]); | 600 | filename = getname((char __user *)regs->u_regs[base + UREG_I0]); |
603 | error = PTR_ERR(filename); | 601 | error = PTR_ERR(filename); |
604 | if(IS_ERR(filename)) | 602 | if(IS_ERR(filename)) |
605 | goto out; | 603 | goto out; |
606 | error = do_execve(filename, | 604 | error = do_execve(filename, |
607 | (const char __user *const __user *) | 605 | (const char __user *const __user *) |
608 | regs->u_regs[base + UREG_I1], | 606 | regs->u_regs[base + UREG_I1], |
609 | (const char __user *const __user *) | 607 | (const char __user *const __user *) |
610 | regs->u_regs[base + UREG_I2], | 608 | regs->u_regs[base + UREG_I2], |
611 | regs); | 609 | regs); |
612 | putname(filename); | 610 | putname(filename); |
613 | out: | 611 | out: |
614 | return error; | 612 | return error; |
615 | } | 613 | } |
616 | 614 | ||
617 | /* | 615 | /* |
618 | * This is the mechanism for creating a new kernel thread. | 616 | * This is the mechanism for creating a new kernel thread. |
619 | * | 617 | * |
620 | * NOTE! Only a kernel-only process(ie the swapper or direct descendants | 618 | * NOTE! Only a kernel-only process(ie the swapper or direct descendants |
621 | * who haven't done an "execve()") should use this: it will work within | 619 | * who haven't done an "execve()") should use this: it will work within |
622 | * a system call from a "real" process, but the process memory space will | 620 | * a system call from a "real" process, but the process memory space will |
623 | * not be freed until both the parent and the child have exited. | 621 | * not be freed until both the parent and the child have exited. |
624 | */ | 622 | */ |
625 | pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | 623 | pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) |
626 | { | 624 | { |
627 | long retval; | 625 | long retval; |
628 | 626 | ||
629 | __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */ | 627 | __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */ |
630 | "mov %5, %%g3\n\t" /* and arg. */ | 628 | "mov %5, %%g3\n\t" /* and arg. */ |
631 | "mov %1, %%g1\n\t" | 629 | "mov %1, %%g1\n\t" |
632 | "mov %2, %%o0\n\t" /* Clone flags. */ | 630 | "mov %2, %%o0\n\t" /* Clone flags. */ |
633 | "mov 0, %%o1\n\t" /* usp arg == 0 */ | 631 | "mov 0, %%o1\n\t" /* usp arg == 0 */ |
634 | "t 0x10\n\t" /* Linux/Sparc clone(). */ | 632 | "t 0x10\n\t" /* Linux/Sparc clone(). */ |
635 | "cmp %%o1, 0\n\t" | 633 | "cmp %%o1, 0\n\t" |
636 | "be 1f\n\t" /* The parent, just return. */ | 634 | "be 1f\n\t" /* The parent, just return. */ |
637 | " nop\n\t" /* Delay slot. */ | 635 | " nop\n\t" /* Delay slot. */ |
638 | "jmpl %%g2, %%o7\n\t" /* Call the function. */ | 636 | "jmpl %%g2, %%o7\n\t" /* Call the function. */ |
639 | " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */ | 637 | " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */ |
640 | "mov %3, %%g1\n\t" | 638 | "mov %3, %%g1\n\t" |
641 | "t 0x10\n\t" /* Linux/Sparc exit(). */ | 639 | "t 0x10\n\t" /* Linux/Sparc exit(). */ |
642 | /* Notreached by child. */ | 640 | /* Notreached by child. */ |
643 | "1: mov %%o0, %0\n\t" : | 641 | "1: mov %%o0, %0\n\t" : |
644 | "=r" (retval) : | 642 | "=r" (retval) : |
645 | "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), | 643 | "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), |
646 | "i" (__NR_exit), "r" (fn), "r" (arg) : | 644 | "i" (__NR_exit), "r" (fn), "r" (arg) : |
647 | "g1", "g2", "g3", "o0", "o1", "memory", "cc"); | 645 | "g1", "g2", "g3", "o0", "o1", "memory", "cc"); |
648 | return retval; | 646 | return retval; |
649 | } | 647 | } |
650 | EXPORT_SYMBOL(kernel_thread); | 648 | EXPORT_SYMBOL(kernel_thread); |
651 | 649 | ||
652 | unsigned long get_wchan(struct task_struct *task) | 650 | unsigned long get_wchan(struct task_struct *task) |
653 | { | 651 | { |
654 | unsigned long pc, fp, bias = 0; | 652 | unsigned long pc, fp, bias = 0; |
655 | unsigned long task_base = (unsigned long) task; | 653 | unsigned long task_base = (unsigned long) task; |
656 | unsigned long ret = 0; | 654 | unsigned long ret = 0; |
657 | struct reg_window32 *rw; | 655 | struct reg_window32 *rw; |
658 | int count = 0; | 656 | int count = 0; |
659 | 657 | ||
660 | if (!task || task == current || | 658 | if (!task || task == current || |
661 | task->state == TASK_RUNNING) | 659 | task->state == TASK_RUNNING) |
662 | goto out; | 660 | goto out; |
663 | 661 | ||
664 | fp = task_thread_info(task)->ksp + bias; | 662 | fp = task_thread_info(task)->ksp + bias; |
665 | do { | 663 | do { |
666 | /* Bogus frame pointer? */ | 664 | /* Bogus frame pointer? */ |
667 | if (fp < (task_base + sizeof(struct thread_info)) || | 665 | if (fp < (task_base + sizeof(struct thread_info)) || |
668 | fp >= (task_base + (2 * PAGE_SIZE))) | 666 | fp >= (task_base + (2 * PAGE_SIZE))) |
669 | break; | 667 | break; |
670 | rw = (struct reg_window32 *) fp; | 668 | rw = (struct reg_window32 *) fp; |
671 | pc = rw->ins[7]; | 669 | pc = rw->ins[7]; |
672 | if (!in_sched_functions(pc)) { | 670 | if (!in_sched_functions(pc)) { |
673 | ret = pc; | 671 | ret = pc; |
674 | goto out; | 672 | goto out; |
675 | } | 673 | } |
676 | fp = rw->ins[6] + bias; | 674 | fp = rw->ins[6] + bias; |
677 | } while (++count < 16); | 675 | } while (++count < 16); |
678 | 676 | ||
679 | out: | 677 | out: |
680 | return ret; | 678 | return ret; |
681 | } | 679 | } |
682 | 680 | ||
683 | 681 |
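The two cpu_idle() hunks above remove the only calls this file made. Since check_pgt_cache() now expands to nothing, dropping them changes no generated code; it just stops implying that the idle loop does page-table cache maintenance. A simplified, compilable sketch of the resulting uniprocessor loop body, with stand-in stubs for the kernel internals (need_resched(), cpu_relax() and schedule_preempt_disabled() here are fakes for illustration only):

#include <stdio.h>

static int pending = 3;	/* pretend work arrives after three polls */
static int need_resched(void) { return pending-- <= 0; }
static void cpu_relax(void) { }
static void schedule_preempt_disabled(void) { printf("schedule\n"); }

/* One pass of the post-patch idle loop body: poll until work is
 * pending, then schedule.  No check_pgt_cache() call remains. */
static void cpu_idle_once(void)
{
	while (!need_resched())
		cpu_relax();
	schedule_preempt_disabled();
}

int main(void)
{
	cpu_idle_once();
	return 0;
}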
arch/sparc/mm/init_32.c
1 | /* | 1 | /* |
2 | * linux/arch/sparc/mm/init.c | 2 | * linux/arch/sparc/mm/init.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) |
5 | * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be) | 5 | * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 6 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
7 | * Copyright (C) 2000 Anton Blanchard (anton@samba.org) | 7 | * Copyright (C) 2000 Anton Blanchard (anton@samba.org) |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/signal.h> | 11 | #include <linux/signal.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
18 | #include <linux/mman.h> | 18 | #include <linux/mman.h> |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/swap.h> | 20 | #include <linux/swap.h> |
21 | #include <linux/initrd.h> | 21 | #include <linux/initrd.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/bootmem.h> | 24 | #include <linux/bootmem.h> |
25 | #include <linux/pagemap.h> | 25 | #include <linux/pagemap.h> |
26 | #include <linux/poison.h> | 26 | #include <linux/poison.h> |
27 | #include <linux/gfp.h> | 27 | #include <linux/gfp.h> |
28 | 28 | ||
29 | #include <asm/sections.h> | 29 | #include <asm/sections.h> |
30 | #include <asm/page.h> | 30 | #include <asm/page.h> |
31 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
32 | #include <asm/vaddrs.h> | 32 | #include <asm/vaddrs.h> |
33 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ | 33 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ |
34 | #include <asm/tlb.h> | 34 | #include <asm/tlb.h> |
35 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
36 | #include <asm/leon.h> | 36 | #include <asm/leon.h> |
37 | 37 | ||
38 | unsigned long *sparc_valid_addr_bitmap; | 38 | unsigned long *sparc_valid_addr_bitmap; |
39 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); | 39 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); |
40 | 40 | ||
41 | unsigned long phys_base; | 41 | unsigned long phys_base; |
42 | EXPORT_SYMBOL(phys_base); | 42 | EXPORT_SYMBOL(phys_base); |
43 | 43 | ||
44 | unsigned long pfn_base; | 44 | unsigned long pfn_base; |
45 | EXPORT_SYMBOL(pfn_base); | 45 | EXPORT_SYMBOL(pfn_base); |
46 | 46 | ||
47 | unsigned long page_kernel; | 47 | unsigned long page_kernel; |
48 | EXPORT_SYMBOL(page_kernel); | 48 | EXPORT_SYMBOL(page_kernel); |
49 | 49 | ||
50 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; | 50 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; |
51 | unsigned long sparc_unmapped_base; | 51 | unsigned long sparc_unmapped_base; |
52 | 52 | ||
53 | struct pgtable_cache_struct pgt_quicklists; | 53 | struct pgtable_cache_struct pgt_quicklists; |
54 | 54 | ||
55 | /* Initial ramdisk setup */ | 55 | /* Initial ramdisk setup */ |
56 | extern unsigned int sparc_ramdisk_image; | 56 | extern unsigned int sparc_ramdisk_image; |
57 | extern unsigned int sparc_ramdisk_size; | 57 | extern unsigned int sparc_ramdisk_size; |
58 | 58 | ||
59 | unsigned long highstart_pfn, highend_pfn; | 59 | unsigned long highstart_pfn, highend_pfn; |
60 | 60 | ||
61 | pte_t *kmap_pte; | 61 | pte_t *kmap_pte; |
62 | pgprot_t kmap_prot; | 62 | pgprot_t kmap_prot; |
63 | 63 | ||
64 | #define kmap_get_fixmap_pte(vaddr) \ | 64 | #define kmap_get_fixmap_pte(vaddr) \ |
65 | pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) | 65 | pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) |
66 | 66 | ||
67 | void __init kmap_init(void) | 67 | void __init kmap_init(void) |
68 | { | 68 | { |
69 | /* cache the first kmap pte */ | 69 | /* cache the first kmap pte */ |
70 | kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); | 70 | kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); |
71 | kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); | 71 | kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); |
72 | } | 72 | } |
73 | 73 | ||
74 | void show_mem(unsigned int filter) | 74 | void show_mem(unsigned int filter) |
75 | { | 75 | { |
76 | printk("Mem-info:\n"); | 76 | printk("Mem-info:\n"); |
77 | show_free_areas(filter); | 77 | show_free_areas(filter); |
78 | printk("Free swap: %6ldkB\n", | 78 | printk("Free swap: %6ldkB\n", |
79 | nr_swap_pages << (PAGE_SHIFT-10)); | 79 | nr_swap_pages << (PAGE_SHIFT-10)); |
80 | printk("%ld pages of RAM\n", totalram_pages); | 80 | printk("%ld pages of RAM\n", totalram_pages); |
81 | printk("%ld free pages\n", nr_free_pages()); | 81 | printk("%ld free pages\n", nr_free_pages()); |
82 | #if 0 /* undefined pgtable_cache_size, pgd_cache_size */ | 82 | #if 0 /* undefined pgtable_cache_size, pgd_cache_size */ |
83 | printk("%ld pages in page table cache\n",pgtable_cache_size); | 83 | printk("%ld pages in page table cache\n",pgtable_cache_size); |
84 | #ifndef CONFIG_SMP | 84 | #ifndef CONFIG_SMP |
85 | if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) | 85 | if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) |
86 | printk("%ld entries in page dir cache\n",pgd_cache_size); | 86 | printk("%ld entries in page dir cache\n",pgd_cache_size); |
87 | #endif | 87 | #endif |
88 | #endif | 88 | #endif |
89 | } | 89 | } |
90 | 90 | ||
91 | void __init sparc_context_init(int numctx) | 91 | void __init sparc_context_init(int numctx) |
92 | { | 92 | { |
93 | int ctx; | 93 | int ctx; |
94 | 94 | ||
95 | ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); | 95 | ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); |
96 | 96 | ||
97 | for(ctx = 0; ctx < numctx; ctx++) { | 97 | for(ctx = 0; ctx < numctx; ctx++) { |
98 | struct ctx_list *clist; | 98 | struct ctx_list *clist; |
99 | 99 | ||
100 | clist = (ctx_list_pool + ctx); | 100 | clist = (ctx_list_pool + ctx); |
101 | clist->ctx_number = ctx; | 101 | clist->ctx_number = ctx; |
102 | clist->ctx_mm = NULL; | 102 | clist->ctx_mm = NULL; |
103 | } | 103 | } |
104 | ctx_free.next = ctx_free.prev = &ctx_free; | 104 | ctx_free.next = ctx_free.prev = &ctx_free; |
105 | ctx_used.next = ctx_used.prev = &ctx_used; | 105 | ctx_used.next = ctx_used.prev = &ctx_used; |
106 | for(ctx = 0; ctx < numctx; ctx++) | 106 | for(ctx = 0; ctx < numctx; ctx++) |
107 | add_to_free_ctxlist(ctx_list_pool + ctx); | 107 | add_to_free_ctxlist(ctx_list_pool + ctx); |
108 | } | 108 | } |
109 | 109 | ||
110 | extern unsigned long cmdline_memory_size; | 110 | extern unsigned long cmdline_memory_size; |
111 | unsigned long last_valid_pfn; | 111 | unsigned long last_valid_pfn; |
112 | 112 | ||
113 | unsigned long calc_highpages(void) | 113 | unsigned long calc_highpages(void) |
114 | { | 114 | { |
115 | int i; | 115 | int i; |
116 | int nr = 0; | 116 | int nr = 0; |
117 | 117 | ||
118 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 118 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { |
119 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | 119 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; |
120 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | 120 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; |
121 | 121 | ||
122 | if (end_pfn <= max_low_pfn) | 122 | if (end_pfn <= max_low_pfn) |
123 | continue; | 123 | continue; |
124 | 124 | ||
125 | if (start_pfn < max_low_pfn) | 125 | if (start_pfn < max_low_pfn) |
126 | start_pfn = max_low_pfn; | 126 | start_pfn = max_low_pfn; |
127 | 127 | ||
128 | nr += end_pfn - start_pfn; | 128 | nr += end_pfn - start_pfn; |
129 | } | 129 | } |
130 | 130 | ||
131 | return nr; | 131 | return nr; |
132 | } | 132 | } |
133 | 133 | ||
134 | static unsigned long calc_max_low_pfn(void) | 134 | static unsigned long calc_max_low_pfn(void) |
135 | { | 135 | { |
136 | int i; | 136 | int i; |
137 | unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); | 137 | unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); |
138 | unsigned long curr_pfn, last_pfn; | 138 | unsigned long curr_pfn, last_pfn; |
139 | 139 | ||
140 | last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; | 140 | last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; |
141 | for (i = 1; sp_banks[i].num_bytes != 0; i++) { | 141 | for (i = 1; sp_banks[i].num_bytes != 0; i++) { |
142 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | 142 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; |
143 | 143 | ||
144 | if (curr_pfn >= tmp) { | 144 | if (curr_pfn >= tmp) { |
145 | if (last_pfn < tmp) | 145 | if (last_pfn < tmp) |
146 | tmp = last_pfn; | 146 | tmp = last_pfn; |
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | 150 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; |
151 | } | 151 | } |
152 | 152 | ||
153 | return tmp; | 153 | return tmp; |
154 | } | 154 | } |
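To see the bank scan above in action, here is a standalone sketch with invented numbers (two 128MB banks, 4 KB pages, max_low_pfn fixed at 0x10000); only the pfns beyond the low-memory limit count as highmem:

    /* Toy model of calc_highpages(); the bank layout is made up. */
    #include <stdio.h>

    struct bank { unsigned long base, bytes; };

    int main(void)
    {
            const unsigned long page_shift = 12, max_low_pfn = 0x10000;
            struct bank banks[] = {
                    { 0x00000000, 0x8000000 },  /* 128MB of low memory */
                    { 0x10000000, 0x8000000 },  /* 128MB above the limit */
                    { 0, 0 }                    /* terminator */
            };
            unsigned long nr = 0;

            for (int i = 0; banks[i].bytes != 0; i++) {
                    unsigned long s = banks[i].base >> page_shift;
                    unsigned long e = (banks[i].base + banks[i].bytes) >> page_shift;

                    if (e <= max_low_pfn)
                            continue;           /* bank entirely low memory */
                    if (s < max_low_pfn)
                            s = max_low_pfn;    /* clip off the low part */
                    nr += e - s;
            }
            printf("high pfns: %#lx\n", nr);    /* prints 0x8000 here */
            return 0;
    }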
155 | 155 | ||
156 | unsigned long __init bootmem_init(unsigned long *pages_avail) | 156 | unsigned long __init bootmem_init(unsigned long *pages_avail) |
157 | { | 157 | { |
158 | unsigned long bootmap_size, start_pfn; | 158 | unsigned long bootmap_size, start_pfn; |
159 | unsigned long end_of_phys_memory = 0UL; | 159 | unsigned long end_of_phys_memory = 0UL; |
160 | unsigned long bootmap_pfn, bytes_avail, size; | 160 | unsigned long bootmap_pfn, bytes_avail, size; |
161 | int i; | 161 | int i; |
162 | 162 | ||
163 | bytes_avail = 0UL; | 163 | bytes_avail = 0UL; |
164 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 164 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { |
165 | end_of_phys_memory = sp_banks[i].base_addr + | 165 | end_of_phys_memory = sp_banks[i].base_addr + |
166 | sp_banks[i].num_bytes; | 166 | sp_banks[i].num_bytes; |
167 | bytes_avail += sp_banks[i].num_bytes; | 167 | bytes_avail += sp_banks[i].num_bytes; |
168 | if (cmdline_memory_size) { | 168 | if (cmdline_memory_size) { |
169 | if (bytes_avail > cmdline_memory_size) { | 169 | if (bytes_avail > cmdline_memory_size) { |
170 | unsigned long slack = bytes_avail - cmdline_memory_size; | 170 | unsigned long slack = bytes_avail - cmdline_memory_size; |
171 | 171 | ||
172 | bytes_avail -= slack; | 172 | bytes_avail -= slack; |
173 | end_of_phys_memory -= slack; | 173 | end_of_phys_memory -= slack; |
174 | 174 | ||
175 | sp_banks[i].num_bytes -= slack; | 175 | sp_banks[i].num_bytes -= slack; |
176 | if (sp_banks[i].num_bytes == 0) { | 176 | if (sp_banks[i].num_bytes == 0) { |
177 | sp_banks[i].base_addr = 0xdeadbeef; | 177 | sp_banks[i].base_addr = 0xdeadbeef; |
178 | } else { | 178 | } else { |
179 | sp_banks[i+1].num_bytes = 0; | 179 | sp_banks[i+1].num_bytes = 0; |
180 | sp_banks[i+1].base_addr = 0xdeadbeef; | 180 | sp_banks[i+1].base_addr = 0xdeadbeef; |
181 | } | 181 | } |
182 | break; | 182 | break; |
183 | } | 183 | } |
184 | } | 184 | } |
185 | } | 185 | } |
186 | 186 | ||
187 | /* Start with the page-aligned address of the last symbol in the kernel | 187 | /* Start with the page-aligned address of the last symbol in the kernel |
188 | * image. | 188 | * image. |
189 | */ | 189 | */ |
190 | start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end)); | 190 | start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end)); |
191 | 191 | ||
192 | /* Now shift down to get the real physical page frame number. */ | 192 | /* Now shift down to get the real physical page frame number. */ |
193 | start_pfn >>= PAGE_SHIFT; | 193 | start_pfn >>= PAGE_SHIFT; |
194 | 194 | ||
195 | bootmap_pfn = start_pfn; | 195 | bootmap_pfn = start_pfn; |
196 | 196 | ||
197 | max_pfn = end_of_phys_memory >> PAGE_SHIFT; | 197 | max_pfn = end_of_phys_memory >> PAGE_SHIFT; |
198 | 198 | ||
199 | max_low_pfn = max_pfn; | 199 | max_low_pfn = max_pfn; |
200 | highstart_pfn = highend_pfn = max_pfn; | 200 | highstart_pfn = highend_pfn = max_pfn; |
201 | 201 | ||
202 | if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { | 202 | if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { |
203 | highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); | 203 | highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); |
204 | max_low_pfn = calc_max_low_pfn(); | 204 | max_low_pfn = calc_max_low_pfn(); |
205 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 205 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
206 | calc_highpages() >> (20 - PAGE_SHIFT)); | 206 | calc_highpages() >> (20 - PAGE_SHIFT)); |
207 | } | 207 | } |
208 | 208 | ||
209 | #ifdef CONFIG_BLK_DEV_INITRD | 209 | #ifdef CONFIG_BLK_DEV_INITRD |
210 | /* Now check the initial ramdisk so that the bootmap does not overwrite it */ | 210 | /* Now check the initial ramdisk so that the bootmap does not overwrite it */ |
211 | if (sparc_ramdisk_image) { | 211 | if (sparc_ramdisk_image) { |
212 | if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE) | 212 | if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE) |
213 | sparc_ramdisk_image -= KERNBASE; | 213 | sparc_ramdisk_image -= KERNBASE; |
214 | initrd_start = sparc_ramdisk_image + phys_base; | 214 | initrd_start = sparc_ramdisk_image + phys_base; |
215 | initrd_end = initrd_start + sparc_ramdisk_size; | 215 | initrd_end = initrd_start + sparc_ramdisk_size; |
216 | if (initrd_end > end_of_phys_memory) { | 216 | if (initrd_end > end_of_phys_memory) { |
217 | printk(KERN_CRIT "initrd extends beyond end of memory " | 217 | printk(KERN_CRIT "initrd extends beyond end of memory " |
218 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | 218 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", |
219 | initrd_end, end_of_phys_memory); | 219 | initrd_end, end_of_phys_memory); |
220 | initrd_start = 0; | 220 | initrd_start = 0; |
221 | } | 221 | } |
222 | if (initrd_start) { | 222 | if (initrd_start) { |
223 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | 223 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && |
224 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | 224 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) |
225 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | 225 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; |
226 | } | 226 | } |
227 | } | 227 | } |
228 | #endif | 228 | #endif |
229 | /* Initialize the boot-time allocator. */ | 229 | /* Initialize the boot-time allocator. */ |
230 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, | 230 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, |
231 | max_low_pfn); | 231 | max_low_pfn); |
232 | 232 | ||
233 | /* Now register the available physical memory with the | 233 | /* Now register the available physical memory with the |
234 | * allocator. | 234 | * allocator. |
235 | */ | 235 | */ |
236 | *pages_avail = 0; | 236 | *pages_avail = 0; |
237 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 237 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { |
238 | unsigned long curr_pfn, last_pfn; | 238 | unsigned long curr_pfn, last_pfn; |
239 | 239 | ||
240 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | 240 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; |
241 | if (curr_pfn >= max_low_pfn) | 241 | if (curr_pfn >= max_low_pfn) |
242 | break; | 242 | break; |
243 | 243 | ||
244 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | 244 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; |
245 | if (last_pfn > max_low_pfn) | 245 | if (last_pfn > max_low_pfn) |
246 | last_pfn = max_low_pfn; | 246 | last_pfn = max_low_pfn; |
247 | 247 | ||
248 | /* | 248 | /* |
249 | * .. finally, did all the rounding and playing | 249 | * .. finally, did all the rounding and playing |
250 | * around just make the area go away? | 250 | * around just make the area go away? |
251 | */ | 251 | */ |
252 | if (last_pfn <= curr_pfn) | 252 | if (last_pfn <= curr_pfn) |
253 | continue; | 253 | continue; |
254 | 254 | ||
255 | size = (last_pfn - curr_pfn) << PAGE_SHIFT; | 255 | size = (last_pfn - curr_pfn) << PAGE_SHIFT; |
256 | *pages_avail += last_pfn - curr_pfn; | 256 | *pages_avail += last_pfn - curr_pfn; |
257 | 257 | ||
258 | free_bootmem(sp_banks[i].base_addr, size); | 258 | free_bootmem(sp_banks[i].base_addr, size); |
259 | } | 259 | } |
260 | 260 | ||
261 | #ifdef CONFIG_BLK_DEV_INITRD | 261 | #ifdef CONFIG_BLK_DEV_INITRD |
262 | if (initrd_start) { | 262 | if (initrd_start) { |
263 | /* Reserve the initrd image area. */ | 263 | /* Reserve the initrd image area. */ |
264 | size = initrd_end - initrd_start; | 264 | size = initrd_end - initrd_start; |
265 | reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT); | 265 | reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT); |
266 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | 266 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; |
267 | 267 | ||
268 | initrd_start = (initrd_start - phys_base) + PAGE_OFFSET; | 268 | initrd_start = (initrd_start - phys_base) + PAGE_OFFSET; |
269 | initrd_end = (initrd_end - phys_base) + PAGE_OFFSET; | 269 | initrd_end = (initrd_end - phys_base) + PAGE_OFFSET; |
270 | } | 270 | } |
271 | #endif | 271 | #endif |
272 | /* Reserve the kernel text/data/bss. */ | 272 | /* Reserve the kernel text/data/bss. */ |
273 | size = (start_pfn << PAGE_SHIFT) - phys_base; | 273 | size = (start_pfn << PAGE_SHIFT) - phys_base; |
274 | reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT); | 274 | reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT); |
275 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | 275 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; |
276 | 276 | ||
277 | /* Reserve the bootmem map. We do not account for it | 277 | /* Reserve the bootmem map. We do not account for it |
278 | * in pages_avail because we will release that memory | 278 | * in pages_avail because we will release that memory |
279 | * in free_all_bootmem. | 279 | * in free_all_bootmem. |
280 | */ | 280 | */ |
281 | size = bootmap_size; | 281 | size = bootmap_size; |
282 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT); | 282 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT); |
283 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | 283 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; |
284 | 284 | ||
285 | return max_pfn; | 285 | return max_pfn; |
286 | } | 286 | } |
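The mem= trimming loop at the top of bootmem_init() is easy to check in isolation; a standalone sketch with an invented 192MB budget over two 128MB banks:

    /* Toy model of the cmdline_memory_size trimming above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long banks[] = { 128UL << 20, 128UL << 20, 0 };
            unsigned long budget = 192UL << 20;    /* mem=192M */
            unsigned long avail = 0;

            for (int i = 0; banks[i] != 0; i++) {
                    avail += banks[i];
                    if (avail > budget) {
                            unsigned long slack = avail - budget;

                            avail -= slack;
                            banks[i] -= slack;     /* shrink the last bank */
                            banks[i + 1] = 0;      /* terminate the list */
                            break;
                    }
            }
            printf("%luMB usable\n", avail >> 20); /* prints 192MB usable */
            return 0;
    }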
287 | 287 | ||
288 | /* | 288 | /* |
289 | * check_pgt_cache | ||
290 | * | ||
291 | * This is called at the end of unmapping a VMA (zap_page_range), | ||
292 | * to rescan the page cache for architecture-specific things. | ||
293 | * Most architectures define check_pgt_cache empty. | ||
294 | * | ||
295 | * We simply copy the 2.4 implementation for now. | ||
296 | */ | ||
297 | static int pgt_cache_water[2] = { 25, 50 }; | ||
298 | |||
299 | void check_pgt_cache(void) | ||
300 | { | ||
301 | do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]); | ||
302 | } | ||
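The deleted hook follows the classic high/low watermark shape: do nothing until the cache grows past the high mark, then trim it back down to the low mark. A standalone sketch of that pattern (the counter and helper are simulated for illustration; this is not the removed 2.4 code):

    /* Watermark trimming pattern, simulated. */
    #include <stdio.h>

    static int cached_tables = 60;  /* pretend 60 page tables are cached */

    static void free_one_cached_table(void) { cached_tables--; }

    static void check_pgt_cache_sketch(int low, int high)
    {
            if (cached_tables > high) {            /* above the high mark? */
                    do {
                            free_one_cached_table();
                    } while (cached_tables > low); /* trim to the low mark */
            }
    }

    int main(void)
    {
            check_pgt_cache_sketch(25, 50);        /* the water marks above */
            printf("%d tables kept\n", cached_tables); /* prints 25 */
            return 0;
    }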
303 | |||
304 | /* | ||
305 | * paging_init() sets up the page tables: we call the MMU-specific | 289 | * paging_init() sets up the page tables: we call the MMU-specific |
306 | * init routine based on the Sun model type of the Sparc. | 290 | * init routine based on the Sun model type of the Sparc. |
307 | * | 291 | * |
308 | */ | 292 | */ |
309 | extern void srmmu_paging_init(void); | 293 | extern void srmmu_paging_init(void); |
310 | extern void device_scan(void); | 294 | extern void device_scan(void); |
311 | 295 | ||
312 | pgprot_t PAGE_SHARED __read_mostly; | 296 | pgprot_t PAGE_SHARED __read_mostly; |
313 | EXPORT_SYMBOL(PAGE_SHARED); | 297 | EXPORT_SYMBOL(PAGE_SHARED); |
314 | 298 | ||
315 | void __init paging_init(void) | 299 | void __init paging_init(void) |
316 | { | 300 | { |
317 | switch(sparc_cpu_model) { | 301 | switch(sparc_cpu_model) { |
318 | case sparc_leon: | 302 | case sparc_leon: |
319 | leon_init(); | 303 | leon_init(); |
320 | /* fall through */ | 304 | /* fall through */ |
321 | case sun4m: | 305 | case sun4m: |
322 | case sun4d: | 306 | case sun4d: |
323 | srmmu_paging_init(); | 307 | srmmu_paging_init(); |
324 | sparc_unmapped_base = 0x50000000; | 308 | sparc_unmapped_base = 0x50000000; |
325 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); | 309 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); |
326 | break; | 310 | break; |
327 | default: | 311 | default: |
328 | prom_printf("paging_init: Cannot init paging on this Sparc\n"); | 312 | prom_printf("paging_init: Cannot init paging on this Sparc\n"); |
329 | prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); | 313 | prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); |
330 | prom_printf("paging_init: Halting...\n"); | 314 | prom_printf("paging_init: Halting...\n"); |
331 | prom_halt(); | 315 | prom_halt(); |
332 | } | 316 | } |
333 | 317 | ||
334 | /* Initialize the protection map with non-constant, MMU-dependent values. */ | 318 | /* Initialize the protection map with non-constant, MMU-dependent values. */ |
335 | protection_map[0] = PAGE_NONE; | 319 | protection_map[0] = PAGE_NONE; |
336 | protection_map[1] = PAGE_READONLY; | 320 | protection_map[1] = PAGE_READONLY; |
337 | protection_map[2] = PAGE_COPY; | 321 | protection_map[2] = PAGE_COPY; |
338 | protection_map[3] = PAGE_COPY; | 322 | protection_map[3] = PAGE_COPY; |
339 | protection_map[4] = PAGE_READONLY; | 323 | protection_map[4] = PAGE_READONLY; |
340 | protection_map[5] = PAGE_READONLY; | 324 | protection_map[5] = PAGE_READONLY; |
341 | protection_map[6] = PAGE_COPY; | 325 | protection_map[6] = PAGE_COPY; |
342 | protection_map[7] = PAGE_COPY; | 326 | protection_map[7] = PAGE_COPY; |
343 | protection_map[8] = PAGE_NONE; | 327 | protection_map[8] = PAGE_NONE; |
344 | protection_map[9] = PAGE_READONLY; | 328 | protection_map[9] = PAGE_READONLY; |
345 | protection_map[10] = PAGE_SHARED; | 329 | protection_map[10] = PAGE_SHARED; |
346 | protection_map[11] = PAGE_SHARED; | 330 | protection_map[11] = PAGE_SHARED; |
347 | protection_map[12] = PAGE_READONLY; | 331 | protection_map[12] = PAGE_READONLY; |
348 | protection_map[13] = PAGE_READONLY; | 332 | protection_map[13] = PAGE_READONLY; |
349 | protection_map[14] = PAGE_SHARED; | 333 | protection_map[14] = PAGE_SHARED; |
350 | protection_map[15] = PAGE_SHARED; | 334 | protection_map[15] = PAGE_SHARED; |
351 | btfixup(); | 335 | btfixup(); |
352 | prom_build_devicetree(); | 336 | prom_build_devicetree(); |
353 | of_fill_in_cpu_data(); | 337 | of_fill_in_cpu_data(); |
354 | device_scan(); | 338 | device_scan(); |
355 | } | 339 | } |
356 | 340 | ||
357 | static void __init taint_real_pages(void) | 341 | static void __init taint_real_pages(void) |
358 | { | 342 | { |
359 | int i; | 343 | int i; |
360 | 344 | ||
361 | for (i = 0; sp_banks[i].num_bytes; i++) { | 345 | for (i = 0; sp_banks[i].num_bytes; i++) { |
362 | unsigned long start, end; | 346 | unsigned long start, end; |
363 | 347 | ||
364 | start = sp_banks[i].base_addr; | 348 | start = sp_banks[i].base_addr; |
365 | end = start + sp_banks[i].num_bytes; | 349 | end = start + sp_banks[i].num_bytes; |
366 | 350 | ||
367 | while (start < end) { | 351 | while (start < end) { |
368 | set_bit(start >> 20, sparc_valid_addr_bitmap); | 352 | set_bit(start >> 20, sparc_valid_addr_bitmap); |
369 | start += PAGE_SIZE; | 353 | start += PAGE_SIZE; |
370 | } | 354 | } |
371 | } | 355 | } |
372 | } | 356 | } |
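The ">> 20" above gives sparc_valid_addr_bitmap a 1MB granularity: every page inside the same megabyte of physical address space sets the same bit. A minimal demonstration:

    /* One bitmap bit per 1MB of physical address space. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long addrs[] = { 0x00000000, 0x000ff000, 0x00100000 };

            for (int i = 0; i < 3; i++)
                    printf("addr 0x%08lx -> bit %lu\n", addrs[i], addrs[i] >> 20);
            /* prints bit 0, bit 0, bit 1 */
            return 0;
    }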
373 | 357 | ||
374 | static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) | 358 | static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) |
375 | { | 359 | { |
376 | unsigned long tmp; | 360 | unsigned long tmp; |
377 | 361 | ||
378 | #ifdef CONFIG_DEBUG_HIGHMEM | 362 | #ifdef CONFIG_DEBUG_HIGHMEM |
379 | printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); | 363 | printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); |
380 | #endif | 364 | #endif |
381 | 365 | ||
382 | for (tmp = start_pfn; tmp < end_pfn; tmp++) { | 366 | for (tmp = start_pfn; tmp < end_pfn; tmp++) { |
383 | struct page *page = pfn_to_page(tmp); | 367 | struct page *page = pfn_to_page(tmp); |
384 | 368 | ||
385 | ClearPageReserved(page); | 369 | ClearPageReserved(page); |
386 | init_page_count(page); | 370 | init_page_count(page); |
387 | __free_page(page); | 371 | __free_page(page); |
388 | totalhigh_pages++; | 372 | totalhigh_pages++; |
389 | } | 373 | } |
390 | } | 374 | } |
391 | 375 | ||
392 | void __init mem_init(void) | 376 | void __init mem_init(void) |
393 | { | 377 | { |
394 | int codepages = 0; | 378 | int codepages = 0; |
395 | int datapages = 0; | 379 | int datapages = 0; |
396 | int initpages = 0; | 380 | int initpages = 0; |
397 | int reservedpages = 0; | 381 | int reservedpages = 0; |
398 | int i; | 382 | int i; |
399 | 383 | ||
400 | if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { | 384 | if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { |
401 | prom_printf("BUG: fixmap and pkmap areas overlap\n"); | 385 | prom_printf("BUG: fixmap and pkmap areas overlap\n"); |
402 | prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n", | 386 | prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n", |
403 | PKMAP_BASE, | 387 | PKMAP_BASE, |
404 | (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | 388 | (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, |
405 | FIXADDR_START); | 389 | FIXADDR_START); |
406 | prom_printf("Please mail sparclinux@vger.kernel.org.\n"); | 390 | prom_printf("Please mail sparclinux@vger.kernel.org.\n"); |
407 | prom_halt(); | 391 | prom_halt(); |
408 | } | 392 | } |
409 | 393 | ||
410 | 394 | ||
411 | /* Saves us work later. */ | 395 | /* Saves us work later. */ |
412 | memset((void *)&empty_zero_page, 0, PAGE_SIZE); | 396 | memset((void *)&empty_zero_page, 0, PAGE_SIZE); |
413 | 397 | ||
414 | i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); | 398 | i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); |
415 | i += 1; | 399 | i += 1; |
416 | sparc_valid_addr_bitmap = (unsigned long *) | 400 | sparc_valid_addr_bitmap = (unsigned long *) |
417 | __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL); | 401 | __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL); |
418 | 402 | ||
419 | if (sparc_valid_addr_bitmap == NULL) { | 403 | if (sparc_valid_addr_bitmap == NULL) { |
420 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | 404 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); |
421 | prom_halt(); | 405 | prom_halt(); |
422 | } | 406 | } |
423 | memset(sparc_valid_addr_bitmap, 0, i << 2); | 407 | memset(sparc_valid_addr_bitmap, 0, i << 2); |
424 | 408 | ||
425 | taint_real_pages(); | 409 | taint_real_pages(); |
426 | 410 | ||
427 | max_mapnr = last_valid_pfn - pfn_base; | 411 | max_mapnr = last_valid_pfn - pfn_base; |
428 | high_memory = __va(max_low_pfn << PAGE_SHIFT); | 412 | high_memory = __va(max_low_pfn << PAGE_SHIFT); |
429 | 413 | ||
430 | totalram_pages = free_all_bootmem(); | 414 | totalram_pages = free_all_bootmem(); |
431 | 415 | ||
432 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 416 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { |
433 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | 417 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; |
434 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | 418 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; |
435 | 419 | ||
436 | num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT; | 420 | num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT; |
437 | 421 | ||
438 | if (end_pfn <= highstart_pfn) | 422 | if (end_pfn <= highstart_pfn) |
439 | continue; | 423 | continue; |
440 | 424 | ||
441 | if (start_pfn < highstart_pfn) | 425 | if (start_pfn < highstart_pfn) |
442 | start_pfn = highstart_pfn; | 426 | start_pfn = highstart_pfn; |
443 | 427 | ||
444 | map_high_region(start_pfn, end_pfn); | 428 | map_high_region(start_pfn, end_pfn); |
445 | } | 429 | } |
446 | 430 | ||
447 | totalram_pages += totalhigh_pages; | 431 | totalram_pages += totalhigh_pages; |
448 | 432 | ||
449 | codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); | 433 | codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); |
450 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | 434 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; |
451 | datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); | 435 | datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); |
452 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | 436 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; |
453 | initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); | 437 | initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); |
454 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | 438 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; |
455 | 439 | ||
456 | /* Ignore memory holes for the purpose of counting reserved pages */ | 440 | /* Ignore memory holes for the purpose of counting reserved pages */ |
457 | for (i=0; i < max_low_pfn; i++) | 441 | for (i=0; i < max_low_pfn; i++) |
458 | if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap) | 442 | if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap) |
459 | && PageReserved(pfn_to_page(i))) | 443 | && PageReserved(pfn_to_page(i))) |
460 | reservedpages++; | 444 | reservedpages++; |
461 | 445 | ||
462 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", | 446 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", |
463 | nr_free_pages() << (PAGE_SHIFT-10), | 447 | nr_free_pages() << (PAGE_SHIFT-10), |
464 | num_physpages << (PAGE_SHIFT - 10), | 448 | num_physpages << (PAGE_SHIFT - 10), |
465 | codepages << (PAGE_SHIFT-10), | 449 | codepages << (PAGE_SHIFT-10), |
466 | reservedpages << (PAGE_SHIFT - 10), | 450 | reservedpages << (PAGE_SHIFT - 10), |
467 | datapages << (PAGE_SHIFT-10), | 451 | datapages << (PAGE_SHIFT-10), |
468 | initpages << (PAGE_SHIFT-10), | 452 | initpages << (PAGE_SHIFT-10), |
469 | totalhigh_pages << (PAGE_SHIFT-10)); | 453 | totalhigh_pages << (PAGE_SHIFT-10)); |
470 | } | 454 | } |
471 | 455 | ||
472 | void free_initmem (void) | 456 | void free_initmem (void) |
473 | { | 457 | { |
474 | unsigned long addr; | 458 | unsigned long addr; |
475 | unsigned long freed; | 459 | unsigned long freed; |
476 | 460 | ||
477 | addr = (unsigned long)(&__init_begin); | 461 | addr = (unsigned long)(&__init_begin); |
478 | freed = (unsigned long)(&__init_end) - addr; | 462 | freed = (unsigned long)(&__init_end) - addr; |
479 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | 463 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { |
480 | struct page *p; | 464 | struct page *p; |
481 | 465 | ||
482 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | 466 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
483 | p = virt_to_page(addr); | 467 | p = virt_to_page(addr); |
484 | 468 | ||
485 | ClearPageReserved(p); | 469 | ClearPageReserved(p); |
486 | init_page_count(p); | 470 | init_page_count(p); |
487 | __free_page(p); | 471 | __free_page(p); |
488 | totalram_pages++; | 472 | totalram_pages++; |
489 | num_physpages++; | 473 | num_physpages++; |
490 | } | 474 | } |
491 | printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n", | 475 | printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n", |
492 | freed >> 10); | 476 | freed >> 10); |
493 | } | 477 | } |
494 | 478 | ||
495 | #ifdef CONFIG_BLK_DEV_INITRD | 479 | #ifdef CONFIG_BLK_DEV_INITRD |
496 | void free_initrd_mem(unsigned long start, unsigned long end) | 480 | void free_initrd_mem(unsigned long start, unsigned long end) |
497 | { | 481 | { |
498 | if (start < end) | 482 | if (start < end) |
499 | printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", | 483 | printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", |
500 | (end - start) >> 10); | 484 | (end - start) >> 10); |
501 | for (; start < end; start += PAGE_SIZE) { | 485 | for (; start < end; start += PAGE_SIZE) { |
502 | struct page *p; | 486 | struct page *p; |
503 | 487 | ||
504 | memset((void *)start, POISON_FREE_INITMEM, PAGE_SIZE); | 488 | memset((void *)start, POISON_FREE_INITMEM, PAGE_SIZE); |
505 | p = virt_to_page(start); | 489 | p = virt_to_page(start); |
506 | 490 | ||
507 | ClearPageReserved(p); | 491 | ClearPageReserved(p); |
508 | init_page_count(p); | 492 | init_page_count(p); |
509 | __free_page(p); | 493 | __free_page(p); |
510 | totalram_pages++; | 494 | totalram_pages++; |
511 | num_physpages++; | 495 | num_physpages++; |
512 | } | 496 | } |
513 | } | 497 | } |
514 | #endif | 498 | #endif |
515 | 499 | ||
516 | void sparc_flush_page_to_ram(struct page *page) | 500 | void sparc_flush_page_to_ram(struct page *page) |
517 | { | 501 | { |
518 | unsigned long vaddr = (unsigned long)page_address(page); | 502 | unsigned long vaddr = (unsigned long)page_address(page); |
519 | 503 | ||
520 | if (vaddr) | 504 | if (vaddr) |
521 | __flush_page_to_ram(vaddr); | 505 | __flush_page_to_ram(vaddr); |
522 | } | 506 | } |
523 | EXPORT_SYMBOL(sparc_flush_page_to_ram); | 507 | EXPORT_SYMBOL(sparc_flush_page_to_ram); |
524 | 508 |
arch/sparc/mm/srmmu.c
1 | /* | 1 | /* |
2 | * srmmu.c: SRMMU specific routines for memory management. | 2 | * srmmu.c: SRMMU specific routines for memory management. |
3 | * | 3 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) |
5 | * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) | 5 | * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) |
6 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | 6 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) |
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
8 | * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) | 8 | * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/bootmem.h> | 17 | #include <linux/bootmem.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/kdebug.h> | 20 | #include <linux/kdebug.h> |
21 | #include <linux/log2.h> | 21 | #include <linux/log2.h> |
22 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
23 | 23 | ||
24 | #include <asm/bitext.h> | 24 | #include <asm/bitext.h> |
25 | #include <asm/page.h> | 25 | #include <asm/page.h> |
26 | #include <asm/pgalloc.h> | 26 | #include <asm/pgalloc.h> |
27 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
28 | #include <asm/io.h> | 28 | #include <asm/io.h> |
29 | #include <asm/vaddrs.h> | 29 | #include <asm/vaddrs.h> |
30 | #include <asm/traps.h> | 30 | #include <asm/traps.h> |
31 | #include <asm/smp.h> | 31 | #include <asm/smp.h> |
32 | #include <asm/mbus.h> | 32 | #include <asm/mbus.h> |
33 | #include <asm/cache.h> | 33 | #include <asm/cache.h> |
34 | #include <asm/oplib.h> | 34 | #include <asm/oplib.h> |
35 | #include <asm/asi.h> | 35 | #include <asm/asi.h> |
36 | #include <asm/msi.h> | 36 | #include <asm/msi.h> |
37 | #include <asm/mmu_context.h> | 37 | #include <asm/mmu_context.h> |
38 | #include <asm/io-unit.h> | 38 | #include <asm/io-unit.h> |
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | 41 | ||
42 | /* Now the cpu specific definitions. */ | 42 | /* Now the cpu specific definitions. */ |
43 | #include <asm/viking.h> | 43 | #include <asm/viking.h> |
44 | #include <asm/mxcc.h> | 44 | #include <asm/mxcc.h> |
45 | #include <asm/ross.h> | 45 | #include <asm/ross.h> |
46 | #include <asm/tsunami.h> | 46 | #include <asm/tsunami.h> |
47 | #include <asm/swift.h> | 47 | #include <asm/swift.h> |
48 | #include <asm/turbosparc.h> | 48 | #include <asm/turbosparc.h> |
49 | #include <asm/leon.h> | 49 | #include <asm/leon.h> |
50 | 50 | ||
51 | #include <asm/btfixup.h> | 51 | #include <asm/btfixup.h> |
52 | 52 | ||
53 | enum mbus_module srmmu_modtype; | 53 | enum mbus_module srmmu_modtype; |
54 | static unsigned int hwbug_bitmask; | 54 | static unsigned int hwbug_bitmask; |
55 | int vac_cache_size; | 55 | int vac_cache_size; |
56 | int vac_line_size; | 56 | int vac_line_size; |
57 | 57 | ||
58 | extern struct resource sparc_iomap; | 58 | extern struct resource sparc_iomap; |
59 | 59 | ||
60 | extern unsigned long last_valid_pfn; | 60 | extern unsigned long last_valid_pfn; |
61 | 61 | ||
62 | extern unsigned long page_kernel; | 62 | extern unsigned long page_kernel; |
63 | 63 | ||
64 | static pgd_t *srmmu_swapper_pg_dir; | 64 | static pgd_t *srmmu_swapper_pg_dir; |
65 | 65 | ||
66 | #ifdef CONFIG_SMP | 66 | #ifdef CONFIG_SMP |
67 | #define FLUSH_BEGIN(mm) | 67 | #define FLUSH_BEGIN(mm) |
68 | #define FLUSH_END | 68 | #define FLUSH_END |
69 | #else | 69 | #else |
70 | #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { | 70 | #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { |
71 | #define FLUSH_END } | 71 | #define FLUSH_END } |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) | 74 | BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) |
75 | #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) | 75 | #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) |
76 | 76 | ||
77 | int flush_page_for_dma_global = 1; | 77 | int flush_page_for_dma_global = 1; |
78 | 78 | ||
79 | #ifdef CONFIG_SMP | 79 | #ifdef CONFIG_SMP |
80 | BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) | 80 | BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) |
81 | #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) | 81 | #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) |
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | char *srmmu_name; | 84 | char *srmmu_name; |
85 | 85 | ||
86 | ctxd_t *srmmu_ctx_table_phys; | 86 | ctxd_t *srmmu_ctx_table_phys; |
87 | static ctxd_t *srmmu_context_table; | 87 | static ctxd_t *srmmu_context_table; |
88 | 88 | ||
89 | int viking_mxcc_present; | 89 | int viking_mxcc_present; |
90 | static DEFINE_SPINLOCK(srmmu_context_spinlock); | 90 | static DEFINE_SPINLOCK(srmmu_context_spinlock); |
91 | 91 | ||
92 | static int is_hypersparc; | 92 | static int is_hypersparc; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * In general all page table modifications should use the V8 atomic | 95 | * In general all page table modifications should use the V8 atomic |
96 | * swap instruction. This ensures the mmu and the cpu are in sync | 96 | * swap instruction. This ensures the mmu and the cpu are in sync |
97 | * with respect to ref/mod bits in the page tables. | 97 | * with respect to ref/mod bits in the page tables. |
98 | */ | 98 | */ |
99 | static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) | 99 | static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) |
100 | { | 100 | { |
101 | __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); | 101 | __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); |
102 | return value; | 102 | return value; |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) | 105 | static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) |
106 | { | 106 | { |
107 | srmmu_swap((unsigned long *)ptep, pte_val(pteval)); | 107 | srmmu_swap((unsigned long *)ptep, pte_val(pteval)); |
108 | } | 108 | } |
109 | 109 | ||
110 | /* The very generic SRMMU page table operations. */ | 110 | /* The very generic SRMMU page table operations. */ |
111 | static inline int srmmu_device_memory(unsigned long x) | 111 | static inline int srmmu_device_memory(unsigned long x) |
112 | { | 112 | { |
113 | return ((x & 0xF0000000) != 0); | 113 | return ((x & 0xF0000000) != 0); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int srmmu_cache_pagetables; | 116 | static int srmmu_cache_pagetables; |
117 | 117 | ||
118 | /* these will be initialized in srmmu_nocache_calcsize() */ | 118 | /* these will be initialized in srmmu_nocache_calcsize() */ |
119 | static unsigned long srmmu_nocache_size; | 119 | static unsigned long srmmu_nocache_size; |
120 | static unsigned long srmmu_nocache_end; | 120 | static unsigned long srmmu_nocache_end; |
121 | 121 | ||
122 | /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ | 122 | /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ |
123 | #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) | 123 | #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) |
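The "1 bit <=> 256 bytes" ratio follows directly from the shift: with sparc32's 4 KB pages (PAGE_SHIFT = 12), SRMMU_NOCACHE_BITMAP_SHIFT is 12 - 4 = 8, so each bitmap bit covers 1 << 8 = 256 bytes, which is 256 / 4 = 64 four-byte PTEs.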
124 | 124 | ||
125 | /* The context table is a nocache user with the biggest alignment needs. */ | 125 | /* The context table is a nocache user with the biggest alignment needs. */ |
126 | #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) | 126 | #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) |
127 | 127 | ||
128 | void *srmmu_nocache_pool; | 128 | void *srmmu_nocache_pool; |
129 | void *srmmu_nocache_bitmap; | 129 | void *srmmu_nocache_bitmap; |
130 | static struct bit_map srmmu_nocache_map; | 130 | static struct bit_map srmmu_nocache_map; |
131 | 131 | ||
132 | static unsigned long srmmu_pte_pfn(pte_t pte) | 132 | static unsigned long srmmu_pte_pfn(pte_t pte) |
133 | { | 133 | { |
134 | if (srmmu_device_memory(pte_val(pte))) { | 134 | if (srmmu_device_memory(pte_val(pte))) { |
135 | /* Just return something that will cause | 135 | /* Just return something that will cause |
136 | * pfn_valid() to return false. This makes | 136 | * pfn_valid() to return false. This makes |
137 | * copy_one_pte() just copy the | 137 | * copy_one_pte() just copy the |
138 | * PTE over directly. | 138 | * PTE over directly. |
139 | */ | 139 | */ |
140 | return ~0UL; | 140 | return ~0UL; |
141 | } | 141 | } |
142 | return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); | 142 | return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); |
143 | } | 143 | } |
144 | 144 | ||
145 | static struct page *srmmu_pmd_page(pmd_t pmd) | 145 | static struct page *srmmu_pmd_page(pmd_t pmd) |
146 | { | 146 | { |
147 | 147 | ||
148 | if (srmmu_device_memory(pmd_val(pmd))) | 148 | if (srmmu_device_memory(pmd_val(pmd))) |
149 | BUG(); | 149 | BUG(); |
150 | return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); | 150 | return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline unsigned long srmmu_pgd_page(pgd_t pgd) | 153 | static inline unsigned long srmmu_pgd_page(pgd_t pgd) |
154 | { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } | 154 | { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } |
155 | 155 | ||
156 | 156 | ||
157 | static inline int srmmu_pte_none(pte_t pte) | 157 | static inline int srmmu_pte_none(pte_t pte) |
158 | { return !(pte_val(pte) & 0xFFFFFFF); } | 158 | { return !(pte_val(pte) & 0xFFFFFFF); } |
159 | 159 | ||
160 | static inline int srmmu_pte_present(pte_t pte) | 160 | static inline int srmmu_pte_present(pte_t pte) |
161 | { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } | 161 | { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } |
162 | 162 | ||
163 | static inline void srmmu_pte_clear(pte_t *ptep) | 163 | static inline void srmmu_pte_clear(pte_t *ptep) |
164 | { srmmu_set_pte(ptep, __pte(0)); } | 164 | { srmmu_set_pte(ptep, __pte(0)); } |
165 | 165 | ||
166 | static inline int srmmu_pmd_none(pmd_t pmd) | 166 | static inline int srmmu_pmd_none(pmd_t pmd) |
167 | { return !(pmd_val(pmd) & 0xFFFFFFF); } | 167 | { return !(pmd_val(pmd) & 0xFFFFFFF); } |
168 | 168 | ||
169 | static inline int srmmu_pmd_bad(pmd_t pmd) | 169 | static inline int srmmu_pmd_bad(pmd_t pmd) |
170 | { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } | 170 | { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } |
171 | 171 | ||
172 | static inline int srmmu_pmd_present(pmd_t pmd) | 172 | static inline int srmmu_pmd_present(pmd_t pmd) |
173 | { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } | 173 | { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } |
174 | 174 | ||
175 | static inline void srmmu_pmd_clear(pmd_t *pmdp) { | 175 | static inline void srmmu_pmd_clear(pmd_t *pmdp) { |
176 | int i; | 176 | int i; |
177 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) | 177 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) |
178 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); | 178 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); |
179 | } | 179 | } |
180 | 180 | ||
181 | static inline int srmmu_pgd_none(pgd_t pgd) | 181 | static inline int srmmu_pgd_none(pgd_t pgd) |
182 | { return !(pgd_val(pgd) & 0xFFFFFFF); } | 182 | { return !(pgd_val(pgd) & 0xFFFFFFF); } |
183 | 183 | ||
184 | static inline int srmmu_pgd_bad(pgd_t pgd) | 184 | static inline int srmmu_pgd_bad(pgd_t pgd) |
185 | { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } | 185 | { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } |
186 | 186 | ||
187 | static inline int srmmu_pgd_present(pgd_t pgd) | 187 | static inline int srmmu_pgd_present(pgd_t pgd) |
188 | { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } | 188 | { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } |
189 | 189 | ||
190 | static inline void srmmu_pgd_clear(pgd_t * pgdp) | 190 | static inline void srmmu_pgd_clear(pgd_t * pgdp) |
191 | { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } | 191 | { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } |
192 | 192 | ||
193 | static inline pte_t srmmu_pte_wrprotect(pte_t pte) | 193 | static inline pte_t srmmu_pte_wrprotect(pte_t pte) |
194 | { return __pte(pte_val(pte) & ~SRMMU_WRITE);} | 194 | { return __pte(pte_val(pte) & ~SRMMU_WRITE);} |
195 | 195 | ||
196 | static inline pte_t srmmu_pte_mkclean(pte_t pte) | 196 | static inline pte_t srmmu_pte_mkclean(pte_t pte) |
197 | { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} | 197 | { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} |
198 | 198 | ||
199 | static inline pte_t srmmu_pte_mkold(pte_t pte) | 199 | static inline pte_t srmmu_pte_mkold(pte_t pte) |
200 | { return __pte(pte_val(pte) & ~SRMMU_REF);} | 200 | { return __pte(pte_val(pte) & ~SRMMU_REF);} |
201 | 201 | ||
202 | static inline pte_t srmmu_pte_mkwrite(pte_t pte) | 202 | static inline pte_t srmmu_pte_mkwrite(pte_t pte) |
203 | { return __pte(pte_val(pte) | SRMMU_WRITE);} | 203 | { return __pte(pte_val(pte) | SRMMU_WRITE);} |
204 | 204 | ||
205 | static inline pte_t srmmu_pte_mkdirty(pte_t pte) | 205 | static inline pte_t srmmu_pte_mkdirty(pte_t pte) |
206 | { return __pte(pte_val(pte) | SRMMU_DIRTY);} | 206 | { return __pte(pte_val(pte) | SRMMU_DIRTY);} |
207 | 207 | ||
208 | static inline pte_t srmmu_pte_mkyoung(pte_t pte) | 208 | static inline pte_t srmmu_pte_mkyoung(pte_t pte) |
209 | { return __pte(pte_val(pte) | SRMMU_REF);} | 209 | { return __pte(pte_val(pte) | SRMMU_REF);} |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Conversion functions: convert a page and protection to a page entry, | 212 | * Conversion functions: convert a page and protection to a page entry, |
213 | * and a page entry and page directory to the page they refer to. | 213 | * and a page entry and page directory to the page they refer to. |
214 | */ | 214 | */ |
215 | static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) | 215 | static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) |
216 | { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } | 216 | { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } |
217 | 217 | ||
218 | static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) | 218 | static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) |
219 | { return __pte(((page) >> 4) | pgprot_val(pgprot)); } | 219 | { return __pte(((page) >> 4) | pgprot_val(pgprot)); } |
220 | 220 | ||
221 | static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) | 221 | static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) |
222 | { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } | 222 | { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } |
223 | 223 | ||
224 | /* XXX should we hyper_flush_whole_icache here - Anton */ | 224 | /* XXX should we hyper_flush_whole_icache here - Anton */ |
225 | static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) | 225 | static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) |
226 | { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } | 226 | { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } |
227 | 227 | ||
228 | static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) | 228 | static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) |
229 | { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } | 229 | { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } |
230 | 230 | ||
231 | static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) | 231 | static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) |
232 | { | 232 | { |
233 | unsigned long ptp; /* Physical address, shifted right by 4 */ | 233 | unsigned long ptp; /* Physical address, shifted right by 4 */ |
234 | int i; | 234 | int i; |
235 | 235 | ||
236 | ptp = __nocache_pa((unsigned long) ptep) >> 4; | 236 | ptp = __nocache_pa((unsigned long) ptep) >> 4; |
237 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { | 237 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { |
238 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); | 238 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); |
239 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); | 239 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); |
240 | } | 240 | } |
241 | } | 241 | } |
242 | 242 | ||
243 | static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) | 243 | static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) |
244 | { | 244 | { |
245 | unsigned long ptp; /* Physical address, shifted right by 4 */ | 245 | unsigned long ptp; /* Physical address, shifted right by 4 */ |
246 | int i; | 246 | int i; |
247 | 247 | ||
248 | ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ | 248 | ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ |
249 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { | 249 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { |
250 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); | 250 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); |
251 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); | 251 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); |
252 | } | 252 | } |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) | 255 | static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) |
256 | { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } | 256 | { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } |
257 | 257 | ||
258 | /* to find an entry in a top-level page table... */ | 258 | /* to find an entry in a top-level page table... */ |
259 | static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) | 259 | static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) |
260 | { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } | 260 | { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } |
261 | 261 | ||
262 | /* Find an entry in the second-level page table.. */ | 262 | /* Find an entry in the second-level page table.. */ |
263 | static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) | 263 | static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) |
264 | { | 264 | { |
265 | return (pmd_t *) srmmu_pgd_page(*dir) + | 265 | return (pmd_t *) srmmu_pgd_page(*dir) + |
266 | ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); | 266 | ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); |
267 | } | 267 | } |
268 | 268 | ||
269 | /* Find an entry in the third-level page table.. */ | 269 | /* Find an entry in the third-level page table.. */ |
270 | static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) | 270 | static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) |
271 | { | 271 | { |
272 | void *pte; | 272 | void *pte; |
273 | 273 | ||
274 | pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); | 274 | pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); |
275 | return (pte_t *) pte + | 275 | return (pte_t *) pte + |
276 | ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); | 276 | ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); |
277 | } | 277 | } |
278 | 278 | ||
279 | static unsigned long srmmu_swp_type(swp_entry_t entry) | 279 | static unsigned long srmmu_swp_type(swp_entry_t entry) |
280 | { | 280 | { |
281 | return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; | 281 | return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; |
282 | } | 282 | } |
283 | 283 | ||
284 | static unsigned long srmmu_swp_offset(swp_entry_t entry) | 284 | static unsigned long srmmu_swp_offset(swp_entry_t entry) |
285 | { | 285 | { |
286 | return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; | 286 | return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; |
287 | } | 287 | } |
288 | 288 | ||
289 | static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) | 289 | static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) |
290 | { | 290 | { |
291 | return (swp_entry_t) { | 291 | return (swp_entry_t) { |
292 | (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT | 292 | (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT |
293 | | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; | 293 | | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; |
294 | } | 294 | } |
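The encode/decode pair above just packs two bit fields into one word. A standalone roundtrip with invented field widths (the real SRMMU_SWP_* shifts and masks live in the srmmu headers; the 5-bit type and 20-bit offset below are assumptions for illustration only):

    /* Roundtrip sketch of the swp_entry packing. */
    #include <stdio.h>

    #define SWP_TYPE_SHIFT 20
    #define SWP_TYPE_MASK  0x1fUL
    #define SWP_OFF_SHIFT  0
    #define SWP_OFF_MASK   0xfffffUL

    int main(void)
    {
            unsigned long type = 3, off = 0x1234;
            unsigned long val = ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT)
                              | ((off & SWP_OFF_MASK) << SWP_OFF_SHIFT);

            printf("type %lu off %#lx\n",
                   (val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK,
                   (val >> SWP_OFF_SHIFT) & SWP_OFF_MASK); /* type 3 off 0x1234 */
            return 0;
    }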
295 | 295 | ||
296 | /* | 296 | /* |
297 | * size: bytes to allocate in the nocache area. | 297 | * size: bytes to allocate in the nocache area. |
298 | * align: alignment in bytes. | 298 | * align: alignment in bytes. |
299 | * Returns the virtual address of the allocated area. | 299 | * Returns the virtual address of the allocated area. |
300 | */ | 300 | */ |
301 | static unsigned long __srmmu_get_nocache(int size, int align) | 301 | static unsigned long __srmmu_get_nocache(int size, int align) |
302 | { | 302 | { |
303 | int offset; | 303 | int offset; |
304 | 304 | ||
305 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { | 305 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { |
306 | printk("Size 0x%x too small for nocache request\n", size); | 306 | printk("Size 0x%x too small for nocache request\n", size); |
307 | size = SRMMU_NOCACHE_BITMAP_SHIFT; | 307 | size = SRMMU_NOCACHE_BITMAP_SHIFT; |
308 | } | 308 | } |
309 | if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { | 309 | if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { |
310 | printk("Size 0x%x unaligned int nocache request\n", size); | 310 | printk("Size 0x%x unaligned int nocache request\n", size); |
311 | size += SRMMU_NOCACHE_BITMAP_SHIFT-1; | 311 | size += SRMMU_NOCACHE_BITMAP_SHIFT-1; |
312 | } | 312 | } |
313 | BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); | 313 | BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); |
314 | 314 | ||
315 | offset = bit_map_string_get(&srmmu_nocache_map, | 315 | offset = bit_map_string_get(&srmmu_nocache_map, |
316 | size >> SRMMU_NOCACHE_BITMAP_SHIFT, | 316 | size >> SRMMU_NOCACHE_BITMAP_SHIFT, |
317 | align >> SRMMU_NOCACHE_BITMAP_SHIFT); | 317 | align >> SRMMU_NOCACHE_BITMAP_SHIFT); |
318 | if (offset == -1) { | 318 | if (offset == -1) { |
319 | printk("srmmu: out of nocache %d: %d/%d\n", | 319 | printk("srmmu: out of nocache %d: %d/%d\n", |
320 | size, (int) srmmu_nocache_size, | 320 | size, (int) srmmu_nocache_size, |
321 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); | 321 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); |
322 | return 0; | 322 | return 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); | 325 | return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); |
326 | } | 326 | } |
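Callers hand __srmmu_get_nocache() both a size and an alignment in bytes, each a multiple of the 256-byte bitmap granularity; srmmu_nocache_init() and srmmu_get_pgd_fast() below, for example, request SRMMU_PGD_TABLE_SIZE aligned to itself so a page-directory table is always naturally aligned.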
327 | 327 | ||
328 | static unsigned long srmmu_get_nocache(int size, int align) | 328 | static unsigned long srmmu_get_nocache(int size, int align) |
329 | { | 329 | { |
330 | unsigned long tmp; | 330 | unsigned long tmp; |
331 | 331 | ||
332 | tmp = __srmmu_get_nocache(size, align); | 332 | tmp = __srmmu_get_nocache(size, align); |
333 | 333 | ||
334 | if (tmp) | 334 | if (tmp) |
335 | memset((void *)tmp, 0, size); | 335 | memset((void *)tmp, 0, size); |
336 | 336 | ||
337 | return tmp; | 337 | return tmp; |
338 | } | 338 | } |
339 | 339 | ||
340 | static void srmmu_free_nocache(unsigned long vaddr, int size) | 340 | static void srmmu_free_nocache(unsigned long vaddr, int size) |
341 | { | 341 | { |
342 | int offset; | 342 | int offset; |
343 | 343 | ||
344 | if (vaddr < SRMMU_NOCACHE_VADDR) { | 344 | if (vaddr < SRMMU_NOCACHE_VADDR) { |
345 | printk("Vaddr %lx is smaller than nocache base 0x%lx\n", | 345 | printk("Vaddr %lx is smaller than nocache base 0x%lx\n", |
346 | vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); | 346 | vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); |
347 | BUG(); | 347 | BUG(); |
348 | } | 348 | } |
349 | if (vaddr+size > srmmu_nocache_end) { | 349 | if (vaddr+size > srmmu_nocache_end) { |
350 | printk("Vaddr %lx is bigger than nocache end 0x%lx\n", | 350 | printk("Vaddr %lx is bigger than nocache end 0x%lx\n", |
351 | vaddr, srmmu_nocache_end); | 351 | vaddr, srmmu_nocache_end); |
352 | BUG(); | 352 | BUG(); |
353 | } | 353 | } |
354 | if (!is_power_of_2(size)) { | 354 | if (!is_power_of_2(size)) { |
355 | printk("Size 0x%x is not a power of 2\n", size); | 355 | printk("Size 0x%x is not a power of 2\n", size); |
356 | BUG(); | 356 | BUG(); |
357 | } | 357 | } |
358 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { | 358 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { |
359 | printk("Size 0x%x is too small\n", size); | 359 | printk("Size 0x%x is too small\n", size); |
360 | BUG(); | 360 | BUG(); |
361 | } | 361 | } |
362 | if (vaddr & (size-1)) { | 362 | if (vaddr & (size-1)) { |
363 | printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); | 363 | printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); |
364 | BUG(); | 364 | BUG(); |
365 | } | 365 | } |
366 | 366 | ||
367 | offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; | 367 | offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; |
368 | size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; | 368 | size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; |
369 | 369 | ||
370 | bit_map_clear(&srmmu_nocache_map, offset, size); | 370 | bit_map_clear(&srmmu_nocache_map, offset, size); |
371 | } | 371 | } |
372 | 372 | ||
373 | static void srmmu_early_allocate_ptable_skeleton(unsigned long start, | 373 | static void srmmu_early_allocate_ptable_skeleton(unsigned long start, |
374 | unsigned long end); | 374 | unsigned long end); |
375 | 375 | ||
376 | extern unsigned long probe_memory(void); /* in fault.c */ | 376 | extern unsigned long probe_memory(void); /* in fault.c */ |
377 | 377 | ||
378 | /* | 378 | /* |
379 | * Reserve nocache dynamically, sized proportionally to the amount of | 379 | * Reserve nocache dynamically, sized proportionally to the amount of |
380 | * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 | 380 | * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 |
381 | */ | 381 | */ |
382 | static void srmmu_nocache_calcsize(void) | 382 | static void srmmu_nocache_calcsize(void) |
383 | { | 383 | { |
384 | unsigned long sysmemavail = probe_memory() / 1024; | 384 | unsigned long sysmemavail = probe_memory() / 1024; |
385 | int srmmu_nocache_npages; | 385 | int srmmu_nocache_npages; |
386 | 386 | ||
387 | srmmu_nocache_npages = | 387 | srmmu_nocache_npages = |
388 | sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; | 388 | sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; |
389 | 389 | ||
390 | /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ | 390 | /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ |
391 | // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; | 391 | // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; |
392 | if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) | 392 | if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) |
393 | srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; | 393 | srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; |
394 | 394 | ||
395 | /* anything above 1280 blows up */ | 395 | /* anything above 1280 blows up */ |
396 | if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) | 396 | if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) |
397 | srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; | 397 | srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; |
398 | 398 | ||
399 | srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; | 399 | srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; |
400 | srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; | 400 | srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; |
401 | } | 401 | } |
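Plugging in round numbers makes the sizing concrete. SRMMU_NOCACHE_ALCRATIO's value is defined elsewhere in the srmmu headers; assuming 64 purely for illustration, a 64MB machine gives sysmemavail = 65536 KB, so npages = 65536 / 64 / 1024 * 256 = 256 pages, i.e. a 1MB nocache window, before the MIN/MAX clamps above are applied.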
402 | 402 | ||
403 | static void __init srmmu_nocache_init(void) | 403 | static void __init srmmu_nocache_init(void) |
404 | { | 404 | { |
405 | unsigned int bitmap_bits; | 405 | unsigned int bitmap_bits; |
406 | pgd_t *pgd; | 406 | pgd_t *pgd; |
407 | pmd_t *pmd; | 407 | pmd_t *pmd; |
408 | pte_t *pte; | 408 | pte_t *pte; |
409 | unsigned long paddr, vaddr; | 409 | unsigned long paddr, vaddr; |
410 | unsigned long pteval; | 410 | unsigned long pteval; |
411 | 411 | ||
412 | bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; | 412 | bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; |
413 | 413 | ||
414 | srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, | 414 | srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, |
415 | SRMMU_NOCACHE_ALIGN_MAX, 0UL); | 415 | SRMMU_NOCACHE_ALIGN_MAX, 0UL); |
416 | memset(srmmu_nocache_pool, 0, srmmu_nocache_size); | 416 | memset(srmmu_nocache_pool, 0, srmmu_nocache_size); |
417 | 417 | ||
418 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); | 418 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); |
419 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); | 419 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); |
420 | 420 | ||
421 | srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 421 | srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); |
422 | memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); | 422 | memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); |
423 | init_mm.pgd = srmmu_swapper_pg_dir; | 423 | init_mm.pgd = srmmu_swapper_pg_dir; |
424 | 424 | ||
425 | srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); | 425 | srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); |
426 | 426 | ||
427 | paddr = __pa((unsigned long)srmmu_nocache_pool); | 427 | paddr = __pa((unsigned long)srmmu_nocache_pool); |
428 | vaddr = SRMMU_NOCACHE_VADDR; | 428 | vaddr = SRMMU_NOCACHE_VADDR; |
429 | 429 | ||
430 | while (vaddr < srmmu_nocache_end) { | 430 | while (vaddr < srmmu_nocache_end) { |
431 | pgd = pgd_offset_k(vaddr); | 431 | pgd = pgd_offset_k(vaddr); |
432 | pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); | 432 | pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); |
433 | pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); | 433 | pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); |
434 | 434 | ||
435 | pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); | 435 | pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); |
436 | 436 | ||
437 | if (srmmu_cache_pagetables) | 437 | if (srmmu_cache_pagetables) |
438 | pteval |= SRMMU_CACHE; | 438 | pteval |= SRMMU_CACHE; |
439 | 439 | ||
440 | srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); | 440 | srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); |
441 | 441 | ||
442 | vaddr += PAGE_SIZE; | 442 | vaddr += PAGE_SIZE; |
443 | paddr += PAGE_SIZE; | 443 | paddr += PAGE_SIZE; |
444 | } | 444 | } |
445 | 445 | ||
446 | flush_cache_all(); | 446 | flush_cache_all(); |
447 | flush_tlb_all(); | 447 | flush_tlb_all(); |
448 | } | 448 | } |
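
The pteval construction in the loop above packs the physical page number into the high bits of an SRMMU PTE: the PTE holds physical address bits 35..12 in its top 24 bits, so a >> 4 shift converts a page-aligned physical address into the PTE's page-number field while leaving the low 8 bits free for flags. A standalone sketch; the flag values are the conventional SRMMU encodings, restated here as assumptions:

    #include <stdio.h>

    #define ET_PTE 0x2   /* entry type: page table entry */
    #define PRIV   0x1c  /* ACC=7: supervisor read/write/execute */
    #define CACHE  0x80  /* cacheable bit */

    int main(void)
    {
            unsigned long paddr = 0x02004000UL; /* example page-aligned paddr */
            unsigned long pteval = (paddr >> 4) | ET_PTE | PRIV;

            /* srmmu_nocache_init() sets CACHE only if srmmu_cache_pagetables */
            printf("pte without cache bit: 0x%08lx\n", pteval);
            printf("pte with    cache bit: 0x%08lx\n", pteval | CACHE);
            return 0;
    }
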
449 | 449 | ||
450 | static inline pgd_t *srmmu_get_pgd_fast(void) | 450 | static inline pgd_t *srmmu_get_pgd_fast(void) |
451 | { | 451 | { |
452 | pgd_t *pgd = NULL; | 452 | pgd_t *pgd = NULL; |
453 | 453 | ||
454 | pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 454 | pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); |
455 | if (pgd) { | 455 | if (pgd) { |
456 | pgd_t *init = pgd_offset_k(0); | 456 | pgd_t *init = pgd_offset_k(0); |
457 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | 457 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
458 | memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | 458 | memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, |
459 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | 459 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
460 | } | 460 | } |
461 | 461 | ||
462 | return pgd; | 462 | return pgd; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void srmmu_free_pgd_fast(pgd_t *pgd) | 465 | static void srmmu_free_pgd_fast(pgd_t *pgd) |
466 | { | 466 | { |
467 | srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); | 467 | srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); |
468 | } | 468 | } |
469 | 469 | ||
470 | static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 470 | static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
471 | { | 471 | { |
472 | return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 472 | return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
473 | } | 473 | } |
474 | 474 | ||
475 | static void srmmu_pmd_free(pmd_t * pmd) | 475 | static void srmmu_pmd_free(pmd_t * pmd) |
476 | { | 476 | { |
477 | srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); | 477 | srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); |
478 | } | 478 | } |
479 | 479 | ||
480 | /* | 480 | /* |
481 | * Hardware needs alignment to 256 bytes only, but we align to the whole | 481 | * Hardware needs alignment to 256 bytes only, but we align to the whole |
482 | * page size to reduce fragmentation problems caused by the buddy principle. | 482 | * page size to reduce fragmentation problems caused by the buddy principle. |
483 | * XXX Provide actual fragmentation statistics in /proc. | 483 | * XXX Provide actual fragmentation statistics in /proc. |
484 | * | 484 | * |
485 | * Alignments up to the page size are the same for physical and virtual | 485 | * Alignments up to the page size are the same for physical and virtual |
486 | * addresses of the nocache area. | 486 | * addresses of the nocache area. |
487 | */ | 487 | */ |
488 | static pte_t * | 488 | static pte_t * |
489 | srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 489 | srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
490 | { | 490 | { |
491 | return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 491 | return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); |
492 | } | 492 | } |
493 | 493 | ||
494 | static pgtable_t | 494 | static pgtable_t |
495 | srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) | 495 | srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) |
496 | { | 496 | { |
497 | unsigned long pte; | 497 | unsigned long pte; |
498 | struct page *page; | 498 | struct page *page; |
499 | 499 | ||
500 | if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) | 500 | if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) |
501 | return NULL; | 501 | return NULL; |
502 | page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); | 502 | page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); |
503 | pgtable_page_ctor(page); | 503 | pgtable_page_ctor(page); |
504 | return page; | 504 | return page; |
505 | } | 505 | } |
506 | 506 | ||
507 | static void srmmu_free_pte_fast(pte_t *pte) | 507 | static void srmmu_free_pte_fast(pte_t *pte) |
508 | { | 508 | { |
509 | srmmu_free_nocache((unsigned long)pte, PTE_SIZE); | 509 | srmmu_free_nocache((unsigned long)pte, PTE_SIZE); |
510 | } | 510 | } |
511 | 511 | ||
512 | static void srmmu_pte_free(pgtable_t pte) | 512 | static void srmmu_pte_free(pgtable_t pte) |
513 | { | 513 | { |
514 | unsigned long p; | 514 | unsigned long p; |
515 | 515 | ||
516 | pgtable_page_dtor(pte); | 516 | pgtable_page_dtor(pte); |
517 | p = (unsigned long)page_address(pte); /* Cached address (for test) */ | 517 | p = (unsigned long)page_address(pte); /* Cached address (for test) */ |
518 | if (p == 0) | 518 | if (p == 0) |
519 | BUG(); | 519 | BUG(); |
520 | p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ | 520 | p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ |
521 | p = (unsigned long) __nocache_va(p); /* Nocached virtual */ | 521 | p = (unsigned long) __nocache_va(p); /* Nocached virtual */ |
522 | srmmu_free_nocache(p, PTE_SIZE); | 522 | srmmu_free_nocache(p, PTE_SIZE); |
523 | } | 523 | } |
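
srmmu_pte_free() must hand back the nocache-window address rather than the cached one, hence the page -> pfn -> physical -> nocache-virtual chain above. A sketch of the last step, assuming __nocache_va() is a plain linear offset between the pool's physical base and SRMMU_NOCACHE_VADDR (both addresses below are made up):

    #include <stdio.h>

    #define NOCACHE_VADDR 0xfc000000UL                    /* assumed virtual base */
    static unsigned long nocache_pool_pa = 0x02000000UL;  /* assumed phys base */

    /* stand-in for __nocache_va(): linear pa -> nocache va translation */
    static unsigned long nocache_va(unsigned long pa)
    {
            return pa - nocache_pool_pa + NOCACHE_VADDR;
    }

    int main(void)
    {
            /* page_to_pfn(pte) << PAGE_SHIFT: the physical address ... */
            unsigned long p = 0x02004000UL;
            /* ... mapped back into the nocache window before freeing */
            printf("free at nocache va 0x%08lx\n", nocache_va(p));
            return 0;
    }
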
524 | 524 | ||
525 | /* Allocate a hardware MMU context for mm; if none is free, | 525 | /* Allocate a hardware MMU context for mm; if none is free, |
526 | * steal the oldest in-use one. */ | 526 | * steal the oldest in-use one. */ |
527 | static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) | 527 | static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) |
528 | { | 528 | { |
529 | struct ctx_list *ctxp; | 529 | struct ctx_list *ctxp; |
530 | 530 | ||
531 | ctxp = ctx_free.next; | 531 | ctxp = ctx_free.next; |
532 | if(ctxp != &ctx_free) { | 532 | if(ctxp != &ctx_free) { |
533 | remove_from_ctx_list(ctxp); | 533 | remove_from_ctx_list(ctxp); |
534 | add_to_used_ctxlist(ctxp); | 534 | add_to_used_ctxlist(ctxp); |
535 | mm->context = ctxp->ctx_number; | 535 | mm->context = ctxp->ctx_number; |
536 | ctxp->ctx_mm = mm; | 536 | ctxp->ctx_mm = mm; |
537 | return; | 537 | return; |
538 | } | 538 | } |
539 | ctxp = ctx_used.next; | 539 | ctxp = ctx_used.next; |
540 | if(ctxp->ctx_mm == old_mm) | 540 | if(ctxp->ctx_mm == old_mm) |
541 | ctxp = ctxp->next; | 541 | ctxp = ctxp->next; |
542 | if(ctxp == &ctx_used) | 542 | if(ctxp == &ctx_used) |
543 | panic("out of mmu contexts"); | 543 | panic("out of mmu contexts"); |
544 | flush_cache_mm(ctxp->ctx_mm); | 544 | flush_cache_mm(ctxp->ctx_mm); |
545 | flush_tlb_mm(ctxp->ctx_mm); | 545 | flush_tlb_mm(ctxp->ctx_mm); |
546 | remove_from_ctx_list(ctxp); | 546 | remove_from_ctx_list(ctxp); |
547 | add_to_used_ctxlist(ctxp); | 547 | add_to_used_ctxlist(ctxp); |
548 | ctxp->ctx_mm->context = NO_CONTEXT; | 548 | ctxp->ctx_mm->context = NO_CONTEXT; |
549 | ctxp->ctx_mm = mm; | 549 | ctxp->ctx_mm = mm; |
550 | mm->context = ctxp->ctx_number; | 550 | mm->context = ctxp->ctx_number; |
551 | } | 551 | } |
552 | 552 | ||
553 | static inline void free_context(int context) | 553 | static inline void free_context(int context) |
554 | { | 554 | { |
555 | struct ctx_list *ctx_old; | 555 | struct ctx_list *ctx_old; |
556 | 556 | ||
557 | ctx_old = ctx_list_pool + context; | 557 | ctx_old = ctx_list_pool + context; |
558 | remove_from_ctx_list(ctx_old); | 558 | remove_from_ctx_list(ctx_old); |
559 | add_to_free_ctxlist(ctx_old); | 559 | add_to_free_ctxlist(ctx_old); |
560 | } | 560 | } |
561 | 561 | ||
562 | 562 | ||
563 | void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, | 563 | void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, |
564 | struct task_struct *tsk) | 564 | struct task_struct *tsk) |
565 | { | 565 | { |
566 | if(mm->context == NO_CONTEXT) { | 566 | if(mm->context == NO_CONTEXT) { |
567 | spin_lock(&srmmu_context_spinlock); | 567 | spin_lock(&srmmu_context_spinlock); |
568 | alloc_context(old_mm, mm); | 568 | alloc_context(old_mm, mm); |
569 | spin_unlock(&srmmu_context_spinlock); | 569 | spin_unlock(&srmmu_context_spinlock); |
570 | srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); | 570 | srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); |
571 | } | 571 | } |
572 | 572 | ||
573 | if (sparc_cpu_model == sparc_leon) | 573 | if (sparc_cpu_model == sparc_leon) |
574 | leon_switch_mm(); | 574 | leon_switch_mm(); |
575 | 575 | ||
576 | if (is_hypersparc) | 576 | if (is_hypersparc) |
577 | hyper_flush_whole_icache(); | 577 | hyper_flush_whole_icache(); |
578 | 578 | ||
579 | srmmu_set_context(mm->context); | 579 | srmmu_set_context(mm->context); |
580 | } | 580 | } |
581 | 581 | ||
582 | /* Low level IO area allocation on the SRMMU. */ | 582 | /* Low level IO area allocation on the SRMMU. */ |
583 | static inline void srmmu_mapioaddr(unsigned long physaddr, | 583 | static inline void srmmu_mapioaddr(unsigned long physaddr, |
584 | unsigned long virt_addr, int bus_type) | 584 | unsigned long virt_addr, int bus_type) |
585 | { | 585 | { |
586 | pgd_t *pgdp; | 586 | pgd_t *pgdp; |
587 | pmd_t *pmdp; | 587 | pmd_t *pmdp; |
588 | pte_t *ptep; | 588 | pte_t *ptep; |
589 | unsigned long tmp; | 589 | unsigned long tmp; |
590 | 590 | ||
591 | physaddr &= PAGE_MASK; | 591 | physaddr &= PAGE_MASK; |
592 | pgdp = pgd_offset_k(virt_addr); | 592 | pgdp = pgd_offset_k(virt_addr); |
593 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); | 593 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); |
594 | ptep = srmmu_pte_offset(pmdp, virt_addr); | 594 | ptep = srmmu_pte_offset(pmdp, virt_addr); |
595 | tmp = (physaddr >> 4) | SRMMU_ET_PTE; | 595 | tmp = (physaddr >> 4) | SRMMU_ET_PTE; |
596 | 596 | ||
597 | /* | 597 | /* |
598 | * I need to test whether this is consistent across all | 598 | * I need to test whether this is consistent across all |
599 | * sun4m machines. The bus_type represents the upper 4 bits of | 599 | * sun4m machines. The bus_type represents the upper 4 bits of |
600 | * the 36-bit physical address on the I/O space lines... | 600 | * the 36-bit physical address on the I/O space lines... |
601 | */ | 601 | */ |
602 | tmp |= (bus_type << 28); | 602 | tmp |= (bus_type << 28); |
603 | tmp |= SRMMU_PRIV; | 603 | tmp |= SRMMU_PRIV; |
604 | __flush_page_to_ram(virt_addr); | 604 | __flush_page_to_ram(virt_addr); |
605 | srmmu_set_pte(ptep, __pte(tmp)); | 605 | srmmu_set_pte(ptep, __pte(tmp)); |
606 | } | 606 | } |
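
Putting the pieces of srmmu_mapioaddr() together: the 36-bit physical address is split into a 4-bit space identifier (bus_type) and a 32-bit offset, and after the >> 4 shift the space bits land in PTE bits 31..28. A standalone sketch with made-up addresses; the flag values are the assumed SRMMU encodings used earlier:

    #include <stdio.h>

    #define ET_PTE     0x2
    #define PRIV       0x1c
    #define PAGE_MASK_ (~0xfffUL)

    int main(void)
    {
            unsigned long physaddr = 0x10002000UL; /* low 32 bits of target */
            int bus_type = 0xf;                    /* top 4 bits of 36-bit PA */
            unsigned long tmp;

            physaddr &= PAGE_MASK_;
            tmp = (physaddr >> 4) | ET_PTE;
            tmp |= (unsigned long)bus_type << 28;
            tmp |= PRIV;

            /* resulting PTE maps the 36-bit physical address 0xf10002000 */
            printf("io pte: 0x%08lx\n", tmp);
            return 0;
    }
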
607 | 607 | ||
608 | static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, | 608 | static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, |
609 | unsigned long xva, unsigned int len) | 609 | unsigned long xva, unsigned int len) |
610 | { | 610 | { |
611 | while (len != 0) { | 611 | while (len != 0) { |
612 | len -= PAGE_SIZE; | 612 | len -= PAGE_SIZE; |
613 | srmmu_mapioaddr(xpa, xva, bus); | 613 | srmmu_mapioaddr(xpa, xva, bus); |
614 | xva += PAGE_SIZE; | 614 | xva += PAGE_SIZE; |
615 | xpa += PAGE_SIZE; | 615 | xpa += PAGE_SIZE; |
616 | } | 616 | } |
617 | flush_tlb_all(); | 617 | flush_tlb_all(); |
618 | } | 618 | } |
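
A hypothetical caller of srmmu_mapiorange(), mapping one 8 KB device region; the device address, virtual address, and bus space below are invented for illustration (the real callers live in the sparc I/O layer):

    /* hypothetical same-file caller; all values are illustrative */
    static void example_map_device(void)
    {
            unsigned long xpa = 0x00200000UL; /* device physical address */
            unsigned long xva = 0xfe000000UL; /* chosen kernel virtual address */

            /* bus space 0xf, two 4 KB pages; the TLB is flushed once at the end */
            srmmu_mapiorange(0xf, xpa, xva, 2 * 4096);
    }
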
619 | 619 | ||
620 | static inline void srmmu_unmapioaddr(unsigned long virt_addr) | 620 | static inline void srmmu_unmapioaddr(unsigned long virt_addr) |
621 | { | 621 | { |
622 | pgd_t *pgdp; | 622 | pgd_t *pgdp; |
623 | pmd_t *pmdp; | 623 | pmd_t *pmdp; |
624 | pte_t *ptep; | 624 | pte_t *ptep; |
625 | 625 | ||
626 | pgdp = pgd_offset_k(virt_addr); | 626 | pgdp = pgd_offset_k(virt_addr); |
627 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); | 627 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); |
628 | ptep = srmmu_pte_offset(pmdp, virt_addr); | 628 | ptep = srmmu_pte_offset(pmdp, virt_addr); |
629 | 629 | ||
630 | /* No need to flush uncacheable page. */ | 630 | /* No need to flush uncacheable page. */ |
631 | srmmu_pte_clear(ptep); | 631 | srmmu_pte_clear(ptep); |
632 | } | 632 | } |
633 | 633 | ||
634 | static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) | 634 | static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) |
635 | { | 635 | { |
636 | while (len != 0) { | 636 | while (len != 0) { |
637 | len -= PAGE_SIZE; | 637 | len -= PAGE_SIZE; |
638 | srmmu_unmapioaddr(virt_addr); | 638 | srmmu_unmapioaddr(virt_addr); |
639 | virt_addr += PAGE_SIZE; | 639 | virt_addr += PAGE_SIZE; |
640 | } | 640 | } |
641 | flush_tlb_all(); | 641 | flush_tlb_all(); |
642 | } | 642 | } |
643 | 643 | ||
644 | /* | 644 | /* |
645 | * On the SRMMU we do not have the problem of limited tlb entries | 645 | * On the SRMMU we do not have the problem of limited tlb entries |
646 | * for mapping kernel pages, so we just take things from the free page | 646 | * for mapping kernel pages, so we just take things from the free page |
647 | * pool. As a side effect we are putting a little too much pressure | 647 | * pool. As a side effect we are putting a little too much pressure |
648 | * on the gfp() subsystem. This setup also makes the logic of the | 648 | * on the gfp() subsystem. This setup also makes the logic of the |
649 | * iommu mapping code a lot easier as we can transparently handle | 649 | * iommu mapping code a lot easier as we can transparently handle |
650 | * mappings on the kernel stack without any special code. | 650 | * mappings on the kernel stack without any special code. |
651 | */ | 651 | */ |
652 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) | 652 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
653 | { | 653 | { |
654 | struct thread_info *ret; | 654 | struct thread_info *ret; |
655 | 655 | ||
656 | ret = (struct thread_info *)__get_free_pages(GFP_KERNEL, | 656 | ret = (struct thread_info *)__get_free_pages(GFP_KERNEL, |
657 | THREAD_INFO_ORDER); | 657 | THREAD_INFO_ORDER); |
658 | #ifdef CONFIG_DEBUG_STACK_USAGE | 658 | #ifdef CONFIG_DEBUG_STACK_USAGE |
659 | if (ret) | 659 | if (ret) |
660 | memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER); | 660 | memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER); |
661 | #endif /* DEBUG_STACK_USAGE */ | 661 | #endif /* DEBUG_STACK_USAGE */ |
662 | 662 | ||
663 | return ret; | 663 | return ret; |
664 | } | 664 | } |
665 | 665 | ||
666 | void free_thread_info(struct thread_info *ti) | 666 | void free_thread_info(struct thread_info *ti) |
667 | { | 667 | { |
668 | free_pages((unsigned long)ti, THREAD_INFO_ORDER); | 668 | free_pages((unsigned long)ti, THREAD_INFO_ORDER); |
669 | } | 669 | } |
670 | 670 | ||
671 | /* tsunami.S */ | 671 | /* tsunami.S */ |
672 | extern void tsunami_flush_cache_all(void); | 672 | extern void tsunami_flush_cache_all(void); |
673 | extern void tsunami_flush_cache_mm(struct mm_struct *mm); | 673 | extern void tsunami_flush_cache_mm(struct mm_struct *mm); |
674 | extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | 674 | extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); |
675 | extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | 675 | extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); |
676 | extern void tsunami_flush_page_to_ram(unsigned long page); | 676 | extern void tsunami_flush_page_to_ram(unsigned long page); |
677 | extern void tsunami_flush_page_for_dma(unsigned long page); | 677 | extern void tsunami_flush_page_for_dma(unsigned long page); |
678 | extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | 678 | extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); |
679 | extern void tsunami_flush_tlb_all(void); | 679 | extern void tsunami_flush_tlb_all(void); |
680 | extern void tsunami_flush_tlb_mm(struct mm_struct *mm); | 680 | extern void tsunami_flush_tlb_mm(struct mm_struct *mm); |
681 | extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | 681 | extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); |
682 | extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | 682 | extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); |
683 | extern void tsunami_setup_blockops(void); | 683 | extern void tsunami_setup_blockops(void); |
684 | 684 | ||
685 | /* | 685 | /* |
686 | * Workaround, until we find what's going on with Swift. When low on memory, | 686 | * Workaround, until we find what's going on with Swift. When low on memory, |
687 | * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds | 687 | * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds |
688 | * the page already in the page tables, then faults again on the same instruction. | 688 | * the page already in the page tables, then faults again on the same instruction. |
689 | * I really don't understand it, have checked it and contexts | 689 | * I really don't understand it, have checked it and contexts |
690 | * are right, flush_tlb_all is done as well, and it faults again... | 690 | * are right, flush_tlb_all is done as well, and it faults again... |
691 | * Strange. -jj | 691 | * Strange. -jj |
692 | * | 692 | * |
693 | * The following code is deadwood that may be necessary when | 693 | * The following code is deadwood that may be necessary when |
694 | * we start to make precise page flushes again. --zaitcev | 694 | * we start to make precise page flushes again. --zaitcev |
695 | */ | 695 | */ |
696 | static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep) | 696 | static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep) |
697 | { | 697 | { |
698 | #if 0 | 698 | #if 0 |
699 | static unsigned long last; | 699 | static unsigned long last; |
700 | unsigned int val; | 700 | unsigned int val; |
701 | /* unsigned int n; */ | 701 | /* unsigned int n; */ |
702 | 702 | ||
703 | if (address == last) { | 703 | if (address == last) { |
704 | val = srmmu_hwprobe(address); | 704 | val = srmmu_hwprobe(address); |
705 | if (val != 0 && pte_val(*ptep) != val) { | 705 | if (val != 0 && pte_val(*ptep) != val) { |
706 | printk("swift_update_mmu_cache: " | 706 | printk("swift_update_mmu_cache: " |
707 | "addr %lx put %08x probed %08x from %pf\n", | 707 | "addr %lx put %08x probed %08x from %pf\n", |
708 | address, pte_val(*ptep), val, | 708 | address, pte_val(*ptep), val, |
709 | __builtin_return_address(0)); | 709 | __builtin_return_address(0)); |
710 | srmmu_flush_whole_tlb(); | 710 | srmmu_flush_whole_tlb(); |
711 | } | 711 | } |
712 | } | 712 | } |
713 | last = address; | 713 | last = address; |
714 | #endif | 714 | #endif |
715 | } | 715 | } |
716 | 716 | ||
717 | /* swift.S */ | 717 | /* swift.S */ |
718 | extern void swift_flush_cache_all(void); | 718 | extern void swift_flush_cache_all(void); |
719 | extern void swift_flush_cache_mm(struct mm_struct *mm); | 719 | extern void swift_flush_cache_mm(struct mm_struct *mm); |
720 | extern void swift_flush_cache_range(struct vm_area_struct *vma, | 720 | extern void swift_flush_cache_range(struct vm_area_struct *vma, |
721 | unsigned long start, unsigned long end); | 721 | unsigned long start, unsigned long end); |
722 | extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | 722 | extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); |
723 | extern void swift_flush_page_to_ram(unsigned long page); | 723 | extern void swift_flush_page_to_ram(unsigned long page); |
724 | extern void swift_flush_page_for_dma(unsigned long page); | 724 | extern void swift_flush_page_for_dma(unsigned long page); |
725 | extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | 725 | extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); |
726 | extern void swift_flush_tlb_all(void); | 726 | extern void swift_flush_tlb_all(void); |
727 | extern void swift_flush_tlb_mm(struct mm_struct *mm); | 727 | extern void swift_flush_tlb_mm(struct mm_struct *mm); |
728 | extern void swift_flush_tlb_range(struct vm_area_struct *vma, | 728 | extern void swift_flush_tlb_range(struct vm_area_struct *vma, |
729 | unsigned long start, unsigned long end); | 729 | unsigned long start, unsigned long end); |
730 | extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | 730 | extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); |
731 | 731 | ||
732 | #if 0 /* P3: deadwood to debug precise flushes on Swift. */ | 732 | #if 0 /* P3: deadwood to debug precise flushes on Swift. */ |
733 | void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 733 | void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
734 | { | 734 | { |
735 | int cctx, ctx1; | 735 | int cctx, ctx1; |
736 | 736 | ||
737 | page &= PAGE_MASK; | 737 | page &= PAGE_MASK; |
738 | if ((ctx1 = vma->vm_mm->context) != -1) { | 738 | if ((ctx1 = vma->vm_mm->context) != -1) { |
739 | cctx = srmmu_get_context(); | 739 | cctx = srmmu_get_context(); |
740 | /* Is context # ever different from current context? P3 */ | 740 | /* Is context # ever different from current context? P3 */ |
741 | if (cctx != ctx1) { | 741 | if (cctx != ctx1) { |
742 | printk("flush ctx %02x curr %02x\n", ctx1, cctx); | 742 | printk("flush ctx %02x curr %02x\n", ctx1, cctx); |
743 | srmmu_set_context(ctx1); | 743 | srmmu_set_context(ctx1); |
744 | swift_flush_page(page); | 744 | swift_flush_page(page); |
745 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 745 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
746 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); | 746 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); |
747 | srmmu_set_context(cctx); | 747 | srmmu_set_context(cctx); |
748 | } else { | 748 | } else { |
749 | /* Remove protection bits from the virtual cache. */ | 749 | /* Remove protection bits from the virtual cache. */ |
750 | /* swift_flush_cache_all(); */ | 750 | /* swift_flush_cache_all(); */ |
751 | /* swift_flush_cache_page(vma, page); */ | 751 | /* swift_flush_cache_page(vma, page); */ |
752 | swift_flush_page(page); | 752 | swift_flush_page(page); |
753 | 753 | ||
754 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | 754 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : |
755 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); | 755 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); |
756 | /* same as above: srmmu_flush_tlb_page() */ | 756 | /* same as above: srmmu_flush_tlb_page() */ |
757 | } | 757 | } |
758 | } | 758 | } |
759 | } | 759 | } |
760 | #endif | 760 | #endif |
761 | 761 | ||
762 | /* | 762 | /* |
763 | * The following are all MBUS based SRMMU modules, and therefore could | 763 | * The following are all MBUS based SRMMU modules, and therefore could |
764 | * be found in a multiprocessor configuration. On the whole, these | 764 | * be found in a multiprocessor configuration. On the whole, these |
765 | * chips seem to be much more touchy about DVMA and page tables | 765 | * chips seem to be much more touchy about DVMA and page tables |
766 | * with respect to cache coherency. | 766 | * with respect to cache coherency. |
767 | */ | 767 | */ |
768 | 768 | ||
769 | /* Cypress flushes. */ | 769 | /* Cypress flushes. */ |
770 | static void cypress_flush_cache_all(void) | 770 | static void cypress_flush_cache_all(void) |
771 | { | 771 | { |
772 | volatile unsigned long cypress_sucks; | 772 | volatile unsigned long cypress_sucks; |
773 | unsigned long faddr, tagval; | 773 | unsigned long faddr, tagval; |
774 | 774 | ||
775 | flush_user_windows(); | 775 | flush_user_windows(); |
776 | for(faddr = 0; faddr < 0x10000; faddr += 0x20) { | 776 | for(faddr = 0; faddr < 0x10000; faddr += 0x20) { |
777 | __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : | 777 | __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : |
778 | "=r" (tagval) : | 778 | "=r" (tagval) : |
779 | "r" (faddr), "r" (0x40000), | 779 | "r" (faddr), "r" (0x40000), |
780 | "i" (ASI_M_DATAC_TAG)); | 780 | "i" (ASI_M_DATAC_TAG)); |
781 | 781 | ||
782 | /* If modified and valid, kick it. */ | 782 | /* If modified and valid, kick it. */ |
783 | if((tagval & 0x60) == 0x60) | 783 | if((tagval & 0x60) == 0x60) |
784 | cypress_sucks = *(unsigned long *)(0xf0020000 + faddr); | 784 | cypress_sucks = *(unsigned long *)(0xf0020000 + faddr); |
785 | } | 785 | } |
786 | } | 786 | } |
787 | 787 | ||
788 | static void cypress_flush_cache_mm(struct mm_struct *mm) | 788 | static void cypress_flush_cache_mm(struct mm_struct *mm) |
789 | { | 789 | { |
790 | register unsigned long a, b, c, d, e, f, g; | 790 | register unsigned long a, b, c, d, e, f, g; |
791 | unsigned long flags, faddr; | 791 | unsigned long flags, faddr; |
792 | int octx; | 792 | int octx; |
793 | 793 | ||
794 | FLUSH_BEGIN(mm) | 794 | FLUSH_BEGIN(mm) |
795 | flush_user_windows(); | 795 | flush_user_windows(); |
796 | local_irq_save(flags); | 796 | local_irq_save(flags); |
797 | octx = srmmu_get_context(); | 797 | octx = srmmu_get_context(); |
798 | srmmu_set_context(mm->context); | 798 | srmmu_set_context(mm->context); |
799 | a = 0x20; b = 0x40; c = 0x60; | 799 | a = 0x20; b = 0x40; c = 0x60; |
800 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | 800 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; |
801 | 801 | ||
802 | faddr = (0x10000 - 0x100); | 802 | faddr = (0x10000 - 0x100); |
803 | goto inside; | 803 | goto inside; |
804 | do { | 804 | do { |
805 | faddr -= 0x100; | 805 | faddr -= 0x100; |
806 | inside: | 806 | inside: |
807 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | 807 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" |
808 | "sta %%g0, [%0 + %2] %1\n\t" | 808 | "sta %%g0, [%0 + %2] %1\n\t" |
809 | "sta %%g0, [%0 + %3] %1\n\t" | 809 | "sta %%g0, [%0 + %3] %1\n\t" |
810 | "sta %%g0, [%0 + %4] %1\n\t" | 810 | "sta %%g0, [%0 + %4] %1\n\t" |
811 | "sta %%g0, [%0 + %5] %1\n\t" | 811 | "sta %%g0, [%0 + %5] %1\n\t" |
812 | "sta %%g0, [%0 + %6] %1\n\t" | 812 | "sta %%g0, [%0 + %6] %1\n\t" |
813 | "sta %%g0, [%0 + %7] %1\n\t" | 813 | "sta %%g0, [%0 + %7] %1\n\t" |
814 | "sta %%g0, [%0 + %8] %1\n\t" : : | 814 | "sta %%g0, [%0 + %8] %1\n\t" : : |
815 | "r" (faddr), "i" (ASI_M_FLUSH_CTX), | 815 | "r" (faddr), "i" (ASI_M_FLUSH_CTX), |
816 | "r" (a), "r" (b), "r" (c), "r" (d), | 816 | "r" (a), "r" (b), "r" (c), "r" (d), |
817 | "r" (e), "r" (f), "r" (g)); | 817 | "r" (e), "r" (f), "r" (g)); |
818 | } while(faddr); | 818 | } while(faddr); |
819 | srmmu_set_context(octx); | 819 | srmmu_set_context(octx); |
820 | local_irq_restore(flags); | 820 | local_irq_restore(flags); |
821 | FLUSH_END | 821 | FLUSH_END |
822 | } | 822 | } |
823 | 823 | ||
824 | static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | 824 | static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
825 | { | 825 | { |
826 | struct mm_struct *mm = vma->vm_mm; | 826 | struct mm_struct *mm = vma->vm_mm; |
827 | register unsigned long a, b, c, d, e, f, g; | 827 | register unsigned long a, b, c, d, e, f, g; |
828 | unsigned long flags, faddr; | 828 | unsigned long flags, faddr; |
829 | int octx; | 829 | int octx; |
830 | 830 | ||
831 | FLUSH_BEGIN(mm) | 831 | FLUSH_BEGIN(mm) |
832 | flush_user_windows(); | 832 | flush_user_windows(); |
833 | local_irq_save(flags); | 833 | local_irq_save(flags); |
834 | octx = srmmu_get_context(); | 834 | octx = srmmu_get_context(); |
835 | srmmu_set_context(mm->context); | 835 | srmmu_set_context(mm->context); |
836 | a = 0x20; b = 0x40; c = 0x60; | 836 | a = 0x20; b = 0x40; c = 0x60; |
837 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | 837 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; |
838 | 838 | ||
839 | start &= SRMMU_REAL_PMD_MASK; | 839 | start &= SRMMU_REAL_PMD_MASK; |
840 | while(start < end) { | 840 | while(start < end) { |
841 | faddr = (start + (0x10000 - 0x100)); | 841 | faddr = (start + (0x10000 - 0x100)); |
842 | goto inside; | 842 | goto inside; |
843 | do { | 843 | do { |
844 | faddr -= 0x100; | 844 | faddr -= 0x100; |
845 | inside: | 845 | inside: |
846 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | 846 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" |
847 | "sta %%g0, [%0 + %2] %1\n\t" | 847 | "sta %%g0, [%0 + %2] %1\n\t" |
848 | "sta %%g0, [%0 + %3] %1\n\t" | 848 | "sta %%g0, [%0 + %3] %1\n\t" |
849 | "sta %%g0, [%0 + %4] %1\n\t" | 849 | "sta %%g0, [%0 + %4] %1\n\t" |
850 | "sta %%g0, [%0 + %5] %1\n\t" | 850 | "sta %%g0, [%0 + %5] %1\n\t" |
851 | "sta %%g0, [%0 + %6] %1\n\t" | 851 | "sta %%g0, [%0 + %6] %1\n\t" |
852 | "sta %%g0, [%0 + %7] %1\n\t" | 852 | "sta %%g0, [%0 + %7] %1\n\t" |
853 | "sta %%g0, [%0 + %8] %1\n\t" : : | 853 | "sta %%g0, [%0 + %8] %1\n\t" : : |
854 | "r" (faddr), | 854 | "r" (faddr), |
855 | "i" (ASI_M_FLUSH_SEG), | 855 | "i" (ASI_M_FLUSH_SEG), |
856 | "r" (a), "r" (b), "r" (c), "r" (d), | 856 | "r" (a), "r" (b), "r" (c), "r" (d), |
857 | "r" (e), "r" (f), "r" (g)); | 857 | "r" (e), "r" (f), "r" (g)); |
858 | } while (faddr != start); | 858 | } while (faddr != start); |
859 | start += SRMMU_REAL_PMD_SIZE; | 859 | start += SRMMU_REAL_PMD_SIZE; |
860 | } | 860 | } |
861 | srmmu_set_context(octx); | 861 | srmmu_set_context(octx); |
862 | local_irq_restore(flags); | 862 | local_irq_restore(flags); |
863 | FLUSH_END | 863 | FLUSH_END |
864 | } | 864 | } |
865 | 865 | ||
866 | static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page) | 866 | static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
867 | { | 867 | { |
868 | register unsigned long a, b, c, d, e, f, g; | 868 | register unsigned long a, b, c, d, e, f, g; |
869 | struct mm_struct *mm = vma->vm_mm; | 869 | struct mm_struct *mm = vma->vm_mm; |
870 | unsigned long flags, line; | 870 | unsigned long flags, line; |
871 | int octx; | 871 | int octx; |
872 | 872 | ||
873 | FLUSH_BEGIN(mm) | 873 | FLUSH_BEGIN(mm) |
874 | flush_user_windows(); | 874 | flush_user_windows(); |
875 | local_irq_save(flags); | 875 | local_irq_save(flags); |
876 | octx = srmmu_get_context(); | 876 | octx = srmmu_get_context(); |
877 | srmmu_set_context(mm->context); | 877 | srmmu_set_context(mm->context); |
878 | a = 0x20; b = 0x40; c = 0x60; | 878 | a = 0x20; b = 0x40; c = 0x60; |
879 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | 879 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; |
880 | 880 | ||
881 | page &= PAGE_MASK; | 881 | page &= PAGE_MASK; |
882 | line = (page + PAGE_SIZE) - 0x100; | 882 | line = (page + PAGE_SIZE) - 0x100; |
883 | goto inside; | 883 | goto inside; |
884 | do { | 884 | do { |
885 | line -= 0x100; | 885 | line -= 0x100; |
886 | inside: | 886 | inside: |
887 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | 887 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" |
888 | "sta %%g0, [%0 + %2] %1\n\t" | 888 | "sta %%g0, [%0 + %2] %1\n\t" |
889 | "sta %%g0, [%0 + %3] %1\n\t" | 889 | "sta %%g0, [%0 + %3] %1\n\t" |
890 | "sta %%g0, [%0 + %4] %1\n\t" | 890 | "sta %%g0, [%0 + %4] %1\n\t" |
891 | "sta %%g0, [%0 + %5] %1\n\t" | 891 | "sta %%g0, [%0 + %5] %1\n\t" |
892 | "sta %%g0, [%0 + %6] %1\n\t" | 892 | "sta %%g0, [%0 + %6] %1\n\t" |
893 | "sta %%g0, [%0 + %7] %1\n\t" | 893 | "sta %%g0, [%0 + %7] %1\n\t" |
894 | "sta %%g0, [%0 + %8] %1\n\t" : : | 894 | "sta %%g0, [%0 + %8] %1\n\t" : : |
895 | "r" (line), | 895 | "r" (line), |
896 | "i" (ASI_M_FLUSH_PAGE), | 896 | "i" (ASI_M_FLUSH_PAGE), |
897 | "r" (a), "r" (b), "r" (c), "r" (d), | 897 | "r" (a), "r" (b), "r" (c), "r" (d), |
898 | "r" (e), "r" (f), "r" (g)); | 898 | "r" (e), "r" (f), "r" (g)); |
899 | } while(line != page); | 899 | } while(line != page); |
900 | srmmu_set_context(octx); | 900 | srmmu_set_context(octx); |
901 | local_irq_restore(flags); | 901 | local_irq_restore(flags); |
902 | FLUSH_END | 902 | FLUSH_END |
903 | } | 903 | } |
904 | 904 | ||
905 | /* Cypress is copy-back, at least that is how we configure it. */ | 905 | /* Cypress is copy-back, at least that is how we configure it. */ |
906 | static void cypress_flush_page_to_ram(unsigned long page) | 906 | static void cypress_flush_page_to_ram(unsigned long page) |
907 | { | 907 | { |
908 | register unsigned long a, b, c, d, e, f, g; | 908 | register unsigned long a, b, c, d, e, f, g; |
909 | unsigned long line; | 909 | unsigned long line; |
910 | 910 | ||
911 | a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | 911 | a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; |
912 | page &= PAGE_MASK; | 912 | page &= PAGE_MASK; |
913 | line = (page + PAGE_SIZE) - 0x100; | 913 | line = (page + PAGE_SIZE) - 0x100; |
914 | goto inside; | 914 | goto inside; |
915 | do { | 915 | do { |
916 | line -= 0x100; | 916 | line -= 0x100; |
917 | inside: | 917 | inside: |
918 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | 918 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" |
919 | "sta %%g0, [%0 + %2] %1\n\t" | 919 | "sta %%g0, [%0 + %2] %1\n\t" |
920 | "sta %%g0, [%0 + %3] %1\n\t" | 920 | "sta %%g0, [%0 + %3] %1\n\t" |
921 | "sta %%g0, [%0 + %4] %1\n\t" | 921 | "sta %%g0, [%0 + %4] %1\n\t" |
922 | "sta %%g0, [%0 + %5] %1\n\t" | 922 | "sta %%g0, [%0 + %5] %1\n\t" |
923 | "sta %%g0, [%0 + %6] %1\n\t" | 923 | "sta %%g0, [%0 + %6] %1\n\t" |
924 | "sta %%g0, [%0 + %7] %1\n\t" | 924 | "sta %%g0, [%0 + %7] %1\n\t" |
925 | "sta %%g0, [%0 + %8] %1\n\t" : : | 925 | "sta %%g0, [%0 + %8] %1\n\t" : : |
926 | "r" (line), | 926 | "r" (line), |
927 | "i" (ASI_M_FLUSH_PAGE), | 927 | "i" (ASI_M_FLUSH_PAGE), |
928 | "r" (a), "r" (b), "r" (c), "r" (d), | 928 | "r" (a), "r" (b), "r" (c), "r" (d), |
929 | "r" (e), "r" (f), "r" (g)); | 929 | "r" (e), "r" (f), "r" (g)); |
930 | } while(line != page); | 930 | } while(line != page); |
931 | } | 931 | } |
932 | 932 | ||
933 | /* Cypress is also IO cache coherent. */ | 933 | /* Cypress is also IO cache coherent. */ |
934 | static void cypress_flush_page_for_dma(unsigned long page) | 934 | static void cypress_flush_page_for_dma(unsigned long page) |
935 | { | 935 | { |
936 | } | 936 | } |
937 | 937 | ||
938 | /* Cypress has a unified L2 VIPT cache, in which both instructions and | 938 | /* Cypress has a unified L2 VIPT cache, in which both instructions and |
939 | * data are stored. It does not have an onboard icache of any sort, therefore | 939 | * data are stored. It does not have an onboard icache of any sort, therefore |
940 | * no flush is necessary. | 940 | * no flush is necessary. |
941 | */ | 941 | */ |
942 | static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) | 942 | static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) |
943 | { | 943 | { |
944 | } | 944 | } |
945 | 945 | ||
946 | static void cypress_flush_tlb_all(void) | 946 | static void cypress_flush_tlb_all(void) |
947 | { | 947 | { |
948 | srmmu_flush_whole_tlb(); | 948 | srmmu_flush_whole_tlb(); |
949 | } | 949 | } |
950 | 950 | ||
951 | static void cypress_flush_tlb_mm(struct mm_struct *mm) | 951 | static void cypress_flush_tlb_mm(struct mm_struct *mm) |
952 | { | 952 | { |
953 | FLUSH_BEGIN(mm) | 953 | FLUSH_BEGIN(mm) |
954 | __asm__ __volatile__( | 954 | __asm__ __volatile__( |
955 | "lda [%0] %3, %%g5\n\t" | 955 | "lda [%0] %3, %%g5\n\t" |
956 | "sta %2, [%0] %3\n\t" | 956 | "sta %2, [%0] %3\n\t" |
957 | "sta %%g0, [%1] %4\n\t" | 957 | "sta %%g0, [%1] %4\n\t" |
958 | "sta %%g5, [%0] %3\n" | 958 | "sta %%g5, [%0] %3\n" |
959 | : /* no outputs */ | 959 | : /* no outputs */ |
960 | : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context), | 960 | : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context), |
961 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) | 961 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) |
962 | : "g5"); | 962 | : "g5"); |
963 | FLUSH_END | 963 | FLUSH_END |
964 | } | 964 | } |
965 | 965 | ||
966 | static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | 966 | static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
967 | { | 967 | { |
968 | struct mm_struct *mm = vma->vm_mm; | 968 | struct mm_struct *mm = vma->vm_mm; |
969 | unsigned long size; | 969 | unsigned long size; |
970 | 970 | ||
971 | FLUSH_BEGIN(mm) | 971 | FLUSH_BEGIN(mm) |
972 | start &= SRMMU_PGDIR_MASK; | 972 | start &= SRMMU_PGDIR_MASK; |
973 | size = SRMMU_PGDIR_ALIGN(end) - start; | 973 | size = SRMMU_PGDIR_ALIGN(end) - start; |
974 | __asm__ __volatile__( | 974 | __asm__ __volatile__( |
975 | "lda [%0] %5, %%g5\n\t" | 975 | "lda [%0] %5, %%g5\n\t" |
976 | "sta %1, [%0] %5\n" | 976 | "sta %1, [%0] %5\n" |
977 | "1:\n\t" | 977 | "1:\n\t" |
978 | "subcc %3, %4, %3\n\t" | 978 | "subcc %3, %4, %3\n\t" |
979 | "bne 1b\n\t" | 979 | "bne 1b\n\t" |
980 | " sta %%g0, [%2 + %3] %6\n\t" | 980 | " sta %%g0, [%2 + %3] %6\n\t" |
981 | "sta %%g5, [%0] %5\n" | 981 | "sta %%g5, [%0] %5\n" |
982 | : /* no outputs */ | 982 | : /* no outputs */ |
983 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200), | 983 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200), |
984 | "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS), | 984 | "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS), |
985 | "i" (ASI_M_FLUSH_PROBE) | 985 | "i" (ASI_M_FLUSH_PROBE) |
986 | : "g5", "cc"); | 986 | : "g5", "cc"); |
987 | FLUSH_END | 987 | FLUSH_END |
988 | } | 988 | } |
989 | 989 | ||
990 | static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 990 | static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
991 | { | 991 | { |
992 | struct mm_struct *mm = vma->vm_mm; | 992 | struct mm_struct *mm = vma->vm_mm; |
993 | 993 | ||
994 | FLUSH_BEGIN(mm) | 994 | FLUSH_BEGIN(mm) |
995 | __asm__ __volatile__( | 995 | __asm__ __volatile__( |
996 | "lda [%0] %3, %%g5\n\t" | 996 | "lda [%0] %3, %%g5\n\t" |
997 | "sta %1, [%0] %3\n\t" | 997 | "sta %1, [%0] %3\n\t" |
998 | "sta %%g0, [%2] %4\n\t" | 998 | "sta %%g0, [%2] %4\n\t" |
999 | "sta %%g5, [%0] %3\n" | 999 | "sta %%g5, [%0] %3\n" |
1000 | : /* no outputs */ | 1000 | : /* no outputs */ |
1001 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK), | 1001 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK), |
1002 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) | 1002 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) |
1003 | : "g5"); | 1003 | : "g5"); |
1004 | FLUSH_END | 1004 | FLUSH_END |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | /* viking.S */ | 1007 | /* viking.S */ |
1008 | extern void viking_flush_cache_all(void); | 1008 | extern void viking_flush_cache_all(void); |
1009 | extern void viking_flush_cache_mm(struct mm_struct *mm); | 1009 | extern void viking_flush_cache_mm(struct mm_struct *mm); |
1010 | extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 1010 | extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, |
1011 | unsigned long end); | 1011 | unsigned long end); |
1012 | extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | 1012 | extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page); |
1013 | extern void viking_flush_page_to_ram(unsigned long page); | 1013 | extern void viking_flush_page_to_ram(unsigned long page); |
1014 | extern void viking_flush_page_for_dma(unsigned long page); | 1014 | extern void viking_flush_page_for_dma(unsigned long page); |
1015 | extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); | 1015 | extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); |
1016 | extern void viking_flush_page(unsigned long page); | 1016 | extern void viking_flush_page(unsigned long page); |
1017 | extern void viking_mxcc_flush_page(unsigned long page); | 1017 | extern void viking_mxcc_flush_page(unsigned long page); |
1018 | extern void viking_flush_tlb_all(void); | 1018 | extern void viking_flush_tlb_all(void); |
1019 | extern void viking_flush_tlb_mm(struct mm_struct *mm); | 1019 | extern void viking_flush_tlb_mm(struct mm_struct *mm); |
1020 | extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 1020 | extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
1021 | unsigned long end); | 1021 | unsigned long end); |
1022 | extern void viking_flush_tlb_page(struct vm_area_struct *vma, | 1022 | extern void viking_flush_tlb_page(struct vm_area_struct *vma, |
1023 | unsigned long page); | 1023 | unsigned long page); |
1024 | extern void sun4dsmp_flush_tlb_all(void); | 1024 | extern void sun4dsmp_flush_tlb_all(void); |
1025 | extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm); | 1025 | extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm); |
1026 | extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 1026 | extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
1027 | unsigned long end); | 1027 | unsigned long end); |
1028 | extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma, | 1028 | extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma, |
1029 | unsigned long page); | 1029 | unsigned long page); |
1030 | 1030 | ||
1031 | /* hypersparc.S */ | 1031 | /* hypersparc.S */ |
1032 | extern void hypersparc_flush_cache_all(void); | 1032 | extern void hypersparc_flush_cache_all(void); |
1033 | extern void hypersparc_flush_cache_mm(struct mm_struct *mm); | 1033 | extern void hypersparc_flush_cache_mm(struct mm_struct *mm); |
1034 | extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | 1034 | extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); |
1035 | extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | 1035 | extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page); |
1036 | extern void hypersparc_flush_page_to_ram(unsigned long page); | 1036 | extern void hypersparc_flush_page_to_ram(unsigned long page); |
1037 | extern void hypersparc_flush_page_for_dma(unsigned long page); | 1037 | extern void hypersparc_flush_page_for_dma(unsigned long page); |
1038 | extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | 1038 | extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); |
1039 | extern void hypersparc_flush_tlb_all(void); | 1039 | extern void hypersparc_flush_tlb_all(void); |
1040 | extern void hypersparc_flush_tlb_mm(struct mm_struct *mm); | 1040 | extern void hypersparc_flush_tlb_mm(struct mm_struct *mm); |
1041 | extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | 1041 | extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); |
1042 | extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | 1042 | extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); |
1043 | extern void hypersparc_setup_blockops(void); | 1043 | extern void hypersparc_setup_blockops(void); |
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | * NOTE: All of this startup code assumes the low 16 MB (approx.) of | 1046 | * NOTE: All of this startup code assumes the low 16 MB (approx.) of |
1047 | * kernel mappings are done with a single contiguous chunk of | 1047 | * kernel mappings are done with a single contiguous chunk of |
1048 | * RAM. On small-RAM machines (SPARCclassics mainly) we only get | 1048 | * RAM. On small-RAM machines (SPARCclassics mainly) we only get |
1049 | * around 8 MB mapped for us. | 1049 | * around 8 MB mapped for us. |
1050 | */ | 1050 | */ |
1051 | 1051 | ||
1052 | static void __init early_pgtable_allocfail(char *type) | 1052 | static void __init early_pgtable_allocfail(char *type) |
1053 | { | 1053 | { |
1054 | prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); | 1054 | prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); |
1055 | prom_halt(); | 1055 | prom_halt(); |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, | 1058 | static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, |
1059 | unsigned long end) | 1059 | unsigned long end) |
1060 | { | 1060 | { |
1061 | pgd_t *pgdp; | 1061 | pgd_t *pgdp; |
1062 | pmd_t *pmdp; | 1062 | pmd_t *pmdp; |
1063 | pte_t *ptep; | 1063 | pte_t *ptep; |
1064 | 1064 | ||
1065 | while(start < end) { | 1065 | while(start < end) { |
1066 | pgdp = pgd_offset_k(start); | 1066 | pgdp = pgd_offset_k(start); |
1067 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | 1067 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { |
1068 | pmdp = (pmd_t *) __srmmu_get_nocache( | 1068 | pmdp = (pmd_t *) __srmmu_get_nocache( |
1069 | SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 1069 | SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
1070 | if (pmdp == NULL) | 1070 | if (pmdp == NULL) |
1071 | early_pgtable_allocfail("pmd"); | 1071 | early_pgtable_allocfail("pmd"); |
1072 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | 1072 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); |
1073 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); | 1073 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); |
1074 | } | 1074 | } |
1075 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); | 1075 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); |
1076 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 1076 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { |
1077 | ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 1077 | ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); |
1078 | if (ptep == NULL) | 1078 | if (ptep == NULL) |
1079 | early_pgtable_allocfail("pte"); | 1079 | early_pgtable_allocfail("pte"); |
1080 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | 1080 | memset(__nocache_fix(ptep), 0, PTE_SIZE); |
1081 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); | 1081 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); |
1082 | } | 1082 | } |
1083 | if (start > (0xffffffffUL - PMD_SIZE)) | 1083 | if (start > (0xffffffffUL - PMD_SIZE)) |
1084 | break; | 1084 | break; |
1085 | start = (start + PMD_SIZE) & PMD_MASK; | 1085 | start = (start + PMD_SIZE) & PMD_MASK; |
1086 | } | 1086 | } |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | static void __init srmmu_allocate_ptable_skeleton(unsigned long start, | 1089 | static void __init srmmu_allocate_ptable_skeleton(unsigned long start, |
1090 | unsigned long end) | 1090 | unsigned long end) |
1091 | { | 1091 | { |
1092 | pgd_t *pgdp; | 1092 | pgd_t *pgdp; |
1093 | pmd_t *pmdp; | 1093 | pmd_t *pmdp; |
1094 | pte_t *ptep; | 1094 | pte_t *ptep; |
1095 | 1095 | ||
1096 | while(start < end) { | 1096 | while(start < end) { |
1097 | pgdp = pgd_offset_k(start); | 1097 | pgdp = pgd_offset_k(start); |
1098 | if(srmmu_pgd_none(*pgdp)) { | 1098 | if(srmmu_pgd_none(*pgdp)) { |
1099 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 1099 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
1100 | if (pmdp == NULL) | 1100 | if (pmdp == NULL) |
1101 | early_pgtable_allocfail("pmd"); | 1101 | early_pgtable_allocfail("pmd"); |
1102 | memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); | 1102 | memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); |
1103 | srmmu_pgd_set(pgdp, pmdp); | 1103 | srmmu_pgd_set(pgdp, pmdp); |
1104 | } | 1104 | } |
1105 | pmdp = srmmu_pmd_offset(pgdp, start); | 1105 | pmdp = srmmu_pmd_offset(pgdp, start); |
1106 | if(srmmu_pmd_none(*pmdp)) { | 1106 | if(srmmu_pmd_none(*pmdp)) { |
1107 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | 1107 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, |
1108 | PTE_SIZE); | 1108 | PTE_SIZE); |
1109 | if (ptep == NULL) | 1109 | if (ptep == NULL) |
1110 | early_pgtable_allocfail("pte"); | 1110 | early_pgtable_allocfail("pte"); |
1111 | memset(ptep, 0, PTE_SIZE); | 1111 | memset(ptep, 0, PTE_SIZE); |
1112 | srmmu_pmd_set(pmdp, ptep); | 1112 | srmmu_pmd_set(pmdp, ptep); |
1113 | } | 1113 | } |
1114 | if (start > (0xffffffffUL - PMD_SIZE)) | 1114 | if (start > (0xffffffffUL - PMD_SIZE)) |
1115 | break; | 1115 | break; |
1116 | start = (start + PMD_SIZE) & PMD_MASK; | 1116 | start = (start + PMD_SIZE) & PMD_MASK; |
1117 | } | 1117 | } |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | /* | 1120 | /* |
1121 | * This is much cleaner than poking around physical address space | 1121 | * This is much cleaner than poking around physical address space |
1122 | * looking at the PROM's page tables directly, which is what most | 1122 | * looking at the PROM's page tables directly, which is what most |
1123 | * other OSes do. Yuck... this is much better. | 1123 | * other OSes do. Yuck... this is much better. |
1124 | */ | 1124 | */ |
1125 | static void __init srmmu_inherit_prom_mappings(unsigned long start, | 1125 | static void __init srmmu_inherit_prom_mappings(unsigned long start, |
1126 | unsigned long end) | 1126 | unsigned long end) |
1127 | { | 1127 | { |
1128 | pgd_t *pgdp; | 1128 | pgd_t *pgdp; |
1129 | pmd_t *pmdp; | 1129 | pmd_t *pmdp; |
1130 | pte_t *ptep; | 1130 | pte_t *ptep; |
1131 | int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ | 1131 | int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ |
1132 | unsigned long prompte; | 1132 | unsigned long prompte; |
1133 | 1133 | ||
1134 | while(start <= end) { | 1134 | while(start <= end) { |
1135 | if (start == 0) | 1135 | if (start == 0) |
1136 | break; /* probably wrap around */ | 1136 | break; /* probably wrap around */ |
1137 | if(start == 0xfef00000) | 1137 | if(start == 0xfef00000) |
1138 | start = KADB_DEBUGGER_BEGVM; | 1138 | start = KADB_DEBUGGER_BEGVM; |
1139 | if(!(prompte = srmmu_hwprobe(start))) { | 1139 | if(!(prompte = srmmu_hwprobe(start))) { |
1140 | start += PAGE_SIZE; | 1140 | start += PAGE_SIZE; |
1141 | continue; | 1141 | continue; |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | /* A red snapper; probe to see at what level it is really mapped. */ | 1144 | /* A red snapper; probe to see at what level it is really mapped. */ |
1145 | what = 0; | 1145 | what = 0; |
1146 | 1146 | ||
1147 | if(!(start & ~(SRMMU_REAL_PMD_MASK))) { | 1147 | if(!(start & ~(SRMMU_REAL_PMD_MASK))) { |
1148 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) | 1148 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) |
1149 | what = 1; | 1149 | what = 1; |
1150 | } | 1150 | } |
1151 | 1151 | ||
1152 | if(!(start & ~(SRMMU_PGDIR_MASK))) { | 1152 | if(!(start & ~(SRMMU_PGDIR_MASK))) { |
1153 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == | 1153 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == |
1154 | prompte) | 1154 | prompte) |
1155 | what = 2; | 1155 | what = 2; |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | pgdp = pgd_offset_k(start); | 1158 | pgdp = pgd_offset_k(start); |
1159 | if(what == 2) { | 1159 | if(what == 2) { |
1160 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte); | 1160 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte); |
1161 | start += SRMMU_PGDIR_SIZE; | 1161 | start += SRMMU_PGDIR_SIZE; |
1162 | continue; | 1162 | continue; |
1163 | } | 1163 | } |
1164 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | 1164 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { |
1165 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 1165 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
1166 | if (pmdp == NULL) | 1166 | if (pmdp == NULL) |
1167 | early_pgtable_allocfail("pmd"); | 1167 | early_pgtable_allocfail("pmd"); |
1168 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | 1168 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); |
1169 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); | 1169 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); |
1170 | } | 1170 | } |
1171 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); | 1171 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); |
1172 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 1172 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { |
1173 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | 1173 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, |
1174 | PTE_SIZE); | 1174 | PTE_SIZE); |
1175 | if (ptep == NULL) | 1175 | if (ptep == NULL) |
1176 | early_pgtable_allocfail("pte"); | 1176 | early_pgtable_allocfail("pte"); |
1177 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | 1177 | memset(__nocache_fix(ptep), 0, PTE_SIZE); |
1178 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); | 1178 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); |
1179 | } | 1179 | } |
1180 | if(what == 1) { | 1180 | if(what == 1) { |
1181 | /* | 1181 | /* |
1182 | * We bend the rule where all 16 PTPs in a pmd_t point | 1182 | * We bend the rule where all 16 PTPs in a pmd_t point |
1183 | * inside the same PTE page, and we leak a perfectly | 1183 | * inside the same PTE page, and we leak a perfectly |
1184 | * good hardware PTE piece. Alternatives seem worse. | 1184 | * good hardware PTE piece. Alternatives seem worse. |
1185 | */ | 1185 | */ |
1186 | unsigned int x; /* Index of HW PMD in soft cluster */ | 1186 | unsigned int x; /* Index of HW PMD in soft cluster */ |
1187 | x = (start >> PMD_SHIFT) & 15; | 1187 | x = (start >> PMD_SHIFT) & 15; |
1188 | *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte; | 1188 | *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte; |
1189 | start += SRMMU_REAL_PMD_SIZE; | 1189 | start += SRMMU_REAL_PMD_SIZE; |
1190 | continue; | 1190 | continue; |
1191 | } | 1191 | } |
1192 | ptep = srmmu_pte_offset(__nocache_fix(pmdp), start); | 1192 | ptep = srmmu_pte_offset(__nocache_fix(pmdp), start); |
1193 | *(pte_t *)__nocache_fix(ptep) = __pte(prompte); | 1193 | *(pte_t *)__nocache_fix(ptep) = __pte(prompte); |
1194 | start += PAGE_SIZE; | 1194 | start += PAGE_SIZE; |
1195 | } | 1195 | } |
1196 | } | 1196 | } |
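
The what = 0/1/2 classification above works by probing the last page of a would-be large mapping: if it returns the same PTE as the first page, the PROM used a single pmd- or pgd-level page. A standalone sketch with srmmu_hwprobe() stubbed out; the region sizes (256 KB real pmd, 16 MB pgdir) and the addresses are assumptions:

    #include <stdio.h>

    #define PAGE_SZ     0x1000UL
    #define REAL_PMD_SZ 0x40000UL    /* assumed 256 KB */
    #define PGDIR_SZ    0x1000000UL  /* assumed 16 MB */

    /* stub: pretend the PROM mapped [0xf0000000, +16 MB) with one pgd page */
    static unsigned long hwprobe_stub(unsigned long va)
    {
            return (va >= 0xf0000000UL && va < 0xf0000000UL + PGDIR_SZ)
                    ? 0x0f00009eUL : 0;
    }

    int main(void)
    {
            unsigned long start = 0xf0000000UL;
            unsigned long pte = hwprobe_stub(start);
            int what = 0;  /* 0 = normal pte, 1 = pmd-level, 2 = pgd-level */

            if (!(start & (REAL_PMD_SZ - 1)) &&
                hwprobe_stub(start - PAGE_SZ + REAL_PMD_SZ) == pte)
                    what = 1;
            if (!(start & (PGDIR_SZ - 1)) &&
                hwprobe_stub(start - PAGE_SZ + PGDIR_SZ) == pte)
                    what = 2;

            printf("what = %d (expect 2: one 16 MB pgd-level mapping)\n", what);
            return 0;
    }
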
1197 | 1197 | ||
1198 | #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID) | 1198 | #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID) |
1199 | 1199 | ||
/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

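	/* SRMMU page table entries hold the physical address shifted
	 * right by 4 bits, with the low bits carrying the access/cache/
	 * valid flags - hence KERNEL_PTE() takes phys_base >> 4 below. */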
	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

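/* Report a fatal memory-probe error and drop back into the PROM. */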
static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

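/*
 * Map every physical memory bank into the kernel linear mapping,
 * one 16MB large page at a time.
 */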
static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

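/*
 * Top-level SRMMU setup: probe the number of MMU contexts from the
 * PROM, set up the nocache area, inherit the PROM mappings, map the
 * kernel, install the context table and finally describe the memory
 * zones to the core VM.
 */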
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

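	/*
	 * Describe the zone layout to the core VM: all low memory is
	 * placed in ZONE_DMA and everything above max_low_pfn becomes
	 * highmem.
	 */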
	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

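/* SRMMU has no per-PTE MMU cache to prime - nothing to do here. */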
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

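/*
 * Tear down a dying address space: flush its cache and TLB state,
 * point its context table entry back at swapper_pg_dir and recycle
 * the context number.
 */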
static void srmmu_destroy_context(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

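/*
 * Probe the virtually-addressed cache geometry from the PROM cpu
 * nodes. On SMP the largest cache size and the smallest line size
 * seen across the cpus win.
 */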
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

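/*
 * Per-cpu HyperSparc setup: enable the cache in write-back mode,
 * turn on the instruction cache and clear any stale fault status.
 */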
static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

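/*
 * Per-cpu Cypress setup: with the cache off, invalidate all tags;
 * with it on, read back every modified and valid line to force it
 * out to memory. Then enable the cache in copy-back mode.
 */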
static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken. At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code). So you see things like crashme completely
	 * hosing your machine which is completely unacceptable. Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

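/* Read the Swift mask ID to pick the right set of bug workarounds. */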
#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here. However I think we can find a way to avoid
		 * all of the workaround overhead under Linux. Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes). Aha, sounds pretty
		 * horrible eh? But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all. This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time? Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people. I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

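/*
 * TurboSparc cache and TLB operations. The variants used here are
 * coarse: most of them dump the whole I/D cache or the whole TLB
 * rather than flushing individual entries.
 */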
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

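/*
 * Per-cpu Tsunami setup: flush both caches, clear the ITD bit and
 * enable the I/D caches.
 */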
static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time. Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

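/*
 * Per-cpu Viking setup. With an MXCC external cache present we can
 * cache page tables; without one, table caching stays off and
 * mixed-cmd mode gets disabled on the secondary cpus.
 */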
static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

1925 | static void __init init_viking(void) | 1925 | static void __init init_viking(void) |
1926 | { | 1926 | { |
1927 | unsigned long mreg = srmmu_get_mmureg(); | 1927 | unsigned long mreg = srmmu_get_mmureg(); |
1928 | 1928 | ||
1929 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ | 1929 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ |
1930 | if(mreg & VIKING_MMODE) { | 1930 | if(mreg & VIKING_MMODE) { |
1931 | srmmu_name = "TI Viking"; | 1931 | srmmu_name = "TI Viking"; |
1932 | viking_mxcc_present = 0; | 1932 | viking_mxcc_present = 0; |
1933 | msi_set_sync(); | 1933 | msi_set_sync(); |
1934 | 1934 | ||
1935 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); | 1935 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); |
1936 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); | 1936 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); |
1937 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); | 1937 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); |
1938 | 1938 | ||
1939 | /* | 1939 | /* |
1940 | * We need this to make sure old viking takes no hits | 1940 | * We need this to make sure old viking takes no hits |
1941 | * on it's cache for dma snoops to workaround the | 1941 | * on it's cache for dma snoops to workaround the |
1942 | * "load from non-cacheable memory" interrupt bug. | 1942 | * "load from non-cacheable memory" interrupt bug. |
1943 | * This is only necessary because of the new way in | 1943 | * This is only necessary because of the new way in |
1944 | * which we use the IOMMU. | 1944 | * which we use the IOMMU. |
1945 | */ | 1945 | */ |
1946 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); | 1946 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); |
1947 | 1947 | ||
1948 | flush_page_for_dma_global = 0; | 1948 | flush_page_for_dma_global = 0; |
1949 | } else { | 1949 | } else { |
1950 | srmmu_name = "TI Viking/MXCC"; | 1950 | srmmu_name = "TI Viking/MXCC"; |
1951 | viking_mxcc_present = 1; | 1951 | viking_mxcc_present = 1; |
1952 | 1952 | ||
1953 | srmmu_cache_pagetables = 1; | 1953 | srmmu_cache_pagetables = 1; |
1954 | 1954 | ||
1955 | /* MXCC vikings lack the DMA snooping bug. */ | 1955 | /* MXCC vikings lack the DMA snooping bug. */ |
1956 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); | 1956 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); |
1957 | } | 1957 | } |
1958 | 1958 | ||
1959 | BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); | 1959 | BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); |
1960 | BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); | 1960 | BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); |
1961 | BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM); | 1961 | BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM); |
1962 | BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM); | 1962 | BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM); |
1963 | 1963 | ||
1964 | #ifdef CONFIG_SMP | 1964 | #ifdef CONFIG_SMP |
1965 | if (sparc_cpu_model == sun4d) { | 1965 | if (sparc_cpu_model == sun4d) { |
1966 | BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); | 1966 | BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); |
1967 | BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); | 1967 | BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); |
1968 | BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM); | 1968 | BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM); |
1969 | BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM); | 1969 | BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM); |
1970 | } else | 1970 | } else |
1971 | #endif | 1971 | #endif |
1972 | { | 1972 | { |
1973 | BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); | 1973 | BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); |
1974 | BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); | 1974 | BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); |
1975 | BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); | 1975 | BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); |
1976 | BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); | 1976 | BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); |
1977 | } | 1977 | } |
1978 | 1978 | ||
1979 | BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); | 1979 | BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); |
1980 | BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); | 1980 | BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); |
1981 | 1981 | ||
1982 | poke_srmmu = poke_viking; | 1982 | poke_srmmu = poke_viking; |
1983 | } | 1983 | } |
1984 | 1984 | ||
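Editor's note: the BTFIXUPSET_CALL() lines above choose, once at boot, which chip routine each generic mmu hook resolves to; roughly, BTFIXUPCALL_NORM leaves a normal call to the named function while BTFIXUPCALL_NOP turns the call site into a no-op. A minimal sketch of that idea, using plain function pointers instead of the kernel's branch patching (every name below is hypothetical):

    /* Hypothetical sketch only; nothing here is the real btfixup machinery.
     * Each hook is a slot that the per-chip init fills in during early boot.
     */
    typedef void (*flush_fn)(void);

    struct mmu_ops {
            flush_fn flush_cache_all;  /* a NULL slot plays the BTFIXUPCALL_NOP role */
    };

    static struct mmu_ops ops;

    static void viking_flush_cache_all_impl(void)
    {
            /* chip-specific cache flush would go here */
    }

    static void init_viking_sketch(void)
    {
            ops.flush_cache_all = viking_flush_cache_all_impl;  /* the "NORM" case */
    }

    static void flush_cache_all_sketch(void)
    {
            if (ops.flush_cache_all)  /* a real NOP fixup has zero runtime cost */
                    ops.flush_cache_all();
    }

The real mechanism rewrites the call instruction itself, so a NOP fixup costs nothing at run time, unlike the NULL check in this sketch.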
1985 | #ifdef CONFIG_SPARC_LEON | 1985 | #ifdef CONFIG_SPARC_LEON |
1986 | 1986 | ||
1987 | void __init poke_leonsparc(void) | 1987 | void __init poke_leonsparc(void) |
1988 | { | 1988 | { |
1989 | } | 1989 | } |
1990 | 1990 | ||
1991 | void __init init_leon(void) | 1991 | void __init init_leon(void) |
1992 | { | 1992 | { |
1993 | 1993 | ||
1994 | srmmu_name = "LEON"; | 1994 | srmmu_name = "LEON"; |
1995 | 1995 | ||
1996 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, | 1996 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, |
1997 | BTFIXUPCALL_NORM); | 1997 | BTFIXUPCALL_NORM); |
1998 | BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, | 1998 | BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, |
1999 | BTFIXUPCALL_NORM); | 1999 | BTFIXUPCALL_NORM); |
2000 | BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, | 2000 | BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, |
2001 | BTFIXUPCALL_NORM); | 2001 | BTFIXUPCALL_NORM); |
2002 | BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, | 2002 | BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, |
2003 | BTFIXUPCALL_NORM); | 2003 | BTFIXUPCALL_NORM); |
2004 | BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, | 2004 | BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, |
2005 | BTFIXUPCALL_NORM); | 2005 | BTFIXUPCALL_NORM); |
2006 | 2006 | ||
2007 | BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); | 2007 | BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
2008 | BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); | 2008 | BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
2009 | BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); | 2009 | BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
2010 | BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM); | 2010 | BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
2011 | 2011 | ||
2012 | BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, | 2012 | BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, |
2013 | BTFIXUPCALL_NOP); | 2013 | BTFIXUPCALL_NOP); |
2014 | BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); | 2014 | BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); |
2015 | 2015 | ||
2016 | poke_srmmu = poke_leonsparc; | 2016 | poke_srmmu = poke_leonsparc; |
2017 | 2017 | ||
2018 | srmmu_cache_pagetables = 0; | 2018 | srmmu_cache_pagetables = 0; |
2019 | 2019 | ||
2020 | leon_flush_during_switch = leon_flush_needed(); | 2020 | leon_flush_during_switch = leon_flush_needed(); |
2021 | } | 2021 | } |
2022 | #endif | 2022 | #endif |
2023 | 2023 | ||
2024 | /* Probe for the srmmu chip version. */ | 2024 | /* Probe for the srmmu chip version. */ |
2025 | static void __init get_srmmu_type(void) | 2025 | static void __init get_srmmu_type(void) |
2026 | { | 2026 | { |
2027 | unsigned long mreg, psr; | 2027 | unsigned long mreg, psr; |
2028 | unsigned long mod_typ, mod_rev, psr_typ, psr_vers; | 2028 | unsigned long mod_typ, mod_rev, psr_typ, psr_vers; |
2029 | 2029 | ||
2030 | srmmu_modtype = SRMMU_INVAL_MOD; | 2030 | srmmu_modtype = SRMMU_INVAL_MOD; |
2031 | hwbug_bitmask = 0; | 2031 | hwbug_bitmask = 0; |
2032 | 2032 | ||
2033 | mreg = srmmu_get_mmureg(); psr = get_psr(); | 2033 | mreg = srmmu_get_mmureg(); psr = get_psr(); |
2034 | mod_typ = (mreg & 0xf0000000) >> 28; | 2034 | mod_typ = (mreg & 0xf0000000) >> 28; |
2035 | mod_rev = (mreg & 0x0f000000) >> 24; | 2035 | mod_rev = (mreg & 0x0f000000) >> 24; |
2036 | psr_typ = (psr >> 28) & 0xf; | 2036 | psr_typ = (psr >> 28) & 0xf; |
2037 | psr_vers = (psr >> 24) & 0xf; | 2037 | psr_vers = (psr >> 24) & 0xf; |
2038 | 2038 | ||
2039 | /* First, check for sparc-leon. */ | 2039 | /* First, check for sparc-leon. */ |
2040 | if (sparc_cpu_model == sparc_leon) { | 2040 | if (sparc_cpu_model == sparc_leon) { |
2041 | init_leon(); | 2041 | init_leon(); |
2042 | return; | 2042 | return; |
2043 | } | 2043 | } |
2044 | 2044 | ||
2045 | /* Second, check for HyperSparc or Cypress. */ | 2045 | /* Second, check for HyperSparc or Cypress. */ |
2046 | if(mod_typ == 1) { | 2046 | if(mod_typ == 1) { |
2047 | switch(mod_rev) { | 2047 | switch(mod_rev) { |
2048 | case 7: | 2048 | case 7: |
2049 | /* UP or MP Hypersparc */ | 2049 | /* UP or MP Hypersparc */ |
2050 | init_hypersparc(); | 2050 | init_hypersparc(); |
2051 | break; | 2051 | break; |
2052 | case 0: | 2052 | case 0: |
2053 | case 2: | 2053 | case 2: |
2054 | /* Uniprocessor Cypress */ | 2054 | /* Uniprocessor Cypress */ |
2055 | init_cypress_604(); | 2055 | init_cypress_604(); |
2056 | break; | 2056 | break; |
2057 | case 10: | 2057 | case 10: |
2058 | case 11: | 2058 | case 11: |
2059 | case 12: | 2059 | case 12: |
2060 | /* _REALLY OLD_ Cypress MP chips... */ | 2060 | /* _REALLY OLD_ Cypress MP chips... */ |
2061 | case 13: | 2061 | case 13: |
2062 | case 14: | 2062 | case 14: |
2063 | case 15: | 2063 | case 15: |
2064 | /* MP Cypress mmu/cache-controller */ | 2064 | /* MP Cypress mmu/cache-controller */ |
2065 | init_cypress_605(mod_rev); | 2065 | init_cypress_605(mod_rev); |
2066 | break; | 2066 | break; |
2067 | default: | 2067 | default: |
2068 | /* Some other Cypress revision, assume a 605. */ | 2068 | /* Some other Cypress revision, assume a 605. */ |
2069 | init_cypress_605(mod_rev); | 2069 | init_cypress_605(mod_rev); |
2070 | break; | 2070 | break; |
2071 | } | 2071 | } |
2072 | return; | 2072 | return; |
2073 | } | 2073 | } |
2074 | 2074 | ||
2075 | /* | 2075 | /* |
2076 | * Now Fujitsu TurboSparc. It might happen that it is | 2076 | * Now Fujitsu TurboSparc. It might happen that it is |
2077 | * in Swift emulation mode, so we will check later... | 2077 | * in Swift emulation mode, so we will check later... |
2078 | */ | 2078 | */ |
2079 | if (psr_typ == 0 && psr_vers == 5) { | 2079 | if (psr_typ == 0 && psr_vers == 5) { |
2080 | init_turbosparc(); | 2080 | init_turbosparc(); |
2081 | return; | 2081 | return; |
2082 | } | 2082 | } |
2083 | 2083 | ||
2084 | /* Next check for Fujitsu Swift. */ | 2084 | /* Next check for Fujitsu Swift. */ |
2085 | if(psr_typ == 0 && psr_vers == 4) { | 2085 | if(psr_typ == 0 && psr_vers == 4) { |
2086 | phandle cpunode; | 2086 | phandle cpunode; |
2087 | char node_str[128]; | 2087 | char node_str[128]; |
2088 | 2088 | ||
2089 | /* Check whether this is really a TurboSparc emulating Swift... */ | 2089 | /* Check whether this is really a TurboSparc emulating Swift... */ |
2090 | cpunode = prom_getchild(prom_root_node); | 2090 | cpunode = prom_getchild(prom_root_node); |
2091 | while((cpunode = prom_getsibling(cpunode)) != 0) { | 2091 | while((cpunode = prom_getsibling(cpunode)) != 0) { |
2092 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); | 2092 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); |
2093 | if(!strcmp(node_str, "cpu")) { | 2093 | if(!strcmp(node_str, "cpu")) { |
2094 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && | 2094 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && |
2095 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { | 2095 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { |
2096 | init_turbosparc(); | 2096 | init_turbosparc(); |
2097 | return; | 2097 | return; |
2098 | } | 2098 | } |
2099 | break; | 2099 | break; |
2100 | } | 2100 | } |
2101 | } | 2101 | } |
2102 | 2102 | ||
2103 | init_swift(); | 2103 | init_swift(); |
2104 | return; | 2104 | return; |
2105 | } | 2105 | } |
2106 | 2106 | ||
2107 | /* Now the Viking family of srmmu. */ | 2107 | /* Now the Viking family of srmmu. */ |
2108 | if(psr_typ == 4 && | 2108 | if(psr_typ == 4 && |
2109 | ((psr_vers == 0) || | 2109 | ((psr_vers == 0) || |
2110 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { | 2110 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { |
2111 | init_viking(); | 2111 | init_viking(); |
2112 | return; | 2112 | return; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | /* Finally the Tsunami. */ | 2115 | /* Finally the Tsunami. */ |
2116 | if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { | 2116 | if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { |
2117 | init_tsunami(); | 2117 | init_tsunami(); |
2118 | return; | 2118 | return; |
2119 | } | 2119 | } |
2120 | 2120 | ||
2121 | /* Oh well */ | 2121 | /* Oh well */ |
2122 | srmmu_is_bad(); | 2122 | srmmu_is_bad(); |
2123 | } | 2123 | } |
2124 | 2124 | ||
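Editor's note: get_srmmu_type() above decodes four nibbles: implementation and revision from the MMU control register, implementation and version from the PSR. A self-contained example of the same extraction, with made-up sample values chosen to hit the plain Viking branch (psr_typ == 4, psr_vers == 0):

    #include <stdio.h>

    /* Stand-alone illustration of the field extraction in get_srmmu_type().
     * The register values are made up; psr = 0x40000000 decodes to
     * psr_typ == 4 and psr_vers == 0, i.e. the plain Viking case.
     */
    int main(void)
    {
            unsigned long mreg = 0x00000000, psr = 0x40000000;
            unsigned long mod_typ  = (mreg & 0xf0000000) >> 28;
            unsigned long mod_rev  = (mreg & 0x0f000000) >> 24;
            unsigned long psr_typ  = (psr >> 28) & 0xf;
            unsigned long psr_vers = (psr >> 24) & 0xf;

            printf("mod_typ=%lu mod_rev=%lu psr_typ=%lu psr_vers=%lu\n",
                   mod_typ, mod_rev, psr_typ, psr_vers);
            return 0;
    }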
2125 | /* don't laugh, static pagetables */ | ||
2126 | static void srmmu_check_pgt_cache(int low, int high) | ||
2127 | { | ||
2128 | } | ||
2129 | |||
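Editor's note: with the empty srmmu_check_pgt_cache() dropped, the hook becomes a statement-shaped no-op macro. The usual C idiom for that is do { } while (0), which expands to a single complete statement, so the call keeps its semicolon and parses safely in unbraced if/else bodies. A sketch with a hypothetical name:

    /* Hypothetical stand-in name. Expanding to do { } while (0) makes the
     * macro one complete statement: an unbraced if/else around the call
     * parses exactly as it would around a real function call.
     */
    #define do_nothing_hook()       do { } while (0)

    void example(int cond)
    {
            if (cond)
                    do_nothing_hook();
            else
                    do_nothing_hook();
    }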
2130 | extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, | 2125 | extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, |
2131 | tsetup_mmu_patchme, rtrap_mmu_patchme; | 2126 | tsetup_mmu_patchme, rtrap_mmu_patchme; |
2132 | 2127 | ||
2133 | extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, | 2128 | extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, |
2134 | tsetup_srmmu_stackchk, srmmu_rett_stackchk; | 2129 | tsetup_srmmu_stackchk, srmmu_rett_stackchk; |
2135 | 2130 | ||
2136 | #ifdef CONFIG_SMP | 2131 | #ifdef CONFIG_SMP |
2137 | /* Local cross-calls. */ | 2132 | /* Local cross-calls. */ |
2138 | static void smp_flush_page_for_dma(unsigned long page) | 2133 | static void smp_flush_page_for_dma(unsigned long page) |
2139 | { | 2134 | { |
2140 | xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); | 2135 | xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); |
2141 | local_flush_page_for_dma(page); | 2136 | local_flush_page_for_dma(page); |
2142 | } | 2137 | } |
2143 | 2138 | ||
2144 | #endif | 2139 | #endif |
2145 | 2140 | ||
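Editor's note: smp_flush_page_for_dma() above shows the cross-call shape used throughout this file: broadcast the chip-local routine to the other CPUs via xc1(), then run it on the calling CPU too. A hedged, stand-alone sketch of that broadcast-then-local pattern (the helper below is a stub standing in for xc1(), and every name is hypothetical):

    typedef void (*xcall_fn)(unsigned long);

    /* Stub standing in for xc1(): assumed to run fn(arg) on every other
     * CPU and wait for completion; here it only models the call shape.
     */
    static void remote_call_one_arg(xcall_fn fn, unsigned long arg)
    {
            (void)fn;
            (void)arg;
    }

    static void local_flush_page_for_dma_sketch(unsigned long page)
    {
            /* chip-specific flush of one page, on this CPU only */
            (void)page;
    }

    static void smp_flush_page_for_dma_sketch(unsigned long page)
    {
            remote_call_one_arg(local_flush_page_for_dma_sketch, page);
            local_flush_page_for_dma_sketch(page);  /* and finally the caller's CPU */
    }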
2146 | /* Load up routines and constants for sun4m and sun4d mmu */ | 2141 | /* Load up routines and constants for sun4m and sun4d mmu */ |
2147 | void __init ld_mmu_srmmu(void) | 2142 | void __init ld_mmu_srmmu(void) |
2148 | { | 2143 | { |
2149 | extern void ld_mmu_iommu(void); | 2144 | extern void ld_mmu_iommu(void); |
2150 | extern void ld_mmu_iounit(void); | 2145 | extern void ld_mmu_iounit(void); |
2151 | extern void ___xchg32_sun4md(void); | 2146 | extern void ___xchg32_sun4md(void); |
2152 | 2147 | ||
2153 | BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); | 2148 | BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); |
2154 | BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); | 2149 | BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); |
2155 | BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); | 2150 | BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); |
2156 | 2151 | ||
2157 | BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); | 2152 | BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); |
2158 | BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); | 2153 | BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); |
2159 | 2154 | ||
2160 | BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); | 2155 | BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); |
2161 | PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); | 2156 | PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); |
2162 | BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); | 2157 | BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); |
2163 | BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); | 2158 | BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); |
2164 | BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); | 2159 | BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); |
2165 | page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); | 2160 | page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); |
2166 | 2161 | ||
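Editor's note: the constant fixups above come in flavours because of SPARC instruction encodings: a SIMM13 fixup patches a signed 13-bit arithmetic immediate, while a SETHI fixup patches the 22-bit sethi field, which only suits values whose low 10 bits are zero. A small illustrative check under those assumptions (the sample constants are placeholders, not the kernel's):

    #include <stdio.h>

    /* Illustrative only: SPARC arithmetic immediates are signed 13-bit,
     * and sethi loads the upper 22 bits, leaving the low 10 bits zero.
     * The sample constants below are placeholders, not the kernel's.
     */
    static int fits_simm13(long v)
    {
            return v >= -4096 && v <= 4095;
    }

    static int fits_sethi(unsigned long v)
    {
            return (v & 0x3ff) == 0;
    }

    int main(void)
    {
            unsigned long sample_shift = 24;         /* small: simm13 patchable */
            unsigned long sample_size  = 1UL << 24;  /* large, aligned: sethi patchable */

            printf("shift as simm13? %d\n", fits_simm13((long)sample_shift));
            printf("size as sethi?   %d\n", fits_sethi(sample_size));
            return 0;
    }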
2167 | /* Functions */ | 2162 | /* Functions */ |
2168 | #ifndef CONFIG_SMP | 2163 | #ifndef CONFIG_SMP |
2169 | BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); | 2164 | BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); |
2170 | #endif | 2165 | #endif |
2171 | BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); | ||
2172 | 2166 | ||
2173 | BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); | 2167 | BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); |
2174 | 2168 | ||
2175 | BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); | 2169 | BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); |
2176 | BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); | 2170 | BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); |
2177 | BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); | 2171 | BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); |
2178 | 2172 | ||
2179 | BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); | 2173 | BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); |
2180 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); | 2174 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); |
2181 | 2175 | ||
2182 | BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); | 2176 | BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); |
2183 | BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); | 2177 | BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); |
2184 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); | 2178 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); |
2185 | 2179 | ||
2186 | BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); | 2180 | BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); |
2187 | BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); | 2181 | BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); |
2188 | BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); | 2182 | BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); |
2189 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); | 2183 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); |
2190 | 2184 | ||
2191 | BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); | 2185 | BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); |
2192 | BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); | 2186 | BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); |
2193 | BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); | 2187 | BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); |
2194 | BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); | 2188 | BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); |
2195 | BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); | 2189 | BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); |
2196 | BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); | 2190 | BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); |
2197 | 2191 | ||
2198 | BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); | 2192 | BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); |
2199 | BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); | 2193 | BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); |
2200 | BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); | 2194 | BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); |
2201 | 2195 | ||
2202 | BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); | 2196 | BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); |
2203 | BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); | 2197 | BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); |
2204 | BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); | 2198 | BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); |
2205 | BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); | 2199 | BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); |
2206 | BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); | 2200 | BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); |
2207 | BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); | 2201 | BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); |
2208 | BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); | 2202 | BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); |
2209 | BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); | 2203 | BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); |
2210 | 2204 | ||
2211 | BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); | 2205 | BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); |
2212 | BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); | 2206 | BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); |
2213 | BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); | 2207 | BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); |
2214 | BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); | 2208 | BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); |
2215 | BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); | 2209 | BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); |
2216 | BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); | 2210 | BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); |
2217 | BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); | 2211 | BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); |
2218 | BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); | 2212 | BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); |
2219 | BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY)); | 2213 | BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY)); |
2220 | BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); | 2214 | BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); |
2221 | BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); | 2215 | BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); |
2222 | BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); | 2216 | BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); |
2223 | 2217 | ||
2224 | BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); | 2218 | BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); |
2225 | BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); | 2219 | BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); |
2226 | 2220 | ||
2227 | BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); | 2221 | BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); |
2228 | BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); | 2222 | BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); |
2229 | BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); | 2223 | BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); |
2230 | 2224 | ||
2231 | BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); | 2225 | BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); |
2232 | 2226 | ||
2233 | get_srmmu_type(); | 2227 | get_srmmu_type(); |
2234 | 2228 | ||
2235 | #ifdef CONFIG_SMP | 2229 | #ifdef CONFIG_SMP |
2236 | /* El switcheroo... */ | 2230 | /* El switcheroo... */ |
2237 | 2231 | ||
2238 | BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); | 2232 | BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); |
2239 | BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); | 2233 | BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); |
2240 | BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); | 2234 | BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); |
2241 | BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); | 2235 | BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); |
2242 | BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); | 2236 | BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); |
2243 | BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); | 2237 | BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); |
2244 | BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); | 2238 | BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); |
2245 | BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); | 2239 | BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); |
2246 | BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); | 2240 | BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); |
2247 | BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); | 2241 | BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); |
2248 | BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); | 2242 | BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); |
2249 | 2243 | ||
2250 | BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); | 2244 | BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); |
2251 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); | 2245 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); |
2252 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); | 2246 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); |
2253 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); | 2247 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); |
2254 | if (sparc_cpu_model != sun4d && | 2248 | if (sparc_cpu_model != sun4d && |
2255 | sparc_cpu_model != sparc_leon) { | 2249 | sparc_cpu_model != sparc_leon) { |
2256 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); | 2250 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); |
2257 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); | 2251 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); |
2258 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); | 2252 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); |
2259 | BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); | 2253 | BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); |
2260 | } | 2254 | } |
2261 | BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); | 2255 | BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); |
2262 | BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); | 2256 | BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); |
2263 | BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); | 2257 | BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); |
2264 | 2258 | ||
2265 | if (poke_srmmu == poke_viking) { | 2259 | if (poke_srmmu == poke_viking) { |
2266 | /* Avoid unnecessary cross calls. */ | 2260 | /* Avoid unnecessary cross calls. */ |
2267 | BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); | 2261 | BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); |
2268 | BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); | 2262 | BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); |
2269 | BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); | 2263 | BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); |
2270 | BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); | 2264 | BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); |
2271 | BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); | 2265 | BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); |
2272 | BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); | 2266 | BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); |
2273 | BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); | 2267 | BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); |
2274 | } | 2268 | } |
2275 | #endif | 2269 | #endif |
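Editor's note: the "El switcheroo" block first snapshots the routine each per-chip init installed into the local_* slot via BTFIXUPCOPY_CALL(), then re-points the public hook at an smp_* wrapper that cross-calls the saved local version; cache-coherent MXCC Vikings are then switched back to the local flushes to avoid needless cross calls. With plain function pointers and hypothetical names, the save-then-wrap ordering looks like:

    typedef void (*hook_fn)(void);

    static hook_fn flush_cache_all_hook;        /* the public entry point */
    static hook_fn local_flush_cache_all_hook;  /* this-CPU-only variant */

    /* Stub standing in for a cross-call: would run fn on all other CPUs. */
    static void broadcast(hook_fn fn)
    {
            (void)fn;
    }

    static void smp_flush_cache_all_sketch(void)
    {
            broadcast(local_flush_cache_all_hook);
            local_flush_cache_all_hook();
    }

    static void switcheroo_sketch(void)
    {
            local_flush_cache_all_hook = flush_cache_all_hook;  /* COPY first */
            flush_cache_all_hook = smp_flush_cache_all_sketch;  /* then SET   */
    }

The copy must precede the set; reversing the two would make the wrapper call itself.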
2276 | 2270 | ||
2277 | if (sparc_cpu_model == sun4d) | 2271 | if (sparc_cpu_model == sun4d) |
2278 | ld_mmu_iounit(); | 2272 | ld_mmu_iounit(); |
2279 | else | 2273 | else |
2280 | ld_mmu_iommu(); | 2274 | ld_mmu_iommu(); |
2281 | #ifdef CONFIG_SMP | 2275 | #ifdef CONFIG_SMP |
2282 | if (sparc_cpu_model == sun4d) | 2276 | if (sparc_cpu_model == sun4d) |
2283 | sun4d_init_smp(); | 2277 | sun4d_init_smp(); |
2284 | else if (sparc_cpu_model == sparc_leon) | 2278 | else if (sparc_cpu_model == sparc_leon) |
2285 | leon_init_smp(); | 2279 | leon_init_smp(); |
2286 | else | 2280 | else |
2287 | sun4m_init_smp(); | 2281 | sun4m_init_smp(); |
2288 | #endif | 2282 | #endif |
2289 | } | 2283 | } |
2290 | 2284 |