Commit 5d83d66635bb1642f3c6a3690c28ff4afdf1ae5f

Authored by David S. Miller
1 parent b25e74b1be

sparc32: Move cache and TLB flushes over to method ops.

This eliminated most of the remaining users of btfixup.

There are some complications because of the special cases we
have for sun4d, leon, and some flavors of viking.

It was found that there are no cases where a flush_page_for_dma
method was not hooked up to something, so the "noflush" iommu
methods were removed.

Add some documentation to the viking_sun4d_smp_ops to describe exactly
the hardware bug which causes us to need special TLB flushing on
sun4d.

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 13 changed files with 466 additions and 478 deletions Side-by-side Diff

arch/sparc/include/asm/cacheflush_32.h
1 1 #ifndef _SPARC_CACHEFLUSH_H
2 2 #define _SPARC_CACHEFLUSH_H
3 3  
4   -#include <linux/mm.h> /* Common for other includes */
5   -// #include <linux/kernel.h> from pgalloc.h
6   -// #include <linux/sched.h> from pgalloc.h
  4 +#include <asm/cachetlb_32.h>
7 5  
8   -// #include <asm/page.h>
9   -#include <asm/btfixup.h>
10   -
11   -/*
12   - * Fine grained cache flushing.
13   - */
14   -#ifdef CONFIG_SMP
15   -
16   -BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17   -BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18   -BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19   -BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20   -
21   -#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22   -#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23   -#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24   -#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25   -
26   -BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27   -BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28   -
29   -#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30   -#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31   -
32   -extern void smp_flush_cache_all(void);
33   -extern void smp_flush_cache_mm(struct mm_struct *mm);
34   -extern void smp_flush_cache_range(struct vm_area_struct *vma,
35   - unsigned long start,
36   - unsigned long end);
37   -extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38   -
39   -extern void smp_flush_page_to_ram(unsigned long page);
40   -extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41   -
42   -#endif /* CONFIG_SMP */
43   -
44   -BTFIXUPDEF_CALL(void, flush_cache_all, void)
45   -BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46   -BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47   -BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48   -
49   -#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50   -#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51   -#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52   -#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53   -#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
  6 +#define flush_cache_all() \
  7 + sparc32_cachetlb_ops->cache_all()
  8 +#define flush_cache_mm(mm) \
  9 + sparc32_cachetlb_ops->cache_mm(mm)
  10 +#define flush_cache_dup_mm(mm) \
  11 + sparc32_cachetlb_ops->cache_mm(mm)
  12 +#define flush_cache_range(vma,start,end) \
  13 + sparc32_cachetlb_ops->cache_range(vma, start, end)
  14 +#define flush_cache_page(vma,addr,pfn) \
  15 + sparc32_cachetlb_ops->cache_page(vma, addr)
54 16 #define flush_icache_range(start, end) do { } while (0)
55 17 #define flush_icache_page(vma, pg) do { } while (0)
56 18  
... ... @@ -67,11 +29,12 @@
67 29 memcpy(dst, src, len); \
68 30 } while (0)
69 31  
70   -BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71   -BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72   -
73   -#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74   -#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
  32 +#define __flush_page_to_ram(addr) \
  33 + sparc32_cachetlb_ops->page_to_ram(addr)
  34 +#define flush_sig_insns(mm,insn_addr) \
  35 + sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
  36 +#define flush_page_for_dma(addr) \
  37 + sparc32_cachetlb_ops->page_for_dma(addr)
75 38  
76 39 extern void sparc_flush_page_to_ram(struct page *page);
77 40  
arch/sparc/include/asm/cachetlb_32.h
  1 +#ifndef _SPARC_CACHETLB_H
  2 +#define _SPARC_CACHETLB_H
  3 +
  4 +struct mm_struct;
  5 +struct vm_area_struct;
  6 +
  7 +struct sparc32_cachetlb_ops {
  8 + void (*cache_all)(void);
  9 + void (*cache_mm)(struct mm_struct *);
  10 + void (*cache_range)(struct vm_area_struct *, unsigned long,
  11 + unsigned long);
  12 + void (*cache_page)(struct vm_area_struct *, unsigned long);
  13 +
  14 + void (*tlb_all)(void);
  15 + void (*tlb_mm)(struct mm_struct *);
  16 + void (*tlb_range)(struct vm_area_struct *, unsigned long,
  17 + unsigned long);
  18 + void (*tlb_page)(struct vm_area_struct *, unsigned long);
  19 +
  20 + void (*page_to_ram)(unsigned long);
  21 + void (*sig_insns)(struct mm_struct *, unsigned long);
  22 + void (*page_for_dma)(unsigned long);
  23 +};
  24 +extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
  25 +#ifdef CONFIG_SMP
  26 +extern const struct sparc32_cachetlb_ops *local_ops;
  27 +#endif
  28 +
  29 +#endif /* SPARC_CACHETLB_H */
arch/sparc/include/asm/tlbflush_32.h
1 1 #ifndef _SPARC_TLBFLUSH_H
2 2 #define _SPARC_TLBFLUSH_H
3 3  
4   -#include <linux/mm.h>
5   -// #include <asm/processor.h>
  4 +#include <asm/cachetlb_32.h>
6 5  
7   -/*
8   - * TLB flushing:
9   - *
10   - * - flush_tlb() flushes the current mm struct TLBs XXX Exists?
11   - * - flush_tlb_all() flushes all processes TLBs
12   - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13   - * - flush_tlb_page(vma, vmaddr) flushes one page
14   - * - flush_tlb_range(vma, start, end) flushes a range of pages
15   - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16   - */
17   -
18   -#ifdef CONFIG_SMP
19   -
20   -BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21   -BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22   -BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23   -BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24   -
25   -#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26   -#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27   -#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28   -#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29   -
30   -extern void smp_flush_tlb_all(void);
31   -extern void smp_flush_tlb_mm(struct mm_struct *mm);
32   -extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33   - unsigned long start,
34   - unsigned long end);
35   -extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36   -
37   -#endif /* CONFIG_SMP */
38   -
39   -BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40   -BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41   -BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42   -BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43   -
44   -#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45   -#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46   -#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47   -#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48   -
49   -// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
  6 +#define flush_tlb_all() \
  7 + sparc32_cachetlb_ops->tlb_all()
  8 +#define flush_tlb_mm(mm) \
  9 + sparc32_cachetlb_ops->tlb_mm(mm)
  10 +#define flush_tlb_range(vma, start, end) \
  11 + sparc32_cachetlb_ops->tlb_range(vma, start, end)
  12 +#define flush_tlb_page(vma, addr) \
  13 + sparc32_cachetlb_ops->tlb_page(vma, addr)
50 14  
51 15 /*
52 16 * This is a kludge, until I know better. --zaitcev XXX
arch/sparc/kernel/leon_kernel.c
... ... @@ -427,7 +427,7 @@
427 427 */
428 428 local_irq_save(flags);
429 429 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
430   - local_flush_cache_all();
  430 + local_ops->cache_all();
431 431 local_irq_restore(flags);
432 432 }
433 433 #endif
arch/sparc/kernel/leon_smp.c
... ... @@ -75,8 +75,8 @@
75 75 {
76 76 int cpuid = hard_smpleon_processor_id();
77 77  
78   - local_flush_cache_all();
79   - local_flush_tlb_all();
  78 + local_ops->cache_all();
  79 + local_ops->tlb_all();
80 80 leon_configure_cache_smp();
81 81  
82 82 notify_cpu_starting(cpuid);
... ... @@ -87,8 +87,8 @@
87 87 calibrate_delay();
88 88 smp_store_cpu_info(cpuid);
89 89  
90   - local_flush_cache_all();
91   - local_flush_tlb_all();
  90 + local_ops->cache_all();
  91 + local_ops->tlb_all();
92 92  
93 93 /*
94 94 * Unblock the master CPU _only_ when the scheduler state
... ... @@ -99,8 +99,8 @@
99 99 */
100 100 do_swap(&cpu_callin_map[cpuid], 1);
101 101  
102   - local_flush_cache_all();
103   - local_flush_tlb_all();
  102 + local_ops->cache_all();
  103 + local_ops->tlb_all();
104 104  
105 105 /* Fix idle thread fields. */
106 106 __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
... ... @@ -143,8 +143,8 @@
143 143 }
144 144 }
145 145  
146   - local_flush_cache_all();
147   - local_flush_tlb_all();
  146 + local_ops->cache_all();
  147 + local_ops->tlb_all();
148 148 }
149 149  
150 150 void leon_smp_setbroadcast(unsigned int mask)
... ... @@ -199,7 +199,7 @@
199 199 leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
200 200  
201 201 leon_configure_cache_smp();
202   - local_flush_cache_all();
  202 + local_ops->cache_all();
203 203  
204 204 }
205 205  
... ... @@ -226,7 +226,7 @@
226 226 /* whirrr, whirrr, whirrrrrrrrr... */
227 227 printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
228 228 (unsigned int)&leon3_irqctrl_regs->mpstatus);
229   - local_flush_cache_all();
  229 + local_ops->cache_all();
230 230  
231 231 /* Make sure all IRQs are off from the start for this new CPU */
232 232 LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
... ... @@ -251,7 +251,7 @@
251 251 leon_enable_irq_cpu(leon_ipi_irq, i);
252 252 }
253 253  
254   - local_flush_cache_all();
  254 + local_ops->cache_all();
255 255 return 0;
256 256 }
257 257  
... ... @@ -271,7 +271,7 @@
271 271 }
272 272 }
273 273 *prev = first;
274   - local_flush_cache_all();
  274 + local_ops->cache_all();
275 275  
276 276 /* Free unneeded trap tables */
277 277 if (!cpu_present(1)) {
... ... @@ -337,7 +337,7 @@
337 337 local_irq_save(flags);
338 338 trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
339 339 trap_table->inst_three += smpleon_ipi - real_irq_entry;
340   - local_flush_cache_all();
  340 + local_ops->cache_all();
341 341 local_irq_restore(flags);
342 342  
343 343 for_each_possible_cpu(cpu) {
arch/sparc/kernel/smp_32.c
... ... @@ -171,128 +171,6 @@
171 171 irq_exit();
172 172 }
173 173  
174   -void smp_flush_cache_all(void)
175   -{
176   - xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
177   - local_flush_cache_all();
178   -}
179   -
180   -void smp_flush_tlb_all(void)
181   -{
182   - xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
183   - local_flush_tlb_all();
184   -}
185   -
186   -void smp_flush_cache_mm(struct mm_struct *mm)
187   -{
188   - if(mm->context != NO_CONTEXT) {
189   - cpumask_t cpu_mask;
190   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
191   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
192   - if (!cpumask_empty(&cpu_mask))
193   - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
194   - local_flush_cache_mm(mm);
195   - }
196   -}
197   -
198   -void smp_flush_tlb_mm(struct mm_struct *mm)
199   -{
200   - if(mm->context != NO_CONTEXT) {
201   - cpumask_t cpu_mask;
202   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
203   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
204   - if (!cpumask_empty(&cpu_mask)) {
205   - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
206   - if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
207   - cpumask_copy(mm_cpumask(mm),
208   - cpumask_of(smp_processor_id()));
209   - }
210   - local_flush_tlb_mm(mm);
211   - }
212   -}
213   -
214   -void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
215   - unsigned long end)
216   -{
217   - struct mm_struct *mm = vma->vm_mm;
218   -
219   - if (mm->context != NO_CONTEXT) {
220   - cpumask_t cpu_mask;
221   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
222   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
223   - if (!cpumask_empty(&cpu_mask))
224   - xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
225   - local_flush_cache_range(vma, start, end);
226   - }
227   -}
228   -
229   -void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
230   - unsigned long end)
231   -{
232   - struct mm_struct *mm = vma->vm_mm;
233   -
234   - if (mm->context != NO_CONTEXT) {
235   - cpumask_t cpu_mask;
236   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
237   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
238   - if (!cpumask_empty(&cpu_mask))
239   - xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
240   - local_flush_tlb_range(vma, start, end);
241   - }
242   -}
243   -
244   -void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
245   -{
246   - struct mm_struct *mm = vma->vm_mm;
247   -
248   - if(mm->context != NO_CONTEXT) {
249   - cpumask_t cpu_mask;
250   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
251   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
252   - if (!cpumask_empty(&cpu_mask))
253   - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
254   - local_flush_cache_page(vma, page);
255   - }
256   -}
257   -
258   -void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
259   -{
260   - struct mm_struct *mm = vma->vm_mm;
261   -
262   - if(mm->context != NO_CONTEXT) {
263   - cpumask_t cpu_mask;
264   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
265   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
266   - if (!cpumask_empty(&cpu_mask))
267   - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
268   - local_flush_tlb_page(vma, page);
269   - }
270   -}
271   -
272   -void smp_flush_page_to_ram(unsigned long page)
273   -{
274   - /* Current theory is that those who call this are the one's
275   - * who have just dirtied their cache with the pages contents
276   - * in kernel space, therefore we only run this on local cpu.
277   - *
278   - * XXX This experiment failed, research further... -DaveM
279   - */
280   -#if 1
281   - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
282   -#endif
283   - local_flush_page_to_ram(page);
284   -}
285   -
286   -void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
287   -{
288   - cpumask_t cpu_mask;
289   - cpumask_copy(&cpu_mask, mm_cpumask(mm));
290   - cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
291   - if (!cpumask_empty(&cpu_mask))
292   - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
293   - local_flush_sig_insns(mm, insn_addr);
294   -}
295   -
296 174 int setup_profiling_timer(unsigned int multiplier)
297 175 {
298 176 return -EINVAL;
arch/sparc/kernel/sun4d_irq.c
... ... @@ -15,6 +15,7 @@
15 15 #include <asm/sbi.h>
16 16 #include <asm/cacheflush.h>
17 17 #include <asm/setup.h>
  18 +#include <asm/oplib.h>
18 19  
19 20 #include "kernel.h"
20 21 #include "irq.h"
... ... @@ -411,7 +412,7 @@
411 412 trap_table->inst_two = lvl14_save[1];
412 413 trap_table->inst_three = lvl14_save[2];
413 414 trap_table->inst_four = lvl14_save[3];
414   - local_flush_cache_all();
  415 + local_ops->cache_all();
415 416 local_irq_restore(flags);
416 417 #endif
417 418 }
arch/sparc/kernel/sun4d_smp.c
... ... @@ -10,12 +10,14 @@
10 10 #include <linux/interrupt.h>
11 11 #include <linux/profile.h>
12 12 #include <linux/delay.h>
  13 +#include <linux/sched.h>
13 14 #include <linux/cpu.h>
14 15  
15 16 #include <asm/cacheflush.h>
16 17 #include <asm/switch_to.h>
17 18 #include <asm/tlbflush.h>
18 19 #include <asm/timer.h>
  20 +#include <asm/oplib.h>
19 21 #include <asm/sbi.h>
20 22 #include <asm/mmu.h>
21 23  
... ... @@ -60,8 +62,8 @@
60 62 /* Enable level15 interrupt, disable level14 interrupt for now */
61 63 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
62 64  
63   - local_flush_cache_all();
64   - local_flush_tlb_all();
  65 + local_ops->cache_all();
  66 + local_ops->tlb_all();
65 67  
66 68 notify_cpu_starting(cpuid);
67 69 /*
68 70  
... ... @@ -75,13 +77,13 @@
75 77  
76 78 calibrate_delay();
77 79 smp_store_cpu_info(cpuid);
78   - local_flush_cache_all();
79   - local_flush_tlb_all();
  80 + local_ops->cache_all();
  81 + local_ops->tlb_all();
80 82  
81 83 /* Allow master to continue. */
82 84 sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
83   - local_flush_cache_all();
84   - local_flush_tlb_all();
  85 + local_ops->cache_all();
  86 + local_ops->tlb_all();
85 87  
86 88 while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
87 89 barrier();
... ... @@ -101,8 +103,8 @@
101 103 atomic_inc(&init_mm.mm_count);
102 104 current->active_mm = &init_mm;
103 105  
104   - local_flush_cache_all();
105   - local_flush_tlb_all();
  106 + local_ops->cache_all();
  107 + local_ops->tlb_all();
106 108  
107 109 local_irq_enable(); /* We don't allow PIL 14 yet */
108 110  
... ... @@ -124,7 +126,7 @@
124 126 smp4d_ipi_init();
125 127 if (boot_cpu_id)
126 128 current_set[0] = NULL;
127   - local_flush_cache_all();
  129 + local_ops->cache_all();
128 130 }
129 131  
130 132 int __cpuinit smp4d_boot_one_cpu(int i)
... ... @@ -150,7 +152,7 @@
150 152  
151 153 /* whirrr, whirrr, whirrrrrrrrr... */
152 154 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
153   - local_flush_cache_all();
  155 + local_ops->cache_all();
154 156 prom_startcpu(cpu_node,
155 157 &smp_penguin_ctable, 0, (char *)entry);
156 158  
... ... @@ -168,7 +170,7 @@
168 170 return -ENODEV;
169 171  
170 172 }
171   - local_flush_cache_all();
  173 + local_ops->cache_all();
172 174 return 0;
173 175 }
174 176  
... ... @@ -185,7 +187,7 @@
185 187 prev = &cpu_data(i).next;
186 188 }
187 189 *prev = first;
188   - local_flush_cache_all();
  190 + local_ops->cache_all();
189 191  
190 192 /* Ok, they are spinning and ready to go. */
191 193 smp_processors_ready = 1;
arch/sparc/kernel/sun4m_irq.c
... ... @@ -431,7 +431,7 @@
431 431 trap_table->inst_two = lvl14_save[1];
432 432 trap_table->inst_three = lvl14_save[2];
433 433 trap_table->inst_four = lvl14_save[3];
434   - local_flush_cache_all();
  434 + local_ops->cache_all();
435 435 local_irq_restore(flags);
436 436 }
437 437 #endif
arch/sparc/kernel/sun4m_smp.c
... ... @@ -8,12 +8,14 @@
8 8 #include <linux/interrupt.h>
9 9 #include <linux/profile.h>
10 10 #include <linux/delay.h>
  11 +#include <linux/sched.h>
11 12 #include <linux/cpu.h>
12 13  
13 14 #include <asm/cacheflush.h>
14 15 #include <asm/switch_to.h>
15 16 #include <asm/tlbflush.h>
16 17 #include <asm/timer.h>
  18 +#include <asm/oplib.h>
17 19  
18 20 #include "irq.h"
19 21 #include "kernel.h"
... ... @@ -38,8 +40,8 @@
38 40 {
39 41 int cpuid = hard_smp_processor_id();
40 42  
41   - local_flush_cache_all();
42   - local_flush_tlb_all();
  43 + local_ops->cache_all();
  44 + local_ops->tlb_all();
43 45  
44 46 notify_cpu_starting(cpuid);
45 47  
... ... @@ -48,8 +50,8 @@
48 50 calibrate_delay();
49 51 smp_store_cpu_info(cpuid);
50 52  
51   - local_flush_cache_all();
52   - local_flush_tlb_all();
  53 + local_ops->cache_all();
  54 + local_ops->tlb_all();
53 55  
54 56 /*
55 57 * Unblock the master CPU _only_ when the scheduler state
... ... @@ -61,8 +63,8 @@
61 63 swap_ulong(&cpu_callin_map[cpuid], 1);
62 64  
63 65 /* XXX: What's up with all the flushes? */
64   - local_flush_cache_all();
65   - local_flush_tlb_all();
  66 + local_ops->cache_all();
  67 + local_ops->tlb_all();
66 68  
67 69 /* Fix idle thread fields. */
68 70 __asm__ __volatile__("ld [%0], %%g6\n\t"
... ... @@ -88,7 +90,7 @@
88 90 {
89 91 smp4m_ipi_init();
90 92 sun4m_unmask_profile_irq();
91   - local_flush_cache_all();
  93 + local_ops->cache_all();
92 94 }
93 95  
94 96 int __cpuinit smp4m_boot_one_cpu(int i)
... ... @@ -117,7 +119,7 @@
117 119  
118 120 /* whirrr, whirrr, whirrrrrrrrr... */
119 121 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
120   - local_flush_cache_all();
  122 + local_ops->cache_all();
121 123 prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
122 124  
123 125 /* wheee... it's going... */
... ... @@ -132,7 +134,7 @@
132 134 return -ENODEV;
133 135 }
134 136  
135   - local_flush_cache_all();
  137 + local_ops->cache_all();
136 138 return 0;
137 139 }
138 140  
... ... @@ -149,7 +151,7 @@
149 151 prev = &cpu_data(i).next;
150 152 }
151 153 *prev = first;
152   - local_flush_cache_all();
  154 + local_ops->cache_all();
153 155  
154 156 /* Ok, they are spinning and ready to go. */
155 157 }
arch/sparc/mm/btfixup.c
... ... @@ -38,7 +38,6 @@
38 38 static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
39 39 static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
40 40 static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
41   -static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
42 41 static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
43 42  
44 43 #ifdef BTFIXUP_OPTIMIZE_OTHER
... ... @@ -75,7 +74,6 @@
75 74 unsigned insn;
76 75 unsigned *addr;
77 76 int fmangled = 0;
78   - void (*flush_cacheall)(void);
79 77  
80 78 if (!visited) {
81 79 visited++;
82 80  
83 81  
... ... @@ -311,14 +309,9 @@
311 309 p = q + count;
312 310 }
313 311 #ifdef CONFIG_SMP
314   - flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
  312 + local_ops->cache_all();
315 313 #else
316   - flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
  314 + sparc32_cachetlb_ops->cache_all();
317 315 #endif
318   - if (!flush_cacheall) {
319   - prom_printf(fca_und);
320   - prom_halt();
321   - }
322   - (*flush_cacheall)();
323 316 }
arch/sparc/mm/iommu.c
... ... @@ -39,8 +39,6 @@
39 39  
40 40 /* srmmu.c */
41 41 extern int viking_mxcc_present;
42   -BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
43   -#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
44 42 extern int flush_page_for_dma_global;
45 43 static int viking_flush;
46 44 /* viking.S */
... ... @@ -216,11 +214,6 @@
216 214 return busa + off;
217 215 }
218 216  
219   -static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
220   -{
221   - return iommu_get_scsi_one(dev, vaddr, len);
222   -}
223   -
224 217 static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
225 218 {
226 219 flush_page_for_dma(0);
... ... @@ -238,19 +231,6 @@
238 231 return iommu_get_scsi_one(dev, vaddr, len);
239 232 }
240 233  
241   -static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
242   -{
243   - int n;
244   -
245   - while (sz != 0) {
246   - --sz;
247   - n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
248   - sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
249   - sg->dma_length = sg->length;
250   - sg = sg_next(sg);
251   - }
252   -}
253   -
254 234 static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
255 235 {
256 236 int n;
... ... @@ -426,17 +406,6 @@
426 406 }
427 407 #endif
428 408  
429   -static const struct sparc32_dma_ops iommu_dma_noflush_ops = {
430   - .get_scsi_one = iommu_get_scsi_one_noflush,
431   - .get_scsi_sgl = iommu_get_scsi_sgl_noflush,
432   - .release_scsi_one = iommu_release_scsi_one,
433   - .release_scsi_sgl = iommu_release_scsi_sgl,
434   -#ifdef CONFIG_SBUS
435   - .map_dma_area = iommu_map_dma_area,
436   - .unmap_dma_area = iommu_unmap_dma_area,
437   -#endif
438   -};
439   -
440 409 static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
441 410 .get_scsi_one = iommu_get_scsi_one_gflush,
442 411 .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
... ... @@ -461,12 +430,7 @@
461 430  
462 431 void __init ld_mmu_iommu(void)
463 432 {
464   - viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
465   -
466   - if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
467   - /* IO coherent chip */
468   - sparc32_dma_ops = &iommu_dma_noflush_ops;
469   - } else if (flush_page_for_dma_global) {
  433 + if (flush_page_for_dma_global) {
470 434 /* flush_page_for_dma flushes everything, no matter of what page is it */
471 435 sparc32_dma_ops = &iommu_dma_gflush_ops;
472 436 } else {
arch/sparc/mm/srmmu.c
... ... @@ -65,24 +65,20 @@
65 65  
66 66 static pgd_t *srmmu_swapper_pg_dir;
67 67  
  68 +const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
  69 +
68 70 #ifdef CONFIG_SMP
  71 +const struct sparc32_cachetlb_ops *local_ops;
  72 +
69 73 #define FLUSH_BEGIN(mm)
70 74 #define FLUSH_END
71 75 #else
72   -#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
  76 +#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
73 77 #define FLUSH_END }
74 78 #endif
75 79  
76   -BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
77   -#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
78   -
79 80 int flush_page_for_dma_global = 1;
80 81  
81   -#ifdef CONFIG_SMP
82   -BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
83   -#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
84   -#endif
85   -
86 82 char *srmmu_name;
87 83  
88 84 ctxd_t *srmmu_ctx_table_phys;
... ... @@ -1126,7 +1122,7 @@
1126 1122 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
1127 1123 #ifdef CONFIG_SMP
1128 1124 /* Stop from hanging here... */
1129   - local_flush_tlb_all();
  1125 + local_ops->tlb_all();
1130 1126 #else
1131 1127 flush_tlb_all();
1132 1128 #endif
... ... @@ -1284,6 +1280,20 @@
1284 1280 clear = srmmu_get_fstatus();
1285 1281 }
1286 1282  
  1283 +static const struct sparc32_cachetlb_ops hypersparc_ops = {
  1284 + .cache_all = hypersparc_flush_cache_all,
  1285 + .cache_mm = hypersparc_flush_cache_mm,
  1286 + .cache_page = hypersparc_flush_cache_page,
  1287 + .cache_range = hypersparc_flush_cache_range,
  1288 + .tlb_all = hypersparc_flush_tlb_all,
  1289 + .tlb_mm = hypersparc_flush_tlb_mm,
  1290 + .tlb_page = hypersparc_flush_tlb_page,
  1291 + .tlb_range = hypersparc_flush_tlb_range,
  1292 + .page_to_ram = hypersparc_flush_page_to_ram,
  1293 + .sig_insns = hypersparc_flush_sig_insns,
  1294 + .page_for_dma = hypersparc_flush_page_for_dma,
  1295 +};
  1296 +
1287 1297 static void __init init_hypersparc(void)
1288 1298 {
1289 1299 srmmu_name = "ROSS HyperSparc";
1290 1300  
... ... @@ -1292,22 +1302,8 @@
1292 1302 init_vac_layout();
1293 1303  
1294 1304 is_hypersparc = 1;
  1305 + sparc32_cachetlb_ops = &hypersparc_ops;
1295 1306  
1296   - BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1297   - BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1298   - BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
1299   - BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
1300   -
1301   - BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
1302   - BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1303   - BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
1304   - BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
1305   -
1306   - BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1307   - BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
1308   - BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
1309   -
1310   -
1311 1307 poke_srmmu = poke_hypersparc;
1312 1308  
1313 1309 hypersparc_setup_blockops();
1314 1310  
... ... @@ -1352,25 +1348,24 @@
1352 1348 srmmu_set_mmureg(mreg);
1353 1349 }
1354 1350  
  1351 +static const struct sparc32_cachetlb_ops cypress_ops = {
  1352 + .cache_all = cypress_flush_cache_all,
  1353 + .cache_mm = cypress_flush_cache_mm,
  1354 + .cache_page = cypress_flush_cache_page,
  1355 + .cache_range = cypress_flush_cache_range,
  1356 + .tlb_all = cypress_flush_tlb_all,
  1357 + .tlb_mm = cypress_flush_tlb_mm,
  1358 + .tlb_page = cypress_flush_tlb_page,
  1359 + .tlb_range = cypress_flush_tlb_range,
  1360 + .page_to_ram = cypress_flush_page_to_ram,
  1361 + .sig_insns = cypress_flush_sig_insns,
  1362 + .page_for_dma = cypress_flush_page_for_dma,
  1363 +};
  1364 +
1355 1365 static void __init init_cypress_common(void)
1356 1366 {
1357 1367 init_vac_layout();
1358   -
1359   - BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1360   - BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1361   - BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
1362   - BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
1363   -
1364   - BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
1365   - BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
1366   - BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
1367   - BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
1368   -
1369   -
1370   - BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
1371   - BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
1372   - BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
1373   -
  1368 + sparc32_cachetlb_ops = &cypress_ops;
1374 1369 poke_srmmu = poke_cypress;
1375 1370 }
1376 1371  
... ... @@ -1421,6 +1416,20 @@
1421 1416 srmmu_set_mmureg(mreg);
1422 1417 }
1423 1418  
  1419 +static const struct sparc32_cachetlb_ops swift_ops = {
  1420 + .cache_all = swift_flush_cache_all,
  1421 + .cache_mm = swift_flush_cache_mm,
  1422 + .cache_page = swift_flush_cache_page,
  1423 + .cache_range = swift_flush_cache_range,
  1424 + .tlb_all = swift_flush_tlb_all,
  1425 + .tlb_mm = swift_flush_tlb_mm,
  1426 + .tlb_page = swift_flush_tlb_page,
  1427 + .tlb_range = swift_flush_tlb_range,
  1428 + .page_to_ram = swift_flush_page_to_ram,
  1429 + .sig_insns = swift_flush_sig_insns,
  1430 + .page_for_dma = swift_flush_page_for_dma,
  1431 +};
  1432 +
1424 1433 #define SWIFT_MASKID_ADDR 0x10003018
1425 1434 static void __init init_swift(void)
1426 1435 {
... ... @@ -1471,21 +1480,7 @@
1471 1480 break;
1472 1481 }
1473 1482  
1474   - BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
1475   - BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
1476   - BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
1477   - BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
1478   -
1479   -
1480   - BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
1481   - BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
1482   - BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
1483   - BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
1484   -
1485   - BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
1486   - BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
1487   - BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
1488   -
  1483 + sparc32_cachetlb_ops = &swift_ops;
1489 1484 flush_page_for_dma_global = 0;
1490 1485  
1491 1486 /*
1492 1487  
... ... @@ -1618,26 +1613,25 @@
1618 1613 srmmu_set_mmureg(mreg);
1619 1614 }
1620 1615  
  1616 +static const struct sparc32_cachetlb_ops turbosparc_ops = {
  1617 + .cache_all = turbosparc_flush_cache_all,
  1618 + .cache_mm = turbosparc_flush_cache_mm,
  1619 + .cache_page = turbosparc_flush_cache_page,
  1620 + .cache_range = turbosparc_flush_cache_range,
  1621 + .tlb_all = turbosparc_flush_tlb_all,
  1622 + .tlb_mm = turbosparc_flush_tlb_mm,
  1623 + .tlb_page = turbosparc_flush_tlb_page,
  1624 + .tlb_range = turbosparc_flush_tlb_range,
  1625 + .page_to_ram = turbosparc_flush_page_to_ram,
  1626 + .sig_insns = turbosparc_flush_sig_insns,
  1627 + .page_for_dma = turbosparc_flush_page_for_dma,
  1628 +};
  1629 +
1621 1630 static void __init init_turbosparc(void)
1622 1631 {
1623 1632 srmmu_name = "Fujitsu TurboSparc";
1624 1633 srmmu_modtype = TurboSparc;
1625   -
1626   - BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
1627   - BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
1628   - BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
1629   - BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
1630   -
1631   - BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
1632   - BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1633   - BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
1634   - BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
1635   -
1636   - BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1637   -
1638   - BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
1639   - BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
1640   -
  1634 + sparc32_cachetlb_ops = &turbosparc_ops;
1641 1635 poke_srmmu = poke_turbosparc;
1642 1636 }
1643 1637  
... ... @@ -1652,6 +1646,20 @@
1652 1646 srmmu_set_mmureg(mreg);
1653 1647 }
1654 1648  
  1649 +static const struct sparc32_cachetlb_ops tsunami_ops = {
  1650 + .cache_all = tsunami_flush_cache_all,
  1651 + .cache_mm = tsunami_flush_cache_mm,
  1652 + .cache_page = tsunami_flush_cache_page,
  1653 + .cache_range = tsunami_flush_cache_range,
  1654 + .tlb_all = tsunami_flush_tlb_all,
  1655 + .tlb_mm = tsunami_flush_tlb_mm,
  1656 + .tlb_page = tsunami_flush_tlb_page,
  1657 + .tlb_range = tsunami_flush_tlb_range,
  1658 + .page_to_ram = tsunami_flush_page_to_ram,
  1659 + .sig_insns = tsunami_flush_sig_insns,
  1660 + .page_for_dma = tsunami_flush_page_for_dma,
  1661 +};
  1662 +
1655 1663 static void __init init_tsunami(void)
1656 1664 {
1657 1665 /*
... ... @@ -1662,22 +1670,7 @@
1662 1670  
1663 1671 srmmu_name = "TI Tsunami";
1664 1672 srmmu_modtype = Tsunami;
1665   -
1666   - BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
1667   - BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
1668   - BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
1669   - BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
1670   -
1671   -
1672   - BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
1673   - BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
1674   - BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
1675   - BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
1676   -
1677   - BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
1678   - BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
1679   - BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
1680   -
  1673 + sparc32_cachetlb_ops = &tsunami_ops;
1681 1674 poke_srmmu = poke_tsunami;
1682 1675  
1683 1676 tsunami_setup_blockops();
... ... @@ -1688,7 +1681,7 @@
1688 1681 unsigned long mreg = srmmu_get_mmureg();
1689 1682 static int smp_catch;
1690 1683  
1691   - if(viking_mxcc_present) {
  1684 + if (viking_mxcc_present) {
1692 1685 unsigned long mxcc_control = mxcc_get_creg();
1693 1686  
1694 1687 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
... ... @@ -1725,6 +1718,52 @@
1725 1718 srmmu_set_mmureg(mreg);
1726 1719 }
1727 1720  
  1721 +static struct sparc32_cachetlb_ops viking_ops = {
  1722 + .cache_all = viking_flush_cache_all,
  1723 + .cache_mm = viking_flush_cache_mm,
  1724 + .cache_page = viking_flush_cache_page,
  1725 + .cache_range = viking_flush_cache_range,
  1726 + .tlb_all = viking_flush_tlb_all,
  1727 + .tlb_mm = viking_flush_tlb_mm,
  1728 + .tlb_page = viking_flush_tlb_page,
  1729 + .tlb_range = viking_flush_tlb_range,
  1730 + .page_to_ram = viking_flush_page_to_ram,
  1731 + .sig_insns = viking_flush_sig_insns,
  1732 + .page_for_dma = viking_flush_page_for_dma,
  1733 +};
  1734 +
  1735 +#ifdef CONFIG_SMP
  1736 +/* On sun4d the cpu broadcasts local TLB flushes, so we can just
  1737 + * perform the local TLB flush and all the other cpus will see it.
  1738 + * But, unfortunately, there is a bug in the sun4d XBUS backplane
  1739 + * that requires that we add some synchronization to these flushes.
  1740 + *
  1741 + * The bug is that the fifo which keeps track of all the pending TLB
  1742 + * broadcasts in the system is an entry or two too small, so if we
  1743 + * have too many going at once we'll overflow that fifo and lose a TLB
  1744 + * flush resulting in corruption.
  1745 + *
  1746 + * Our workaround is to take a global spinlock around the TLB flushes,
  1747 + * which guarantees we won't ever have too many pending. It's a big
  1748 + * hammer, but a semaphore-like system to make sure we only have N TLB
  1749 + * flushes going at once will require SMP locking anyways so there's
  1750 + * no real value in trying any harder than this.
  1751 + */
  1752 +static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
  1753 + .cache_all = viking_flush_cache_all,
  1754 + .cache_mm = viking_flush_cache_mm,
  1755 + .cache_page = viking_flush_cache_page,
  1756 + .cache_range = viking_flush_cache_range,
  1757 + .tlb_all = sun4dsmp_flush_tlb_all,
  1758 + .tlb_mm = sun4dsmp_flush_tlb_mm,
  1759 + .tlb_page = sun4dsmp_flush_tlb_page,
  1760 + .tlb_range = sun4dsmp_flush_tlb_range,
  1761 + .page_to_ram = viking_flush_page_to_ram,
  1762 + .sig_insns = viking_flush_sig_insns,
  1763 + .page_for_dma = viking_flush_page_for_dma,
  1764 +};
  1765 +#endif
  1766 +
1728 1767 static void __init init_viking(void)
1729 1768 {
1730 1769 unsigned long mreg = srmmu_get_mmureg();
1731 1770  
1732 1771  
1733 1772  
1734 1773  
1735 1774  
1736 1775  
1737 1776  
1738 1777  
1739 1778  
1740 1779  
1741 1780  
1742 1781  
1743 1782  
1744 1783  
1745 1784  
1746 1785  
... ... @@ -1742,76 +1781,101 @@
1742 1781 * This is only necessary because of the new way in
1743 1782 * which we use the IOMMU.
1744 1783 */
1745   - BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
1746   -
  1784 + viking_ops.page_for_dma = viking_flush_page;
  1785 +#ifdef CONFIG_SMP
  1786 + viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
  1787 +#endif
1747 1788 flush_page_for_dma_global = 0;
1748 1789 } else {
1749 1790 srmmu_name = "TI Viking/MXCC";
1750 1791 viking_mxcc_present = 1;
1751   -
1752 1792 srmmu_cache_pagetables = 1;
1753   -
1754   - /* MXCC vikings lack the DMA snooping bug. */
1755   - BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
1756 1793 }
1757 1794  
1758   - BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
1759   - BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
1760   - BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
1761   - BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
1762   -
  1795 + sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
  1796 + &viking_ops;
1763 1797 #ifdef CONFIG_SMP
1764   - if (sparc_cpu_model == sun4d) {
1765   - BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
1766   - BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
1767   - BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
1768   - BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
1769   - } else
  1798 + if (sparc_cpu_model == sun4d)
  1799 + sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
  1800 + &viking_sun4d_smp_ops;
1770 1801 #endif
1771   - {
1772   - BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
1773   - BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
1774   - BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
1775   - BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
1776   - }
1777 1802  
1778   - BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
1779   - BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
1780   -
1781 1803 poke_srmmu = poke_viking;
1782 1804 }
1783 1805  
1784 1806 #ifdef CONFIG_SPARC_LEON
  1807 +static void leon_flush_cache_mm(struct mm_struct *mm)
  1808 +{
  1809 + leon_flush_cache_all();
  1810 +}
1785 1811  
1786   -void __init poke_leonsparc(void)
  1812 +static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1787 1813 {
  1814 + leon_flush_pcache_all(vma, page);
1788 1815 }
1789 1816  
1790   -void __init init_leon(void)
  1817 +static void leon_flush_cache_range(struct vm_area_struct *vma,
  1818 + unsigned long start,
  1819 + unsigned long end)
1791 1820 {
  1821 + leon_flush_cache_all();
  1822 +}
1792 1823  
1793   - srmmu_name = "LEON";
  1824 +static void leon_flush_tlb_mm(struct mm_struct *mm)
  1825 +{
  1826 + leon_flush_tlb_all();
  1827 +}
1794 1828  
1795   - BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
1796   - BTFIXUPCALL_NORM);
1797   - BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
1798   - BTFIXUPCALL_NORM);
1799   - BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
1800   - BTFIXUPCALL_NORM);
1801   - BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
1802   - BTFIXUPCALL_NORM);
1803   - BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
1804   - BTFIXUPCALL_NORM);
  1829 +static void leon_flush_tlb_page(struct vm_area_struct *vma,
  1830 + unsigned long page)
  1831 +{
  1832 + leon_flush_tlb_all();
  1833 +}
1805 1834  
1806   - BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1807   - BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1808   - BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1809   - BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
  1835 +static void leon_flush_tlb_range(struct vm_area_struct *vma,
  1836 + unsigned long start,
  1837 + unsigned long end)
  1838 +{
  1839 + leon_flush_tlb_all();
  1840 +}
1810 1841  
1811   - BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
1812   - BTFIXUPCALL_NOP);
1813   - BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
  1842 +static void leon_flush_page_to_ram(unsigned long page)
  1843 +{
  1844 + leon_flush_cache_all();
  1845 +}
1814 1846  
  1847 +static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
  1848 +{
  1849 + leon_flush_cache_all();
  1850 +}
  1851 +
  1852 +static void leon_flush_page_for_dma(unsigned long page)
  1853 +{
  1854 + leon_flush_dcache_all();
  1855 +}
  1856 +
  1857 +void __init poke_leonsparc(void)
  1858 +{
  1859 +}
  1860 +
  1861 +static const struct sparc32_cachetlb_ops leon_ops = {
  1862 + .cache_all = leon_flush_cache_all,
  1863 + .cache_mm = leon_flush_cache_mm,
  1864 + .cache_page = leon_flush_cache_page,
  1865 + .cache_range = leon_flush_cache_range,
  1866 + .tlb_all = leon_flush_tlb_all,
  1867 + .tlb_mm = leon_flush_tlb_mm,
  1868 + .tlb_page = leon_flush_tlb_page,
  1869 + .tlb_range = leon_flush_tlb_range,
  1870 + .page_to_ram = leon_flush_page_to_ram,
  1871 + .sig_insns = leon_flush_sig_insns,
  1872 + .page_for_dma = leon_flush_page_for_dma,
  1873 +};
  1874 +
  1875 +void __init init_leon(void)
  1876 +{
  1877 + srmmu_name = "LEON";
  1878 + sparc32_cachetlb_ops = &leon_ops;
1815 1879 poke_srmmu = poke_leonsparc;
1816 1880  
1817 1881 srmmu_cache_pagetables = 0;
1818 1882  
1819 1883  
1820 1884  
... ... @@ -1925,12 +1989,154 @@
1925 1989 /* Local cross-calls. */
1926 1990 static void smp_flush_page_for_dma(unsigned long page)
1927 1991 {
1928   - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
1929   - local_flush_page_for_dma(page);
  1992 + xc1((smpfunc_t) local_ops->page_for_dma, page);
  1993 + local_ops->page_for_dma(page);
1930 1994 }
1931 1995  
  1996 +static void smp_flush_cache_all(void)
  1997 +{
  1998 + xc0((smpfunc_t) local_ops->cache_all);
  1999 + local_ops->cache_all();
  2000 +}
  2001 +
  2002 +static void smp_flush_tlb_all(void)
  2003 +{
  2004 + xc0((smpfunc_t) local_ops->tlb_all);
  2005 + local_ops->tlb_all();
  2006 +}
  2007 +
  2008 +static void smp_flush_cache_mm(struct mm_struct *mm)
  2009 +{
  2010 + if (mm->context != NO_CONTEXT) {
  2011 + cpumask_t cpu_mask;
  2012 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2013 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2014 + if (!cpumask_empty(&cpu_mask))
  2015 + xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
  2016 + local_ops->cache_mm(mm);
  2017 + }
  2018 +}
  2019 +
  2020 +static void smp_flush_tlb_mm(struct mm_struct *mm)
  2021 +{
  2022 + if (mm->context != NO_CONTEXT) {
  2023 + cpumask_t cpu_mask;
  2024 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2025 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2026 + if (!cpumask_empty(&cpu_mask)) {
  2027 + xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
  2028 + if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
  2029 + cpumask_copy(mm_cpumask(mm),
  2030 + cpumask_of(smp_processor_id()));
  2031 + }
  2032 + local_ops->tlb_mm(mm);
  2033 + }
  2034 +}
  2035 +
  2036 +static void smp_flush_cache_range(struct vm_area_struct *vma,
  2037 + unsigned long start,
  2038 + unsigned long end)
  2039 +{
  2040 + struct mm_struct *mm = vma->vm_mm;
  2041 +
  2042 + if (mm->context != NO_CONTEXT) {
  2043 + cpumask_t cpu_mask;
  2044 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2045 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2046 + if (!cpumask_empty(&cpu_mask))
  2047 + xc3((smpfunc_t) local_ops->cache_range,
  2048 + (unsigned long) vma, start, end);
  2049 + local_ops->cache_range(vma, start, end);
  2050 + }
  2051 +}
  2052 +
  2053 +static void smp_flush_tlb_range(struct vm_area_struct *vma,
  2054 + unsigned long start,
  2055 + unsigned long end)
  2056 +{
  2057 + struct mm_struct *mm = vma->vm_mm;
  2058 +
  2059 + if (mm->context != NO_CONTEXT) {
  2060 + cpumask_t cpu_mask;
  2061 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2062 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2063 + if (!cpumask_empty(&cpu_mask))
  2064 + xc3((smpfunc_t) local_ops->tlb_range,
  2065 + (unsigned long) vma, start, end);
  2066 + local_ops->tlb_range(vma, start, end);
  2067 + }
  2068 +}
  2069 +
  2070 +static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
  2071 +{
  2072 + struct mm_struct *mm = vma->vm_mm;
  2073 +
  2074 + if (mm->context != NO_CONTEXT) {
  2075 + cpumask_t cpu_mask;
  2076 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2077 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2078 + if (!cpumask_empty(&cpu_mask))
  2079 + xc2((smpfunc_t) local_ops->cache_page,
  2080 + (unsigned long) vma, page);
  2081 + local_ops->cache_page(vma, page);
  2082 + }
  2083 +}
  2084 +
  2085 +static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
  2086 +{
  2087 + struct mm_struct *mm = vma->vm_mm;
  2088 +
  2089 + if (mm->context != NO_CONTEXT) {
  2090 + cpumask_t cpu_mask;
  2091 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2092 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2093 + if (!cpumask_empty(&cpu_mask))
  2094 + xc2((smpfunc_t) local_ops->tlb_page,
  2095 + (unsigned long) vma, page);
  2096 + local_ops->tlb_page(vma, page);
  2097 + }
  2098 +}
  2099 +
  2100 +static void smp_flush_page_to_ram(unsigned long page)
  2101 +{
  2102 + /* Current theory is that those who call this are the one's
  2103 + * who have just dirtied their cache with the pages contents
  2104 + * in kernel space, therefore we only run this on local cpu.
  2105 + *
  2106 + * XXX This experiment failed, research further... -DaveM
  2107 + */
  2108 +#if 1
  2109 + xc1((smpfunc_t) local_ops->page_to_ram, page);
1932 2110 #endif
  2111 + local_ops->page_to_ram(page);
  2112 +}
1933 2113  
  2114 +static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
  2115 +{
  2116 + cpumask_t cpu_mask;
  2117 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  2118 + cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  2119 + if (!cpumask_empty(&cpu_mask))
  2120 + xc2((smpfunc_t) local_ops->sig_insns,
  2121 + (unsigned long) mm, insn_addr);
  2122 + local_ops->sig_insns(mm, insn_addr);
  2123 +}
  2124 +
  2125 +static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
  2126 + .cache_all = smp_flush_cache_all,
  2127 + .cache_mm = smp_flush_cache_mm,
  2128 + .cache_page = smp_flush_cache_page,
  2129 + .cache_range = smp_flush_cache_range,
  2130 + .tlb_all = smp_flush_tlb_all,
  2131 + .tlb_mm = smp_flush_tlb_mm,
  2132 + .tlb_page = smp_flush_tlb_page,
  2133 + .tlb_range = smp_flush_tlb_range,
  2134 + .page_to_ram = smp_flush_page_to_ram,
  2135 + .sig_insns = smp_flush_sig_insns,
  2136 + .page_for_dma = smp_flush_page_for_dma,
  2137 +};
  2138 +#endif
  2139 +
1934 2140 /* Load up routines and constants for sun4m and sun4d mmu */
1935 2141 void __init load_mmu(void)
1936 2142 {
1937 2143  
1938 2144  
1939 2145  
1940 2146  
... ... @@ -1942,44 +2148,30 @@
1942 2148  
1943 2149 #ifdef CONFIG_SMP
1944 2150 /* El switcheroo... */
  2151 + local_ops = sparc32_cachetlb_ops;
1945 2152  
1946   - BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
1947   - BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
1948   - BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
1949   - BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
1950   - BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
1951   - BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
1952   - BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
1953   - BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
1954   - BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
1955   - BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
1956   - BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
1957   -
1958   - BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
1959   - BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
1960   - BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
1961   - BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
1962   - if (sparc_cpu_model != sun4d &&
1963   - sparc_cpu_model != sparc_leon) {
1964   - BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
1965   - BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
1966   - BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
1967   - BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
  2153 + if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
  2154 + smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
  2155 + smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
  2156 + smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
  2157 + smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1968 2158 }
1969   - BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
1970   - BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
1971   - BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
1972 2159  
1973 2160 if (poke_srmmu == poke_viking) {
1974 2161 /* Avoid unnecessary cross calls. */
1975   - BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
1976   - BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
1977   - BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
1978   - BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
1979   - BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
1980   - BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
1981   - BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
  2162 + smp_cachetlb_ops.cache_all = local_ops->cache_all;
  2163 + smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
  2164 + smp_cachetlb_ops.cache_range = local_ops->cache_range;
  2165 + smp_cachetlb_ops.cache_page = local_ops->cache_page;
  2166 +
  2167 + smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
  2168 + smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
  2169 + smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1982 2170 }
  2171 +
  2172 + /* It really is const after this point. */
  2173 + sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
  2174 + &smp_cachetlb_ops;
1983 2175 #endif
1984 2176  
1985 2177 if (sparc_cpu_model == sun4d)