Blame view

arch/sh/mm/cache-sh4.c 9.68 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
13
  #include <linux/init.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
14
  #include <linux/mm.h>
52e27782e   Paul Mundt   sh: p3map_sem sem...
15
16
  #include <linux/io.h>
  #include <linux/mutex.h>
2277ab4a1   Paul Mundt   sh: Migrate from ...
17
  #include <linux/fs.h>
deaef20e9   Paul Mundt   sh: Rework sh4_fl...
18
19
  #include <linux/highmem.h>
  #include <asm/pgtable.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
20
21
  #include <asm/mmu_context.h>
  #include <asm/cacheflush.h>
28ccf7f91   Paul Mundt   sh: Selective flu...
22
23
24
25
26
/*
 * The maximum number of pages we support for the ranged icache flush
 * (which writes back the matching d-cache lines first).  Anything
 * exceeding this simply flushes both caches in their entirety instead.
 */
09b5a10c1   Chris Smith   sh: Optimized flu...
27
  #define MAX_ICACHE_PAGES	32
28ccf7f91   Paul Mundt   sh: Selective flu...
28

a7a7c0e1d   Valentin Sitdikov   sh: Fix up single...
29
/* Core single-page flush via the memory-mapped cache address arrays;
   may need to execute from the uncached (P2) region — see definition below. */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
b638d0b92   Richard Curnow   sh: Optimized cac...
31
32
  
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);

	/*
	 * The I-cache address array must be operated on with the PC
	 * uncached (see the comment in flush_cache_one()), and the whole
	 * sequence must not be interrupted mid-flush.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		/* Write this d-cache line back to memory first. */
		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/*
		 * Clear i-cache line valid-bit.  The line may be cached
		 * under any of its n_aliases colours in each way, so zero
		 * the entry at every PAGE_SIZE-spaced alias position.
		 */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));

			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
a7a7c0e1d   Valentin Sitdikov   sh: Fix up single...
84
  static inline void flush_cache_one(unsigned long start, unsigned long phys)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
85
  {
983f4c514   Paul Mundt   Revert "sh: Kill ...
86
  	unsigned long flags, exec_offset = 0;
33573c0e3   Paul Mundt   sh: Fix occasiona...
87

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
88
  	/*
1f69b6af9   Matt Fleming   sh: Prepare for d...
89
90
  	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
  	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
91
  	 */
7ec9d6f8c   Paul Mundt   sh: Avoid smp_pro...
92
  	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
33573c0e3   Paul Mundt   sh: Fix occasiona...
93
  	    (start < CACHE_OC_ADDRESS_ARRAY))
1f69b6af9   Matt Fleming   sh: Prepare for d...
94
  		exec_offset = cached_to_uncached;
33573c0e3   Paul Mundt   sh: Fix occasiona...
95

983f4c514   Paul Mundt   Revert "sh: Kill ...
96
  	local_irq_save(flags);
a781d1e5f   Matt Fleming   sh: Drop associat...
97
  	__flush_cache_one(start, phys, exec_offset);
983f4c514   Paul Mundt   Revert "sh: Kill ...
98
  	local_irq_restore(flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
99
100
101
102
103
104
  }
  
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	/*
	 * UP-only lazy path: a page-cache page with no userspace mappings
	 * just gets marked dirty-in-cache (PG_dcache_clean cleared);
	 * presumably the actual flush is deferred to whoever maps it —
	 * NOTE(review): confirm against update_mmu_cache()/fault path.
	 */
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
#endif
		/* On SMP this arm is unconditional (the 'if' above is compiled out). */
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
				(addr & shm_align_mask), page_to_phys(page));

	/* Ensure the address-array writes have completed before returning. */
	wmb();
}
28ccf7f91   Paul Mundt   sh: Selective flu...
120
/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	/* CCR can only be touched while running from uncached space. */
	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache: set the invalidate bit in the cache control reg. */
	ccr = __raw_readl(CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
bd6df5748   Paul Mundt   sh: Kill off segm...
141
/*
 * Flush the entire D-cache by walking the memory-mapped operand-cache
 * address array and writing zero to every entry.  (Zeroing an entry
 * clears its valid/dirty bits per the SH-4 address-array format —
 * NOTE(review): confirm write-back behaviour against the CPU manual.)
 */
static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	/* One entry per set, repeated for every way. */
	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

	/* Manually unrolled x8 for better generated code on this path. */
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}
f26b2a562   Paul Mundt   sh: Make cache fl...
163
/* Flush both caches entirely: D-cache first, then invalidate the I-cache. */
static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}
28ccf7f91   Paul Mundt   sh: Selective flu...
168
169
170
171
172
173
  /*
   * Note : (RPC) since the caches are physically tagged, the only point
   * of flush_cache_mm for SH-4 is to get rid of aliases from the
   * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
   * lines can stay resident so long as the virtual address they were
   * accessed with (hence cache set) is in accord with the physical
654d364e2   Paul Mundt   sh: sh4_flush_cac...
174
   * address (i.e. tag).  It's no different here.
28ccf7f91   Paul Mundt   sh: Selective flu...
175
176
177
   *
   * Caller takes mm->mmap_sem.
   */
f26b2a562   Paul Mundt   sh: Make cache fl...
178
  static void sh4_flush_cache_mm(void *arg)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
179
  {
f26b2a562   Paul Mundt   sh: Make cache fl...
180
  	struct mm_struct *mm = arg;
e7b8b7f16   Paul Mundt   sh: NO_CONTEXT AS...
181
182
  	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
  		return;
654d364e2   Paul Mundt   sh: sh4_flush_cac...
183
  	flush_dcache_all();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
184
185
186
187
188
189
190
191
  }
  
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	/* Nothing cached for an mm that has no ASID on this CPU. */
	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/* Walk the page tables by hand to inspect the PTE for this address. */
	pgd = pgd_offset(vma->vm_mm, address);
	pud = pud_offset(pgd, address);
	pmd = pmd_offset(pud, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if ((vma->vm_mm == current->active_mm))
		/* Current mm: the user address itself selects the right colour. */
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapped(page));
		if (map_coherent)
			/* Colour-matched mapping so the flush hits the right sets. */
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page, KM_USER0);

		address = (unsigned long)vaddr;
	}

	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	/* Executable mappings need the I-cache invalidated too. */
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}
  
  /*
   * Write back and invalidate D-caches.
   *
   * START, END: Virtual Address (U0 address)
   *
   * NOTE: We need to flush the _physical_ page entry.
   * Flushing the cache lines for U0 only isn't enough.
   * We need to flush for P1 too, which may contain aliases.
   */
f26b2a562   Paul Mundt   sh: Make cache fl...
262
  static void sh4_flush_cache_range(void *args)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
263
  {
f26b2a562   Paul Mundt   sh: Make cache fl...
264
265
266
267
268
269
270
  	struct flusher_data *data = args;
  	struct vm_area_struct *vma;
  	unsigned long start, end;
  
  	vma = data->vma;
  	start = data->addr1;
  	end = data->addr2;
e7b8b7f16   Paul Mundt   sh: NO_CONTEXT AS...
271
272
  	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
  		return;
b638d0b92   Richard Curnow   sh: Optimized cac...
273
274
275
276
  	/*
  	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
  	 * the cache is physically tagged, the data can just be left in there.
  	 */
7ec9d6f8c   Paul Mundt   sh: Avoid smp_pro...
277
  	if (boot_cpu_data.dcache.n_aliases == 0)
b638d0b92   Richard Curnow   sh: Optimized cac...
278
  		return;
654d364e2   Paul Mundt   sh: sh4_flush_cac...
279
  	flush_dcache_all();
b638d0b92   Richard Curnow   sh: Optimized cac...
280

654d364e2   Paul Mundt   sh: sh4_flush_cac...
281
  	if (vma->vm_flags & VM_EXEC)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
282
283
  		flush_icache_all();
  }
b638d0b92   Richard Curnow   sh: Optimized cac...
284
/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 * Loads the address of label 2 below, adds exec_offset and jumps
	 * there, so the rest of the function runs from the uncached alias
	 * when exec_offset is non-zero.
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			/* Store the tag into the address-array entry. */
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
37443ef3f   Paul Mundt   sh: Migrate SH-4 ...
360
361
362
363
364
365
366
367
368
/* Optional per-subtype region-flush setup; weak so a no-op default exists. */
extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	/* Report the CPU version/cache/product registers at boot. */
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

	/* Install the SH-4 implementations of the local cache-flush ops. */
	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}