  /*
   * arch/sh/mm/cache-sh2a.c
   *
   * Copyright (C) 2008 Yoshinori Sato
   *
   * Released under the terms of the GNU GPL v2.0.
   */
  
  #include <linux/init.h>
  #include <linux/mm.h>
  
  #include <asm/cache.h>
  #include <asm/addrspace.h>
  #include <asm/processor.h>
  #include <asm/cacheflush.h>
  #include <asm/io.h>
/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this simply flushes the dcache in its
 * entirety.
 */
  #define MAX_OCACHE_PAGES	32
  #define MAX_ICACHE_PAGES	32
  #ifdef CONFIG_CACHE_WRITEBACK
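/*
 * Write back a single operand-cache line.  The address-array entry is
 * selected by the line index (address bits [10:4]) and the way number;
 * if the tag stored there matches the target address, the entry is
 * rewritten with the U (updated) bit cleared so that any dirty data in
 * the line is written back to memory.
 */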
  static void sh2a_flush_oc_line(unsigned long v, int way)
  {
  	unsigned long addr = (v & 0x000007f0) | (way << 11);
  	unsigned long data;
  
  	data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
  	if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
  		data &= ~SH_CACHE_UPDATED;
  		__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
  	}
  }
  #endif
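
/*
 * Invalidate the cache line containing address v in the given address
 * array (operand or instruction cache).  The associative bit makes the
 * hardware compare the write against every way, so the matching entry
 * is invalidated regardless of which way it lives in.
 */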
  static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
  {
  	/* Set associative bit to hit all ways */
  	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
  	__raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
  }
  
/*
 * Write back the dirty D-caches, but do not invalidate them.
 */
  static void sh2a__flush_wback_region(void *start, int size)
  {
  #ifdef CONFIG_CACHE_WRITEBACK
  	unsigned long v;
  	unsigned long begin, end;
  	unsigned long flags;
  	int nr_ways;
  
  	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  		& ~(L1_CACHE_BYTES-1);
  	nr_ways = current_cpu_data.dcache.ways;
  
  	local_irq_save(flags);
  	jump_to_uncached();
  	/* If there are too many pages then flush the entire cache */
  	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
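		/*
		 * Sweep the whole operand-cache address array: any entry
		 * with the U (updated) bit set holds dirty data, and
		 * rewriting it with U cleared writes the line back.
		 */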
  		begin = CACHE_OC_ADDRESS_ARRAY;
  		end = begin + (nr_ways * current_cpu_data.dcache.way_size);
  
  		for (v = begin; v < end; v += L1_CACHE_BYTES) {
  			unsigned long data = __raw_readl(v);
  			if (data & SH_CACHE_UPDATED)
  				__raw_writel(data & ~SH_CACHE_UPDATED, v);
  		}
  	} else {
  		int way;
  		for (way = 0; way < nr_ways; way++) {
  			for (v = begin; v < end; v += L1_CACHE_BYTES)
  				sh2a_flush_oc_line(v, way);
  		}
  	}
  
  	back_to_cached();
  	local_irq_restore(flags);
  #endif
  }
  /*
   * Write back the dirty D-caches and invalidate them.
   */
  static void sh2a__flush_purge_region(void *start, int size)
  {
  	unsigned long v;
  	unsigned long begin, end;
  	unsigned long flags;
  
  	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  		& ~(L1_CACHE_BYTES-1);
  
  	local_irq_save(flags);
  	jump_to_uncached();
  
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
  #ifdef CONFIG_CACHE_WRITEBACK
  		int way;
  		int nr_ways = current_cpu_data.dcache.ways;
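		/* Write any dirty data back before the line is invalidated. */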
  		for (way = 0; way < nr_ways; way++)
  			sh2a_flush_oc_line(v, way);
  #endif
  		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
  	}

  	back_to_cached();
  	local_irq_restore(flags);
  }
/*
 * Invalidate the D-caches, but do not write them back.
 */
  static void sh2a__flush_invalidate_region(void *start, int size)
  {
  	unsigned long v;
  	unsigned long begin, end;
  	unsigned long flags;
  
  	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  		& ~(L1_CACHE_BYTES-1);

  	local_irq_save(flags);
  	jump_to_uncached();
  	/* If there are too many pages then just blow the cache */
  	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
  		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
  	} else {
  		for (v = begin; v < end; v += L1_CACHE_BYTES)
  			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
  	}

  	back_to_cached();
  	local_irq_restore(flags);
  }
/*
 * Write back the given range of the D-cache and purge it from the I-cache.
 */
  static void sh2a_flush_icache_range(void *args)
  {
  	struct flusher_data *data = args;
  	unsigned long start, end;
  	unsigned long v;
  	unsigned long flags;

  	start = data->addr1 & ~(L1_CACHE_BYTES-1);
  	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

  #ifdef CONFIG_CACHE_WRITEBACK
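	/*
	 * Push the affected range out of the (write-back) operand cache
	 * first so that the I-cache refill sees the new instructions.
	 */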
	sh2a__flush_wback_region((void *)start, end - start);
  #endif
  	local_irq_save(flags);
  	jump_to_uncached();
	/*
	 * I-cache invalidate. If there are too many pages then just blow
	 * the whole cache.
	 */
  	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
  		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
  	} else {
  		for (v = start; v < end; v += L1_CACHE_BYTES)
  			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
  	}
  
  	back_to_cached();
  	local_irq_restore(flags);
  }
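
/*
 * Install the SH-2A implementations of the generic cache flushing hooks.
 */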
  void __init sh2a_cache_init(void)
  {
  	local_flush_icache_range	= sh2a_flush_icache_range;
  
  	__flush_wback_region		= sh2a__flush_wback_region;
  	__flush_purge_region		= sh2a__flush_purge_region;
  	__flush_invalidate_region	= sh2a__flush_invalidate_region;
  }