Blame view

arch/arm/mm/cache-feroceon-l2.c 8.08 KB
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
  /*
   * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
   *
   * Copyright (C) 2008 Marvell Semiconductor
   *
   * This file is licensed under the terms of the GNU General Public
   * License version 2.  This program is licensed "as is" without any
   * warranty of any kind, whether express or implied.
   *
   * References:
   * - Unified Layer 2 Cache for Feroceon CPU Cores,
   *   Document ID MV-S104858-00, Rev. A, October 23 2007.
   */
  
  #include <linux/init.h>
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
16
  #include <linux/highmem.h>
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
17
  #include <asm/cacheflush.h>
6f088f1d2   Lennert Buytenhek   [ARM] Move includ...
18
  #include <plat/cache-feroceon-l2.h>
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
  
  /*
   * Low-level cache maintenance operations.
   *
   * As well as the regular 'clean/invalidate/flush L2 cache line by
   * MVA' instructions, the Feroceon L2 cache controller also features
   * 'clean/invalidate L2 range by MVA' operations.
   *
   * Cache range operations are initiated by writing the start and
   * end addresses to successive cp15 registers, and process every
   * cache line whose first byte address lies in the inclusive range
   * [start:end].
   *
   * The cache range operations stall the CPU pipeline until completion.
   *
   * The range operations require two successive cp15 writes, in
   * between which we don't want to be preempted.
   */
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
37

6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
38
/*
 * Translate a physical address into a virtual address the range
 * operations can use.  Returns the VA; on HIGHMEM configs the caller
 * must hand it back to l2_put_va() when done.
 */
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping afterwards (note: a cache flush may happen
	 * in some circumstances depending on the path taken in kunmap_atomic).
	 */
	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
	/* Lowmem is permanently mapped; a simple linear translation works. */
	return __phys_to_virt(paddr);
#endif
}
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
54
55
56
57
58
59
  static inline void l2_put_va(unsigned long vaddr)
  {
  #ifdef CONFIG_HIGHMEM
  	kunmap_atomic((void *)vaddr);
  #endif
  }
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
60
61
62
63
  static inline void l2_clean_pa(unsigned long addr)
  {
  	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
  }
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
64
  static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
65
  {
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
66
  	unsigned long va_start, va_end, flags;
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
67
68
69
70
71
72
  
  	/*
  	 * Make sure 'start' and 'end' reference the same page, as
  	 * L2 is PIPT and range operations only do a TLB lookup on
  	 * the start address.
  	 */
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
73
  	BUG_ON((start ^ end) >> PAGE_SHIFT);
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
74

6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
75
  	va_start = l2_get_va(start);
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
76
  	va_end = va_start + (end - start);
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
77
  	raw_local_irq_save(flags);
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
78
79
80
  	__asm__("mcr p15, 1, %0, c15, c9, 4
  \t"
  		"mcr p15, 1, %1, c15, c9, 5"
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
81
  		: : "r" (va_start), "r" (va_end));
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
82
  	raw_local_irq_restore(flags);
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
83
  	l2_put_va(va_start);
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
84
  }
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
85
86
87
88
89
90
91
92
93
  static inline void l2_clean_inv_pa(unsigned long addr)
  {
  	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
  }
  
  static inline void l2_inv_pa(unsigned long addr)
  {
  	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
  }
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
94
  static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
95
  {
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
96
  	unsigned long va_start, va_end, flags;
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
97
98
99
100
101
102
  
  	/*
  	 * Make sure 'start' and 'end' reference the same page, as
  	 * L2 is PIPT and range operations only do a TLB lookup on
  	 * the start address.
  	 */
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
103
  	BUG_ON((start ^ end) >> PAGE_SHIFT);
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
104

6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
105
  	va_start = l2_get_va(start);
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
106
  	va_end = va_start + (end - start);
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
107
  	raw_local_irq_save(flags);
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
108
109
110
  	__asm__("mcr p15, 1, %0, c15, c11, 4
  \t"
  		"mcr p15, 1, %1, c15, c11, 5"
1bb772679   Nicolas Pitre   [ARM] Feroceon: a...
111
  		: : "r" (va_start), "r" (va_end));
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
112
  	raw_local_irq_restore(flags);
6d3e6d364   Nicolas Pitre   ARM: fix cache-fe...
113
  	l2_put_va(va_start);
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
114
  }
d75de0872   Maxime Bizon   [ARM] Kirkwood: i...
115
116
117
118
  static inline void l2_inv_all(void)
  {
  	__asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
  }
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
  
  /*
   * Linux primitives.
   *
   * Note that the end addresses passed to Linux primitives are
   * noninclusive, while the hardware cache range operations use
   * inclusive start and end addresses.
   */
  #define CACHE_LINE_SIZE		32
  #define MAX_RANGE_SIZE		1024
  
  static int l2_wt_override;
  
  static unsigned long calc_range_end(unsigned long start, unsigned long end)
  {
  	unsigned long range_end;
  
  	BUG_ON(start & (CACHE_LINE_SIZE - 1));
  	BUG_ON(end & (CACHE_LINE_SIZE - 1));
  
  	/*
  	 * Try to process all cache lines between 'start' and 'end'.
  	 */
  	range_end = end;
  
  	/*
  	 * Limit the number of cache lines processed at once,
  	 * since cache range operations stall the CPU pipeline
  	 * until completion.
  	 */
  	if (range_end > start + MAX_RANGE_SIZE)
  		range_end = start + MAX_RANGE_SIZE;
  
  	/*
  	 * Cache range operations can't straddle a page boundary.
  	 */
  	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
  		range_end = (start | (PAGE_SIZE - 1)) + 1;
  
  	return range_end;
  }
  
/*
 * outer_cache inv_range hook: invalidate the physical range
 * [start, end).  Partial lines at either edge are cleaned before
 * being invalidated so that unrelated data sharing the line is not
 * lost; full lines in between are invalidated in page/size-bounded
 * chunks via the hardware range op.
 */
static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 * (start < end guards against the first fixup having already
	 * consumed the whole range.)
	 */
	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 * Hardware range ops take inclusive ends, hence the
	 * "- CACHE_LINE_SIZE" on the noninclusive chunk end.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}
  
  static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
  {
  	/*
  	 * If L2 is forced to WT, the L2 will always be clean and we
  	 * don't need to do anything here.
  	 */
  	if (!l2_wt_override) {
  		start &= ~(CACHE_LINE_SIZE - 1);
  		end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
  		while (start != end) {
  			unsigned long range_end = calc_range_end(start, end);
  			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
  			start = range_end;
  		}
  	}
  
  	dsb();
  }
  
  static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
  {
  	start &= ~(CACHE_LINE_SIZE - 1);
  	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
  	while (start != end) {
  		unsigned long range_end = calc_range_end(start, end);
  		if (!l2_wt_override)
  			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
  		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
  		start = range_end;
  	}
  
  	dsb();
  }
  
  
  /*
   * Routines to disable and re-enable the D-cache and I-cache at run
   * time.  These are necessary because the L2 cache can only be enabled
   * or disabled while the L1 Dcache and Icache are both disabled.
   */
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
231
/*
 * Flush the L1 caches and clear the D-cache enable bit (CR_C) with
 * IRQs masked, so nothing dirties the cache between the flush and the
 * disable.  Returns 1 if the D-cache was enabled (and is now off),
 * 0 if it was already disabled — the caller uses this to decide
 * whether to re-enable it afterwards.
 */
static int __init flush_and_disable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_C) {
		unsigned long flags;

		raw_local_irq_save(flags);
		flush_cache_all();
		set_cr(cr & ~CR_C);
		raw_local_irq_restore(flags);
		return 1;
	}
	return 0;
}
  
/* Re-enable the D-cache (set CR_C in the control register). */
static void __init enable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_C);
}
  
/* Invalidate the entire I-cache (cp15 c7, c5, 0). */
static void __init __invalidate_icache(void)
{
	__asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
260
/*
 * Clear the I-cache enable bit (CR_I) and invalidate the I-cache.
 * Returns 1 if the I-cache was enabled (and is now off), 0 if it was
 * already disabled — the caller uses this to decide whether to
 * re-enable it afterwards.
 */
static int __init invalidate_and_disable_icache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_I) {
		set_cr(cr & ~CR_I);
		__invalidate_icache();
		return 1;
	}
	return 0;
}
  
/* Re-enable the I-cache (set CR_I in the control register). */
static void __init enable_icache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_I);
}
  
/* Read the Feroceon CPU Extra Features register (cp15 c15, c1, 0). */
static inline u32 read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}
  
/* Write the Feroceon CPU Extra Features register (cp15 c15, c1, 0). */
static inline void write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
  
  static void __init disable_l2_prefetch(void)
  {
  	u32 u;
  
  	/*
  	 * Read the CPU Extra Features register and verify that the
  	 * Disable L2 Prefetch bit is set.
  	 */
  	u = read_extra_features();
  	if (!(u & 0x01000000)) {
  		printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.
  ");
  		write_extra_features(u | 0x01000000);
  	}
  }
  
  static void __init enable_l2(void)
  {
  	u32 u;
  
  	u = read_extra_features();
  	if (!(u & 0x00400000)) {
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
317
  		int i, d;
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
318
319
  		printk(KERN_INFO "Feroceon L2: Enabling L2
  ");
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
320
321
  		d = flush_and_disable_dcache();
  		i = invalidate_and_disable_icache();
d75de0872   Maxime Bizon   [ARM] Kirkwood: i...
322
  		l2_inv_all();
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
323
  		write_extra_features(u | 0x00400000);
99c6bb390   Nicolas Pitre   [ARM] Feroceon: s...
324
325
326
327
  		if (i)
  			enable_icache();
  		if (d)
  			enable_dcache();
99c6dc117   Lennert Buytenhek   [ARM] Feroceon: L...
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
  	}
  }
  
  void __init feroceon_l2_init(int __l2_wt_override)
  {
  	l2_wt_override = __l2_wt_override;
  
  	disable_l2_prefetch();
  
  	outer_cache.inv_range = feroceon_l2_inv_range;
  	outer_cache.clean_range = feroceon_l2_clean_range;
  	outer_cache.flush_range = feroceon_l2_flush_range;
  
  	enable_l2();
  
  	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.
  ",
  			 l2_wt_override ? ", in WT override mode" : "");
  }