Commit c1537b4863da620f12f5b42ece61bf65314148ed

Authored by Phil Edworthy
Committed by Paul Mundt
1 parent e343a895a9

sh: sh2a: Improve cache flush/invalidate functions

The cache functions lock out interrupts for long periods; this patch
reduces the impact when operating on large address ranges. In such
cases it will:
- Invalidate the entire cache rather than individual addresses.
- Do nothing when flushing the operand cache in write-through mode.
- When flushing the operand cache in write-back mode, index the
  search for matching addresses on the cache entries instead of the
  addresses to flush.

Note: sh2a__flush_purge_region was only invalidating the operand
cache; this patch adds the write-back (flush) step as well.

Signed-off-by: Phil Edworthy <phil.edworthy@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
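
The heuristic described in the bullet list above boils down to a page-count
threshold: small ranges are still flushed address-by-address across all ways,
while anything at or above MAX_OCACHE_PAGES walks the operand-cache address
array itself. The stand-alone C sketch below models that decision with a toy
in-memory cache; the geometry, tag encoding, and helper names are assumptions
for illustration, not the kernel's, and it only mirrors the shape of
sh2a__flush_wback_region() after the patch rather than reproducing it.

/*
 * Stand-alone model of the ranged-vs-whole-cache write-back heuristic.
 * Everything here is an assumption for illustration (a tiny 4-way cache
 * held in an array, simplified tags and dirty bits); it is not the kernel
 * code, only the same decision structure as sh2a__flush_wback_region().
 */
#include <stdio.h>
#include <stdbool.h>

#define LINE_SIZE        16u                /* bytes per cache line       */
#define N_WAYS           4u
#define LINES_PER_WAY    256u               /* 4 KiB per way              */
#define PAGE_SIZE_BYTES  4096u
#define MAX_OCACHE_PAGES 32u                /* same cutoff as the patch   */

struct oc_entry {
	unsigned int tag;                   /* modelled line tag          */
	bool dirty;                         /* SH_CACHE_UPDATED analogue  */
};

static struct oc_entry oc[N_WAYS][LINES_PER_WAY];

/* Write back one line in one way if it currently caches 'addr'. */
static void flush_line_if_match(unsigned int addr, unsigned int way)
{
	unsigned int idx = (addr / LINE_SIZE) % LINES_PER_WAY;
	struct oc_entry *e = &oc[way][idx];

	if (e->tag == addr / LINE_SIZE && e->dirty)
		e->dirty = false;           /* "write back" = clear dirty */
}

/* Walk the whole address array, cleaning every entry. */
static void flush_entire_cache(void)
{
	unsigned int way, idx;

	for (way = 0; way < N_WAYS; way++)
		for (idx = 0; idx < LINES_PER_WAY; idx++)
			oc[way][idx].dirty = false;
}

/* Mirrors the shape of sh2a__flush_wback_region() after the patch. */
static void flush_wback_region(unsigned int begin, unsigned int end)
{
	if ((end - begin) / PAGE_SIZE_BYTES >= MAX_OCACHE_PAGES) {
		/* Too many pages: cheaper to walk the cache entries. */
		flush_entire_cache();
	} else {
		unsigned int way, v;

		for (way = 0; way < N_WAYS; way++)
			for (v = begin; v < end; v += LINE_SIZE)
				flush_line_if_match(v, way);
	}
}

int main(void)
{
	/* Dirty one line inside a small range; the per-address path cleans it. */
	oc[2][(0x1040 / LINE_SIZE) % LINES_PER_WAY] =
		(struct oc_entry){ .tag = 0x1040 / LINE_SIZE, .dirty = true };

	flush_wback_region(0x1000, 0x3000);  /* 2 pages: per-address path      */
	printf("after small flush, dirty = %d\n",
	       oc[2][(0x1040 / LINE_SIZE) % LINES_PER_WAY].dirty);

	flush_wback_region(0x10000, 0x10000 + 64 * PAGE_SIZE_BYTES); /* whole-cache path */
	return 0;
}

The same threshold idea is reused for the instruction cache in
sh2a_flush_icache_range() with MAX_ICACHE_PAGES, except that the whole-cache
case there is a CCR-based invalidate rather than an address-array walk, as
the diff below shows.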

Showing 1 changed file with 84 additions and 39 deletions

arch/sh/mm/cache-sh2a.c
... ... @@ -15,35 +15,78 @@
15 15 #include <asm/cacheflush.h>
16 16 #include <asm/io.h>
17 17  
  18 +/*
  19 + * The maximum number of pages we support up to when doing ranged dcache
  20 + * flushing. Anything exceeding this will simply flush the dcache in its
  21 + * entirety.
  22 + */
  23 +#define MAX_OCACHE_PAGES 32
  24 +#define MAX_ICACHE_PAGES 32
  25 +
  26 +static void sh2a_flush_oc_line(unsigned long v, int way)
  27 +{
  28 + unsigned long addr = (v & 0x000007f0) | (way << 11);
  29 + unsigned long data;
  30 +
  31 + data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
  32 + if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
  33 + data &= ~SH_CACHE_UPDATED;
  34 + __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
  35 + }
  36 +}
  37 +
  38 +static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
  39 +{
  40 + /* Set associative bit to hit all ways */
  41 + unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
  42 + __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
  43 +}
  44 +
  45 +/*
  46 + * Write back the dirty D-caches, but not invalidate them.
  47 + */
18 48 static void sh2a__flush_wback_region(void *start, int size)
19 49 {
  50 +#ifdef CONFIG_CACHE_WRITEBACK
20 51 unsigned long v;
21 52 unsigned long begin, end;
22 53 unsigned long flags;
  54 + int nr_ways;
23 55  
24 56 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
25 57 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
26 58 & ~(L1_CACHE_BYTES-1);
  59 + nr_ways = current_cpu_data.dcache.ways;
27 60  
28 61 local_irq_save(flags);
29 62 jump_to_uncached();
30 63  
31   - for (v = begin; v < end; v+=L1_CACHE_BYTES) {
32   - unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0);
  64 + /* If there are too many pages then flush the entire cache */
  65 + if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
  66 + begin = CACHE_OC_ADDRESS_ARRAY;
  67 + end = begin + (nr_ways * current_cpu_data.dcache.way_size);
  68 +
  69 + for (v = begin; v < end; v += L1_CACHE_BYTES) {
  70 + unsigned long data = __raw_readl(v);
  71 + if (data & SH_CACHE_UPDATED)
  72 + __raw_writel(data & ~SH_CACHE_UPDATED, v);
  73 + }
  74 + } else {
33 75 int way;
34   - for (way = 0; way < 4; way++) {
35   - unsigned long data = __raw_readl(addr | (way << 11));
36   - if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
37   - data &= ~SH_CACHE_UPDATED;
38   - __raw_writel(data, addr | (way << 11));
39   - }
  76 + for (way = 0; way < nr_ways; way++) {
  77 + for (v = begin; v < end; v += L1_CACHE_BYTES)
  78 + sh2a_flush_oc_line(v, way);
40 79 }
41 80 }
42 81  
43 82 back_to_cached();
44 83 local_irq_restore(flags);
  84 +#endif
45 85 }
46 86  
  87 +/*
  88 + * Write back the dirty D-caches and invalidate them.
  89 + */
47 90 static void sh2a__flush_purge_region(void *start, int size)
48 91 {
49 92 unsigned long v;
50 93  
51 94  
... ... @@ -58,13 +101,22 @@
58 101 jump_to_uncached();
59 102  
60 103 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
61   - __raw_writel((v & CACHE_PHYSADDR_MASK),
62   - CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
  104 +#ifdef CONFIG_CACHE_WRITEBACK
  105 + int way;
  106 + int nr_ways = current_cpu_data.dcache.ways;
  107 + for (way = 0; way < nr_ways; way++)
  108 + sh2a_flush_oc_line(v, way);
  109 +#endif
  110 + sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
63 111 }
  112 +
64 113 back_to_cached();
65 114 local_irq_restore(flags);
66 115 }
67 116  
  117 +/*
  118 + * Invalidate the D-caches, but no write back please
  119 + */
68 120 static void sh2a__flush_invalidate_region(void *start, int size)
69 121 {
70 122 unsigned long v;
71 123  
72 124  
73 125  
... ... @@ -74,29 +126,25 @@
74 126 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
75 127 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
76 128 & ~(L1_CACHE_BYTES-1);
  129 +
77 130 local_irq_save(flags);
78 131 jump_to_uncached();
79 132  
80   -#ifdef CONFIG_CACHE_WRITEBACK
81   - __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
82   - /* I-cache invalidate */
83   - for (v = begin; v < end; v+=L1_CACHE_BYTES) {
84   - __raw_writel((v & CACHE_PHYSADDR_MASK),
85   - CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
  133 + /* If there are too many pages then just blow the cache */
  134 + if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
  135 + __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
  136 + } else {
  137 + for (v = begin; v < end; v += L1_CACHE_BYTES)
  138 + sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
86 139 }
87   -#else
88   - for (v = begin; v < end; v+=L1_CACHE_BYTES) {
89   - __raw_writel((v & CACHE_PHYSADDR_MASK),
90   - CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
91   - __raw_writel((v & CACHE_PHYSADDR_MASK),
92   - CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
93   - }
94   -#endif
  140 +
95 141 back_to_cached();
96 142 local_irq_restore(flags);
97 143 }
98 144  
99   -/* WBack O-Cache and flush I-Cache */
  145 +/*
  146 + * Write back the range of D-cache, and purge the I-cache.
  147 + */
100 148 static void sh2a_flush_icache_range(void *args)
101 149 {
102 150 struct flusher_data *data = args;
103 151  
... ... @@ -107,23 +155,20 @@
107 155 start = data->addr1 & ~(L1_CACHE_BYTES-1);
108 156 end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
109 157  
  158 +#ifdef CONFIG_CACHE_WRITEBACK
  159 + sh2a__flush_wback_region((void *)start, end-start);
  160 +#endif
  161 +
110 162 local_irq_save(flags);
111 163 jump_to_uncached();
112 164  
113   - for (v = start; v < end; v+=L1_CACHE_BYTES) {
114   - unsigned long addr = (v & 0x000007f0);
115   - int way;
116   - /* O-Cache writeback */
117   - for (way = 0; way < 4; way++) {
118   - unsigned long data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
119   - if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
120   - data &= ~SH_CACHE_UPDATED;
121   - __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
122   - }
123   - }
124   - /* I-Cache invalidate */
125   - __raw_writel(addr,
126   - CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
  165 + /* I-Cache invalidate */
  166 + /* If there are too many pages then just blow the cache */
  167 + if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
  168 + __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
  169 + } else {
  170 + for (v = start; v < end; v += L1_CACHE_BYTES)
  171 + sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
127 172 }
128 173  
129 174 back_to_cached();