/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"

/*
 *	v7_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
ENTRY(v7_flush_icache_all)
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	mov	pc, lr
ENDPROC(v7_flush_icache_all)
/*
 *	v7_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Walks the cache hierarchy described by CLIDR and cleans + invalidates
 *	every data/unified level up to the Level of Coherency, one set/way
 *	at a time, starting at level 0.
 *
 *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
loop1:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
#ifdef CONFIG_PREEMPT
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop2:
	mov	r9, r4				@ create working copy of max way size
loop3:
 ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 THUMB(	lsl	r6, r9, r5		)	@ Thumb2 cannot fold the shift into orr,
 THUMB(	orr	r11, r10, r6		)	@ so build it in r6 first
 ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
 THUMB(	lsl	r6, r7, r2		)
 THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the way
	bge	loop3
	subs	r7, r7, #1			@ decrement the index
	bge	loop2
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_flush_dcache_all)
/*
 *	v7_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is now achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based cache
 *	maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(v7_flush_kern_cache_all)
 ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)	@ registers clobbered by
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)	@ v7_flush_dcache_all
	bl	v7_flush_dcache_all
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	mov	pc, lr
ENDPROC(v7_flush_kern_cache_all)
/*
 *	v7_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
	/*FALLTHROUGH*/
/*
 *	v7_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache, so no flushing is required here;
 *	  both entry points are deliberate no-ops.
 */
ENTRY(v7_flush_user_cache_range)
	mov	pc, lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
/*
 *	v7_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region. This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
	/* FALLTHROUGH */
/*
 *	v7_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region. This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1			@ r3 = D-cache line mask
	bic	r12, r0, r3			@ align start down to a line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 workaround (SMP only)
	ALT_UP(W(nop))
#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb					@ complete cleans before I-side ops
	icache_line_size r2, r3
	sub	r3, r2, #1			@ r3 = I-cache line mask
	bic	r12, r0, r3			@ re-align start for the I-cache
2:
 USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	mov	r0, #0				@ success: return 0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
	dsb
	isb
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.  (NOTE(review): reached via the exception
 * fixup for the USER()-marked accesses above — the fixup table itself is
 * generated elsewhere.)
 */
9001:
	mov	r0, #-EFAULT
	mov	pc, lr
 UNWIND(.fnend		)
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
/*
 *	v7_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1			@ r1 = end address (addr + size)
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 workaround (SMP only)
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_flush_kern_dcache_area)
/*
 *	v7_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	Partial lines at either end of the region are cleaned+invalidated
 *	rather than just invalidated, so that unrelated data sharing those
 *	lines is not lost.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1			@ r3 = cache line mask
	tst	r0, r3				@ start mis-aligned?
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 workaround (SMP only)
	ALT_UP(W(nop))
#endif
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	tst	r1, r3				@ end mis-aligned?
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
1:
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_inv_range)
/*
 *	v7_dma_clean_range(start,end)
 *
 *	Clean (write back) the data cache within the specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 workaround (SMP only)
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_clean_range)
/*
 *	v7_dma_flush_range(start,end)
 *
 *	Clean and invalidate the data cache within the specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 workaround (SMP only)
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_flush_range)
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	FROM_DEVICE transfers only need the stale cache lines invalidated;
 *	everything else must be cleaned so the device sees current data.
 */
ENTRY(v7_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_FROM_DEVICE
	beq	v7_dma_inv_range		@ tail-call: inv or clean
	b	v7_dma_clean_range
ENDPROC(v7_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	After a TO_DEVICE transfer nothing needs doing; otherwise the
 *	region is invalidated so the CPU re-reads the DMA'd data.
 */
ENTRY(v7_dma_unmap_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_TO_DEVICE
	bne	v7_dma_inv_range		@ tail-call for FROM/BIDIRECTIONAL
	mov	pc, lr
ENDPROC(v7_dma_unmap_area)
	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7