Commit bd12976c366486ea90aebd83f1cf2863ee47c76a
1 parent 63d2dfdbf4
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
ARC: cacheflush refactor #3: Unify the {d,i}cache flush leaf helpers
With line length being constant now, we can fold the 2 helpers into 1, which allows applying any (forthcoming) optimizations in a single place.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Showing 1 changed file with 55 additions and 84 deletions
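The shape of the change in miniature: the two leaf loops were structurally identical and differed only in which AUX command/tag registers they wrote, so the unified helper selects the register pair up front from the op code. The following standalone sketch shows just that dispatch; the enum values are illustrative stand-ins, not the real ARC AUX register encodings.

    #include <stdio.h>

    /* op codes, mirroring the diff below */
    #define OP_INV          0x1
    #define OP_FLUSH        0x2
    #define OP_FLUSH_N_INV  0x3
    #define OP_INV_IC       0x4

    /* stand-ins for the AUX register IDs (real encodings omitted) */
    enum aux_reg { IC_IVIL, IC_PTAG, DC_IVDL, DC_FLDL, DC_PTAG };

    static void pick_regs(int cacheop, enum aux_reg *cmd, enum aux_reg *tag)
    {
            if (cacheop == OP_INV_IC) {     /* I$: invalidate only */
                    *cmd = IC_IVIL;
                    *tag = IC_PTAG;
            } else {                        /* D$: discard vs writeback */
                    *cmd = (cacheop & OP_INV) ? DC_IVDL : DC_FLDL;
                    *tag = DC_PTAG;
            }
    }

    int main(void)
    {
            enum aux_reg cmd, tag;

            pick_regs(OP_FLUSH, &cmd, &tag);        /* -> DC_FLDL / DC_PTAG */
            printf("flush:  cmd=%d tag=%d\n", cmd, tag);
            pick_regs(OP_INV_IC, &cmd, &tag);       /* -> IC_IVIL / IC_PTAG */
            printf("inv_ic: cmd=%d tag=%d\n", cmd, tag);
            return 0;
    }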
arch/arc/mm/cache_arc700.c
... | ... | @@ -240,7 +240,60 @@ |
240 | 240 | #define OP_INV 0x1 |
241 | 241 | #define OP_FLUSH 0x2 |
242 | 242 | #define OP_FLUSH_N_INV 0x3 |
243 | +#define OP_INV_IC 0x4 | |
243 | 244 | |
245 | +/* | |
246 | + * Common Helper for Line Operations on {I,D}-Cache | |
247 | + */ | |
248 | +static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, | |
249 | + unsigned long sz, const int cacheop) | |
250 | +{ | |
251 | + unsigned int aux_cmd, aux_tag; | |
252 | + int num_lines; | |
253 | + | |
254 | + if (cacheop == OP_INV_IC) { | |
255 | + aux_cmd = ARC_REG_IC_IVIL; | |
256 | + aux_tag = ARC_REG_IC_PTAG; | |
257 | + } | |
258 | + else { | |
259 | + /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ | |
260 | + aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; | |
261 | + aux_tag = ARC_REG_DC_PTAG; | |
262 | + } | |
263 | + | |
264 | + /* Ensure we properly floor/ceil the non-line aligned/sized requests | |
265 | + * and have @paddr aligned to cache line and integral @num_lines. | |
266 | + * This however can be avoided for page-sized requests since: | |
267 | + * -@paddr will be cache-line aligned already (being page aligned) | |
268 | + * -@sz will be integral multiple of line size (being page sized). | |
269 | + */ | |
270 | + if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { | |
271 | + sz += paddr & ~CACHE_LINE_MASK; | |
272 | + paddr &= CACHE_LINE_MASK; | |
273 | + vaddr &= CACHE_LINE_MASK; | |
274 | + } | |
275 | + | |
276 | + num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | |
277 | + | |
278 | +#if (CONFIG_ARC_MMU_VER <= 2) | |
279 | + /* MMUv2 and before: paddr contains stuffed vaddr bits */ | |
280 | + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; | |
281 | +#endif | |
282 | + | |
283 | + while (num_lines-- > 0) { | |
284 | +#if (CONFIG_ARC_MMU_VER > 2) | |
285 | + /* MMUv3, cache ops require paddr separately */ | |
286 | + write_aux_reg(aux_tag, paddr); | |
287 | + | |
288 | + write_aux_reg(aux_cmd, vaddr); | |
289 | + vaddr += L1_CACHE_BYTES; | |
290 | +#else | |
291 | + write_aux_reg(aux_cmd, paddr); | |
292 | +#endif | |
293 | + paddr += L1_CACHE_BYTES; | |
294 | + } | |
295 | +} | |
296 | + | |
244 | 297 | #ifdef CONFIG_ARC_HAS_DCACHE |
245 | 298 | |
246 | 299 | /*************************************************************** |
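The floor/ceil handling in the new helper is the subtle part: the byte count is first grown by @paddr's offset into its cache line, then @paddr is floored to a line boundary, and DIV_ROUND_UP supplies the ceiling so a partial trailing line still gets a full line op. A self-contained sketch of that arithmetic, assuming a 64-byte line purely for illustration:

    #include <stdio.h>

    #define L1_CACHE_BYTES  64
    #define CACHE_LINE_MASK (~(unsigned long)(L1_CACHE_BYTES - 1))
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long paddr = 0x80001234;       /* not line aligned */
            unsigned long sz    = 0x100;            /* 256-byte request */

            /* grow sz by the offset into the first line, then floor paddr */
            sz    += paddr & ~CACHE_LINE_MASK;      /* 0x100 + 0x34 = 0x134 */
            paddr &= CACHE_LINE_MASK;               /* 0x80001200 */

            /* ceil: the partial trailing line still needs a line op */
            unsigned long num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

            /* prints paddr=0x80001200 num_lines=5 */
            printf("paddr=%#lx num_lines=%lu\n", paddr, num_lines);
            return 0;
    }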
... | ... | @@ -289,55 +342,6 @@ |
289 | 342 | write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); |
290 | 343 | } |
291 | 344 | |
292 | -/* | |
293 | - * Per Line Operation on D-Cache | |
294 | - * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete | |
295 | - * Its sole purpose is to help gcc generate ZOL (zero overhead loops) | |
296 | - * (aliasing VIPT dcache flushing needs both vaddr and paddr) | |
297 | - */ | |
298 | -static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, | |
299 | - unsigned long sz, const int cacheop) | |
300 | -{ | |
301 | - /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ | |
302 | - const int aux = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; | |
303 | - int num_lines; | |
304 | - | |
305 | - /* Ensure we properly floor/ceil the non-line aligned/sized requests | |
306 | - * and have @paddr aligned to cache line and integral @num_lines. | |
307 | - * This however can be avoided for page-sized requests since: | |
308 | - * -@paddr will be cache-line aligned already (being page aligned) | |
309 | - * -@sz will be integral multiple of line size (being page sized). | |
310 | - */ | |
311 | - if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { | |
312 | - sz += paddr & ~CACHE_LINE_MASK; | |
313 | - paddr &= CACHE_LINE_MASK; | |
314 | - vaddr &= CACHE_LINE_MASK; | |
315 | - } | |
316 | - | |
317 | - num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | |
318 | - | |
319 | -#if (CONFIG_ARC_MMU_VER <= 2) | |
320 | - paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; | |
321 | -#endif | |
322 | - | |
323 | - while (num_lines-- > 0) { | |
324 | -#if (CONFIG_ARC_MMU_VER > 2) | |
325 | - /* | |
326 | - * Just as for I$, in MMU v3, D$ ops also require | |
327 | - * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops | |
328 | - */ | |
329 | - write_aux_reg(ARC_REG_DC_PTAG, paddr); | |
330 | - | |
331 | - write_aux_reg(aux, vaddr); | |
332 | - vaddr += L1_CACHE_BYTES; | |
333 | -#else | |
334 | - /* paddr contains stuffed vaddrs bits */ | |
335 | - write_aux_reg(aux, paddr); | |
336 | -#endif | |
337 | - paddr += L1_CACHE_BYTES; | |
338 | - } | |
339 | -} | |
340 | - | |
341 | 345 | /* For kernel mappings cache operation: index is same as paddr */ |
342 | 346 | #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) |
343 | 347 | |
... | ... | @@ -362,7 +366,7 @@ |
362 | 366 | write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH); |
363 | 367 | } |
364 | 368 | |
365 | - __dc_line_loop(paddr, vaddr, sz, cacheop); | |
369 | + __cache_line_loop(paddr, vaddr, sz, cacheop); | |
366 | 370 | |
367 | 371 | if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ |
368 | 372 | wait_for_flush(); |
369 | 373 | |
370 | 374 | |
... | ... | @@ -434,42 +438,9 @@ |
434 | 438 | unsigned long sz) |
435 | 439 | { |
436 | 440 | unsigned long flags; |
437 | - int num_lines; | |
438 | 441 | |
439 | - /* | |
440 | - * Ensure we properly floor/ceil the non-line aligned/sized requests: | |
441 | - * However page-sized flushes can be compile-time optimised. | |
442 | - * -@paddr will be cache-line aligned already (being page aligned) | |
443 | - * -@sz will be integral multiple of line size (being page sized). | |
444 | - */ | |
445 | - if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { | |
446 | - sz += paddr & ~CACHE_LINE_MASK; | |
447 | - paddr &= CACHE_LINE_MASK; | |
448 | - vaddr &= CACHE_LINE_MASK; | |
449 | - } | |
450 | - | |
451 | - num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | |
452 | - | |
453 | -#if (CONFIG_ARC_MMU_VER <= 2) | |
454 | - /* bits 17:13 of vaddr go as bits 4:0 of paddr */ | |
455 | - paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; | |
456 | -#endif | |
457 | - | |
458 | 442 | local_irq_save(flags); |
459 | - while (num_lines-- > 0) { | |
460 | -#if (CONFIG_ARC_MMU_VER > 2) | |
461 | - /* tag comes from phy addr */ | |
462 | - write_aux_reg(ARC_REG_IC_PTAG, paddr); | |
463 | - | |
464 | - /* index bits come from vaddr */ | |
465 | - write_aux_reg(ARC_REG_IC_IVIL, vaddr); | |
466 | - vaddr += L1_CACHE_BYTES; | |
467 | -#else | |
468 | - /* paddr contains stuffed vaddrs bits */ | |
469 | - write_aux_reg(ARC_REG_IC_IVIL, paddr); | |
470 | -#endif | |
471 | - paddr += L1_CACHE_BYTES; | |
472 | - } | |
443 | + __cache_line_loop(paddr, vaddr, sz, OP_INV_IC); | |
473 | 444 | local_irq_restore(flags); |
474 | 445 | } |
475 | 446 |
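The removed I$ copy above carries the one comment worth keeping in mind for the pre-MMUv3 path the unified helper inherits: with 8 KB pages (PAGE_SHIFT == 13, assumed here as ARC's default), bits 17:13 of the virtual address are folded into bits 4:0 of the line-aligned physical address before each per-line write. A standalone sketch of that bit-stuffing; the addresses are made up:

    #include <stdio.h>

    #define PAGE_SHIFT      13      /* assumed: ARC's default 8 KB pages */

    int main(void)
    {
            unsigned long paddr = 0x80002000;       /* line-aligned phys addr */
            unsigned long vaddr = 0x4002e000;       /* corresponding virt addr */

            /* bits 17:13 of vaddr become bits 4:0 of the stuffed paddr;
             * those bits are free in paddr, which is at least line aligned */
            unsigned long stuffed = paddr | ((vaddr >> PAGE_SHIFT) & 0x1F);

            /* prints stuffed=0x80002017 (0x17 = vaddr bits 17:13) */
            printf("stuffed=%#lx\n", stuffed);
            return 0;
    }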