Commit 7d09e85448dfa78e3e58186c934449aaf6d49b50

Authored by Catalin Marinas
Committed by Russell King
1 parent 6d78b5f9c6

[ARM] 4393/2: ARMv7: Add uncompressing code for the new CPU Id format

The current arch/arm/boot/compressed/head.S code only supports cores up to
ARMv6 with the old CPU Id format. This patch adds support for newer ARMv6
cores using the new CPU Id format and for ARMv7 cores, which no longer have
the ARMv4 cache operations.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
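
For background: cores that use the new CPUID scheme report 0xF in the architecture field, bits [19:16], of the main ID register rather than one of the fixed pre-v7 architecture codes, so a single value/mask pair of 0x000f0000/0x000f0000 in the decompressor's processor-type table catches every such core (new-format ARMv6 as well as ARMv7). Below is a minimal C sketch of how that table match works, showing only the entries touched by this patch; the names and the exact scan are illustrative, while in head.S the table is the .word/.word/branch groups changed in the diff below.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative mirror of the decompressor's processor-type table:
     * each entry is a match value, a mask applied to the CPU ID, and the
     * three cache handlers branched to from the table. */
    struct proc_type {
        uint32_t val;
        uint32_t mask;
        void (*cache_on)(void);
        void (*cache_off)(void);
        void (*cache_flush)(void);
    };

    static void armv4_on(void)    {}
    static void armv4_off(void)   {}
    static void armv6_flush(void) {}
    static void armv7_on(void)    {}
    static void armv7_off(void)   {}
    static void armv7_flush(void) {}
    static void no_op(void)       {}

    static const struct proc_type proc_types[] = {
        { 0x0007b000, 0x000ff000, armv4_on, armv4_off, armv6_flush }, /* old-format ARMv6 IDs    */
        { 0x000f0000, 0x000f0000, armv7_on, armv7_off, armv7_flush }, /* new CPU Id scheme       */
        { 0x00000000, 0x00000000, no_op,    no_op,     no_op      },  /* unrecognised: catch-all */
    };

    /* The first entry whose masked CPU ID matches wins, mirroring the
     * table scan in head.S. */
    static const struct proc_type *lookup(uint32_t cpu_id)
    {
        for (size_t i = 0; i < sizeof(proc_types) / sizeof(proc_types[0]); i++)
            if ((cpu_id & proc_types[i].mask) == proc_types[i].val)
                return &proc_types[i];
        return NULL;
    }

    int main(void)
    {
        /* e.g. an ARMv7-style ID with the architecture field (bits
         * [19:16]) set to 0xF falls through to the second entry. */
        const struct proc_type *p = lookup(0x410fc080);
        p->cache_flush();
        return 0;
    }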

Showing 1 changed file with 92 additions and 1 deletion

arch/arm/boot/compressed/head.S
@@ -436,6 +436,28 @@
         mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
         mov pc, r12

+__armv7_mmu_cache_on:
+        mov r12, lr
+        mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
+        tst r11, #0xf @ VMSA
+        blne __setup_mmu
+        mov r0, #0
+        mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+        tst r11, #0xf @ VMSA
+        mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+        mrc p15, 0, r0, c1, c0, 0 @ read control reg
+        orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
+        orr r0, r0, #0x003c @ write buffer
+        orrne r0, r0, #1 @ MMU enabled
+        movne r1, #-1
+        mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
+        mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+        mcr p15, 0, r0, c1, c0, 0 @ load control register
+        mrc p15, 0, r0, c1, c0, 0 @ and read it back
+        mov r0, #0
+        mcr p15, 0, r0, c7, c5, 4 @ ISB
+        mov pc, r12
+
 __arm6_mmu_cache_on:
         mov r12, lr
         bl __setup_mmu
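
A note on __armv7_mmu_cache_on above: instead of assuming an MMU is present, it reads ID_MMFR0 and tests the VMSA support field in bits [3:0]; the page tables are built, the TLBs flushed and the M bit set only when a VMSA is reported, while the I-cache and write-buffer bits are enabled unconditionally. A rough, host-runnable C rendering of that decision follows; the CP15 accessors are canned stand-ins, and the page-table pointer and domain-register loads are left out.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the CP15 accesses in __armv7_mmu_cache_on
     * (ID_MMFR0 and the control register); the canned ID_MMFR0 value
     * simply pretends a VMSA is present. */
    static uint32_t sctlr;                                     /* modelled control register */
    static uint32_t read_id_mmfr0(void) { return 0x00000003; } /* VMSA support field != 0   */
    static uint32_t read_sctlr(void)    { return sctlr; }
    static void write_sctlr(uint32_t v) { sctlr = v; }
    static void setup_mmu(void)         { /* head.S builds flat page tables here */ }

    /* The decision the new routine makes: only touch the MMU when
     * ID_MMFR0 reports a VMSA; the I-cache and write-buffer bits are set
     * either way. */
    static void armv7_mmu_cache_on(void)
    {
        int has_vmsa = (read_id_mmfr0() & 0xf) != 0;   /* bits [3:0]: VMSA support */
        if (has_vmsa)
            setup_mmu();                               /* blne __setup_mmu */

        uint32_t ctrl = read_sctlr();
        ctrl |= 0x5000;                                /* I-cache enable, RR replacement  */
        ctrl |= 0x003c;                                /* write buffer / cache bits [5:2] */
        if (has_vmsa)
            ctrl |= 1;                                 /* M bit: enable the MMU */
        write_sctlr(ctrl);                             /* head.S then reads it back + ISB */
    }

    int main(void)
    {
        armv7_mmu_cache_on();
        printf("control register: 0x%08x\n", sctlr);
        return 0;
    }
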
@@ -622,11 +644,17 @@
         b __armv4_mmu_cache_flush

         .word 0x0007b000 @ ARMv6
-        .word 0x0007f000
+        .word 0x000ff000
         b __armv4_mmu_cache_on
         b __armv4_mmu_cache_off
         b __armv6_mmu_cache_flush

+        .word 0x000f0000 @ new CPU Id
+        .word 0x000f0000
+        b __armv7_mmu_cache_on
+        b __armv7_mmu_cache_off
+        b __armv7_mmu_cache_flush
+
         .word 0 @ unrecognised type
         .word 0
         mov pc, lr
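
Two things change in the table above: the mask on the existing ARMv6 entry widens from 0x0007f000 to 0x000ff000 so that bit 19 of the CPU ID is also tested, and a new entry for the CPUID scheme is added ahead of the unrecognised-type terminator. Without the wider mask, an ID using the new scheme (architecture field 0xF in bits [19:16]) with 0xB in bits [15:12] would still satisfy the ARMv6 match and be routed to the ARMv4/ARMv6 cache handlers instead of the new ARMv7 ones. A small self-contained check of that arithmetic, using an illustrative ARM1176-style ID (the exact value is only an example):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative new-scheme ARMv6 ID: architecture field 0xF and a
         * 0xBxx part number, as on an ARM1176-class core. */
        uint32_t id = 0x410fb767;

        /* Old mask: the ID slips through and matches the ARMv6 entry.   */
        assert((id & 0x0007f000) == 0x0007b000);
        /* Widened mask: bit 19 is now tested, so the match fails ...    */
        assert((id & 0x000ff000) != 0x0007b000);
        /* ... and the ID is caught by the new 0x000f0000/0x000f0000 entry. */
        assert((id & 0x000f0000) == 0x000f0000);
        return 0;
    }
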
@@ -674,6 +702,16 @@
         mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
         mov pc, lr

+__armv7_mmu_cache_off:
+        mrc p15, 0, r0, c1, c0
+        bic r0, r0, #0x000d
+        mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
+        mov r12, lr
+        bl __armv7_mmu_cache_flush
+        mov r0, #0
+        mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
+        mov pc, r12
+
 __arm6_mmu_cache_off:
         mov r0, #0x00000030 @ ARM6 control reg.
         b __armv3_mmu_cache_off
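
In __armv7_mmu_cache_off above, the 0x000d mask cleared from the control register covers the M (bit 0, MMU), C (bit 2, data cache) and W (bit 3, write buffer) enable bits; the caches are then cleaned and invalidated via __armv7_mmu_cache_flush and the TLBs invalidated before returning with the MMU and caches off.
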
@@ -728,6 +766,59 @@
         mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
         mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
         mcr p15, 0, r1, c7, c10, 4 @ drain WB
+        mov pc, lr
+
+__armv7_mmu_cache_flush:
+        mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
+        tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
+        beq hierarchical
+        mov r10, #0
+        mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
+        b iflush
+hierarchical:
+        stmfd sp!, {r0-r5, r7, r9-r11}
+        mrc p15, 1, r0, c0, c0, 1 @ read clidr
+        ands r3, r0, #0x7000000 @ extract loc from clidr
+        mov r3, r3, lsr #23 @ left align loc bit field
+        beq finished @ if loc is 0, then no need to clean
+        mov r10, #0 @ start clean at cache level 0
+loop1:
+        add r2, r10, r10, lsr #1 @ work out 3x current cache level
+        mov r1, r0, lsr r2 @ extract cache type bits from clidr
+        and r1, r1, #7 @ mask of the bits for current cache only
+        cmp r1, #2 @ see what cache we have at this level
+        blt skip @ skip if no cache, or just i-cache
+        mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+        mcr p15, 0, r10, c7, c5, 4 @ isb to sync the new cssr&csidr
+        mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+        and r2, r1, #7 @ extract the length of the cache lines
+        add r2, r2, #4 @ add 4 (line length offset)
+        ldr r4, =0x3ff
+        ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+        .word 0xe16f5f14 @ clz r5, r4 - find bit position of way size increment
+        ldr r7, =0x7fff
+        ands r7, r7, r1, lsr #13 @ extract max number of the index size
+loop2:
+        mov r9, r4 @ create working copy of max way size
+loop3:
+        orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+        orr r11, r11, r7, lsl r2 @ factor index number into r11
+        mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+        subs r9, r9, #1 @ decrement the way
+        bge loop3
+        subs r7, r7, #1 @ decrement the index
+        bge loop2
+skip:
+        add r10, r10, #2 @ increment cache number
+        cmp r3, r10
+        bgt loop1
+finished:
+        mov r10, #0 @ switch back to cache level 0
+        mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+        ldmfd sp!, {r0-r5, r7, r9-r11}
+iflush:
+        mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
+        mcr p15, 0, r10, c7, c10, 4 @ drain WB
         mov pc, lr

 __armv4_mmu_cache_flush:
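
The new __armv7_mmu_cache_flush first tests a field of ID_MMFR1 (bits [19:16]); if it is non-zero the legacy ARMv6-style whole-D-cache clean+invalidate operation is used, otherwise (zero, as on ARMv7 cores with hierarchical caches) it falls into the set/way walk. That walk reads CLIDR for the Level of Coherency and the type of each cache level, selects each data/unified level in the cache size selection register, reads CCSIDR for the line length, associativity and number of sets, and issues a clean+invalidate by set/way for every combination, finishing with an I-cache/BTB invalidate and a write-buffer drain. A host-runnable C sketch of the same walk follows, with canned stand-ins for the CP15 accesses; the I-cache step and the barriers are omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the CP15 accesses used by the assembly
     * (CLIDR, CSSELR, CCSIDR and the clean+invalidate-by-set/way op).
     * The canned values model one 4-way, 64-byte-line, 128-set data
     * cache so the walk can be exercised on a host machine. */
    static uint32_t csselr;
    static unsigned long ops;

    static uint32_t read_clidr(void)     { return (1u << 24) | 2u; }  /* LoC = 1, level 0 = data cache */
    static void write_csselr(uint32_t v) { csselr = v; }
    static uint32_t read_ccsidr(void)    { return (127u << 13) | (3u << 3) | 2u; } /* sets-1, ways-1, linesize */
    static void dccisw(uint32_t setway)  { (void)setway; ops++; }     /* clean+invalidate by set/way */

    static uint32_t clz32(uint32_t x)
    {
        uint32_t n = 0;
        if (!x)
            return 32;
        while (!(x & 0x80000000u)) {
            x <<= 1;
            n++;
        }
        return n;
    }

    /* Same structure as loop1/loop2/loop3 above: walk every data/unified
     * cache level up to the Level of Coherency and clean+invalidate
     * every set/way combination at that level. */
    static void flush_dcache_all(void)
    {
        uint32_t clidr = read_clidr();
        uint32_t loc = (clidr >> 24) & 0x7;               /* Level of Coherency */

        for (uint32_t level = 0; level < loc; level++) {
            uint32_t ctype = (clidr >> (level * 3)) & 0x7;
            if (ctype < 2)                                /* no cache, or I-cache only */
                continue;
            write_csselr(level << 1);                     /* select the data/unified cache */
            uint32_t ccsidr = read_ccsidr();
            uint32_t line_shift = (ccsidr & 0x7) + 4;     /* log2(line length in bytes) */
            uint32_t max_way = (ccsidr >> 3) & 0x3ff;     /* associativity - 1 */
            uint32_t max_set = (ccsidr >> 13) & 0x7fff;   /* number of sets - 1 */
            uint32_t way_shift = max_way ? clz32(max_way) : 0;

            for (uint32_t set = 0; set <= max_set; set++)
                for (uint32_t way = 0; way <= max_way; way++)
                    dccisw((level << 1) | (way << way_shift) | (set << line_shift));
        }
        write_csselr(0);                                  /* back to cache level 0 */
    }

    int main(void)
    {
        flush_dcache_all();
        printf("set/way operations issued: %lu\n", ops);  /* 128 sets x 4 ways = 512 */
        return 0;
    }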