Commit 0861fd1c25a8ce79ae7647e384cb6555c46e1690

Authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - fix EFI stub cache maintenance causing aborts during boot on certain
   platforms

 - handle byte stores in __clear_user without panicking

 - fix race condition in aarch64_insn_patch_text_sync() (instruction
   patching)

 - a couple of type fixes

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: ARCH_PFN_OFFSET should be unsigned long
  Correct the race condition in aarch64_insn_patch_text_sync()
  arm64: __clear_user: handle exceptions on strb
  arm64: Fix data type for physical address
  arm64: efi: Fix stub cache maintenance

Showing 5 changed files

arch/arm64/include/asm/memory.h
... ... @@ -142,7 +142,7 @@
142 142 * virt_to_page(k) convert a _valid_ virtual address to struct page *
143 143 * virt_addr_valid(k) indicates whether a virtual address is valid
144 144 */
145   -#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
  145 +#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
146 146  
147 147 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
148 148 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
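
For illustration only (not part of the patch): PHYS_PFN_OFFSET is derived from PHYS_OFFSET, which is a phys_addr_t (u64) on arm64, so any pfn expression that uses it gets promoted to unsigned long long even though the width is the same as unsigned long; the explicit cast keeps such expressions in plain unsigned long, which is what the generic mm code and %lu/%lx printk formats expect. A minimal standalone sketch of the promotion (the constants below are made up for the example):

#include <stdio.h>

/* Stand-ins for the kernel macros: the offset is derived from a u64 value. */
#define PHYS_PFN_OFFSET		(((unsigned long long)0x40000000ULL) >> 12)
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#define TYPE_NAME(x) _Generic((x),			\
	unsigned long:      "unsigned long",		\
	unsigned long long: "unsigned long long",	\
	default:            "other")

int main(void)
{
	unsigned long pfn = 0x80000;

	/* Without the cast the whole expression is promoted to u64 ... */
	printf("pfn - PHYS_PFN_OFFSET : %s\n", TYPE_NAME(pfn - PHYS_PFN_OFFSET));
	/* ... with the cast it stays unsigned long. */
	printf("pfn - ARCH_PFN_OFFSET : %s\n", TYPE_NAME(pfn - ARCH_PFN_OFFSET));
	return 0;
}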
arch/arm64/kernel/efi-entry.S
... ... @@ -54,18 +54,17 @@
54 54 b.eq efi_load_fail
55 55  
56 56 /*
57   - * efi_entry() will have relocated the kernel image if necessary
58   - * and we return here with device tree address in x0 and the kernel
59   - * entry point stored at *image_addr. Save those values in registers
60   - * which are callee preserved.
  57 + * efi_entry() will have copied the kernel image if necessary and we
  58 + * return here with device tree address in x0 and the kernel entry
  59 + * point stored at *image_addr. Save those values in registers which
  60 + * are callee preserved.
61 61 */
62 62 mov x20, x0 // DTB address
63 63 ldr x0, [sp, #16] // relocated _text address
64 64 mov x21, x0
65 65  
66 66 /*
67   - * Flush dcache covering current runtime addresses
68   - * of kernel text/data. Then flush all of icache.
  67 + * Calculate size of the kernel Image (same for original and copy).
69 68 */
70 69 adrp x1, _text
71 70 add x1, x1, #:lo12:_text
72 71  
... ... @@ -73,9 +72,24 @@
73 72 add x2, x2, #:lo12:_edata
74 73 sub x1, x2, x1
75 74  
  75 + /*
  76 + * Flush the copied Image to the PoC, and ensure it is not shadowed by
  77 + * stale icache entries from before relocation.
  78 + */
76 79 bl __flush_dcache_area
77 80 ic ialluis
78 81  
  82 + /*
  83 + * Ensure that the rest of this function (in the original Image) is
  84 + * visible when the caches are disabled. The I-cache can't have stale
  85 + * entries for the VA range of the current image, so no maintenance is
  86 + * necessary.
  87 + */
  88 + adr x0, efi_stub_entry
  89 + adr x1, efi_stub_entry_end
  90 + sub x1, x1, x0
  91 + bl __flush_dcache_area
  92 +
79 93 /* Turn off Dcache and MMU */
80 94 mrs x0, CurrentEL
81 95 cmp x0, #CurrentEL_EL2
... ... @@ -105,5 +119,6 @@
105 119 ldp x29, x30, [sp], #32
106 120 ret
107 121  
  122 +efi_stub_entry_end:
108 123 ENDPROC(efi_stub_entry)
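
The same requirement, that freshly written code be cleaned to the point the CPU will fetch it from and not be shadowed by stale I-cache contents before it runs, can be shown in userspace. A hypothetical AArch64-only sketch (not part of the patch; the mmap/__builtin___clear_cache combination and the hand-encoded instructions are assumptions of this example, not kernel interfaces):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* mov w0, #42 ; ret  -- a trivial AArch64 function body */
	static const unsigned int insns[] = { 0x52800540, 0xd65f03c0 };

	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memcpy(buf, insns, sizeof(insns));

	/*
	 * Clean the D-cache and invalidate the I-cache for the copy before
	 * executing it -- the userspace counterpart of the stub's
	 * __flush_dcache_area + "ic ialluis" for the relocated Image.
	 */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(insns));

	int (*fn)(void) = (int (*)(void))buf;
	printf("copied code returned %d\n", fn());
	return 0;
}

The stub has the additional constraint that the MMU and caches are about to be turned off, which is why the running function's own range is also cleaned to the PoC before the jump.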
arch/arm64/kernel/insn.c
... ... @@ -163,9 +163,10 @@
163 163 * which ends with "dsb; isb" pair guaranteeing global
164 164 * visibility.
165 165 */
166   - atomic_set(&pp->cpu_count, -1);
  166 + /* Notify other processors with an additional increment. */
  167 + atomic_inc(&pp->cpu_count);
167 168 } else {
168   - while (atomic_read(&pp->cpu_count) != -1)
  169 + while (atomic_read(&pp->cpu_count) <= num_online_cpus())
169 170 cpu_relax();
170 171 isb();
171 172 }
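
For reference (not part of the patch), the fixed rendezvous can be mimicked in userspace with C11 atomics: every participant increments the counter on arrival, the first to arrive does the patching and then adds the extra increment, and the rest spin until the counter exceeds the number of participants. Counting increments this way avoids the old -1 sentinel, which a late-arriving CPU could destroy with its own increment and then spin on forever. This is a simplified analogue only: pthreads stand in for stop_machine(), sequentially consistent atomics for the dsb/isb pair, and NR_THREADS, patch_text_cb and patched_insn are made-up names.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_THREADS 4				/* stands in for num_online_cpus() */

static atomic_int cpu_count;
static _Atomic unsigned int patched_insn;	/* stands in for the patched text */

static void *patch_text_cb(void *arg)
{
	long id = (long)arg;

	/* The first thread to bump the counter becomes the patcher. */
	if (atomic_fetch_add(&cpu_count, 1) == 0) {
		atomic_store(&patched_insn, 0xd503201fu);	/* AArch64 NOP */
		/* Notify the other threads with an additional increment. */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		/* Released only once every thread arrived and the patch is in. */
		while (atomic_load(&cpu_count) <= NR_THREADS)
			;
	}
	printf("thread %ld sees insn %#x\n", id, atomic_load(&patched_insn));
	return NULL;
}

int main(void)
{
	pthread_t t[NR_THREADS];

	for (long i = 0; i < NR_THREADS; i++)
		pthread_create(&t[i], NULL, patch_text_cb, (void *)i);
	for (int i = 0; i < NR_THREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}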
arch/arm64/lib/clear_user.S
... ... @@ -46,7 +46,7 @@
46 46 sub x1, x1, #2
47 47 4: adds x1, x1, #1
48 48 b.mi 5f
49   - strb wzr, [x0]
  49 +USER(9f, strb wzr, [x0] )
50 50 5: mov x0, #0
51 51 ret
52 52 ENDPROC(__clear_user)
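
USER(9f, ...) records an exception-table entry for that strb, so a fault during the byte store is redirected to the local fixup label instead of taking the kernel down. A rough userspace parallel (illustration only, using SIGSEGV with sigsetjmp/siglongjmp in place of the kernel's exception tables):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf fixup;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(fixup, 1);			/* jump to the "fixup" path */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* A read-only page: any byte store into it will fault. */
	char *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	if (sigsetjmp(fixup, 1) == 0) {
		*(volatile char *)p = 0;	/* the faulting byte store */
		puts("store succeeded");
	} else {
		puts("store faulted, recovered instead of crashing");
	}
	return 0;
}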
arch/arm64/mm/mmu.c
... ... @@ -202,7 +202,7 @@
202 202 }
203 203  
204 204 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
205   - unsigned long end, unsigned long phys,
  205 + unsigned long end, phys_addr_t phys,
206 206 int map_io)
207 207 {
208 208 pud_t *pud;
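
On arm64 both unsigned long and phys_addr_t are 64-bit, so the hunk above is type hygiene rather than a behavioural fix, but the same pattern on a 32-bit configuration with a 64-bit phys_addr_t (LPAE-style) silently truncates the physical address. A small standalone sketch of that failure mode, using fixed-width stand-ins for the two kernel types (the function names are made up):

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;			/* 64-bit physical addresses */

/* Emulates the old prototype on a 32-bit build, where unsigned long is 32-bit. */
static void map_region_ulong32(uint32_t phys)
{
	printf("mapping %#" PRIx32 "\n", phys);
}

static void map_region_phys(phys_addr_t phys)
{
	printf("mapping %#" PRIx64 "\n", phys);
}

int main(void)
{
	phys_addr_t highmem = 0x880000000ULL;	/* lives above 4 GiB */

	map_region_ulong32((uint32_t)highmem);	/* truncated to 0x80000000 */
	map_region_phys(highmem);		/* full address preserved */
	return 0;
}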