Commit 5659c0e4708d2893606df3335cb453f17220bd60

Authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "A number of ARM fixes, the biggest is fixing a regression caused by
  appended DT blobs exceeding 64K, causing the decompressor fixup code
  to fail to patch the DT blob.  Another important fix is for the ASID
  allocator from Will Deacon which prevents some rare crashes seen on
  some systems.  Lastly, there's a build fix for v7M systems when printk
  support is disabled.

  The two remaining fixes are more cosmetic: the IOMMU one prevents an
  annoying but harmless warning message, and we disable the kernel
  strict memory permissions option on non-MMU builds, which can't
  support it anyway"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8299/1: mm: ensure local active ASID is marked as allocated on rollover
  ARM: 8298/1: ARM_KERNMEM_PERMS only works with MMU enabled
  ARM: 8295/1: fix v7M build for !CONFIG_PRINTK
  ARM: 8294/1: ATAG_DTB_COMPAT: remove the DT workspace's hardcoded 64KB size
  ARM: 8288/1: dma-mapping: don't detach devices without an IOMMU during teardown

Showing 5 changed files

arch/arm/boot/compressed/head.S
... ... @@ -263,16 +263,37 @@
263 263 * OK... Let's do some funky business here.
264 264 * If we do have a DTB appended to zImage, and we do have
265 265 * an ATAG list around, we want the later to be translated
266   - * and folded into the former here. To be on the safe side,
267   - * let's temporarily move the stack away into the malloc
268   - * area. No GOT fixup has occurred yet, but none of the
269   - * code we're about to call uses any global variable.
  266 + * and folded into the former here. No GOT fixup has occurred
  267 + * yet, but none of the code we're about to call uses any
  268 + * global variable.
270 269 */
271   - add sp, sp, #0x10000
  270 +
  271 + /* Get the initial DTB size */
  272 + ldr r5, [r6, #4]
  273 +#ifndef __ARMEB__
  274 + /* convert to little endian */
  275 + eor r1, r5, r5, ror #16
  276 + bic r1, r1, #0x00ff0000
  277 + mov r5, r5, ror #8
  278 + eor r5, r5, r1, lsr #8
  279 +#endif
  280 + /* 50% DTB growth should be good enough */
  281 + add r5, r5, r5, lsr #1
  282 + /* preserve 64-bit alignment */
  283 + add r5, r5, #7
  284 + bic r5, r5, #7
  285 + /* clamp to 32KB min and 1MB max */
  286 + cmp r5, #(1 << 15)
  287 + movlo r5, #(1 << 15)
  288 + cmp r5, #(1 << 20)
  289 + movhi r5, #(1 << 20)
  290 + /* temporarily relocate the stack past the DTB work space */
  291 + add sp, sp, r5
  292 +
272 293 stmfd sp!, {r0-r3, ip, lr}
273 294 mov r0, r8
274 295 mov r1, r6
275   - sub r2, sp, r6
  296 + mov r2, r5
276 297 bl atags_to_fdt
277 298  
278 299 /*
279 300  
... ... @@ -285,11 +306,11 @@
285 306 bic r0, r0, #1
286 307 add r0, r0, #0x100
287 308 mov r1, r6
288   - sub r2, sp, r6
  309 + mov r2, r5
289 310 bleq atags_to_fdt
290 311  
291 312 ldmfd sp!, {r0-r3, ip, lr}
292   - sub sp, sp, #0x10000
  313 + sub sp, sp, r5
293 314 #endif
294 315  
295 316 mov r8, r6 @ use the appended device tree
... ... @@ -306,7 +327,7 @@
306 327 subs r1, r5, r1
307 328 addhi r9, r9, r1
308 329  
309   - /* Get the dtb's size */
  330 + /* Get the current DTB size */
310 331 ldr r5, [r6, #4]
311 332 #ifndef __ARMEB__
312 333 /* convert r5 (dtb size) to little endian */
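
The heart of the 64K fix is the sizing code above: the workspace for the
merged DT is no longer a fixed 0x10000 bytes but is derived from the
appended DTB's own size.  As a reading aid, here is a minimal C sketch of
that calculation (standalone, with an illustrative function name, not
kernel code); reading the totalsize field byte by byte stands in for the
explicit byte swap the assembly does on little-endian:

#include <stdint.h>

/* Sketch of the DTB workspace sizing performed in head.S above. */
static uint32_t dtb_workspace_size(const uint8_t *dtb)
{
	uint32_t size;

	/* The FDT header stores totalsize big-endian at byte offset 4. */
	size = (uint32_t)dtb[4] << 24 | (uint32_t)dtb[5] << 16 |
	       (uint32_t)dtb[6] << 8  | (uint32_t)dtb[7];

	size += size >> 1;		/* 50% DTB growth should be good enough */
	size = (size + 7) & ~7u;	/* preserve 64-bit alignment */

	if (size < (1u << 15))		/* clamp to 32KB minimum ... */
		size = 1u << 15;
	if (size > (1u << 20))		/* ... and 1MB maximum */
		size = 1u << 20;

	return size;
}

The clamping keeps the stack relocation bounded: even a tiny DTB gets 32KB
of room for the folded ATAGs, while a huge one cannot push the stack more
than 1MB away.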
arch/arm/kernel/entry-v7m.S
... ... @@ -22,10 +22,12 @@
22 22  
23 23 __invalid_entry:
24 24 v7m_exception_entry
  25 +#ifdef CONFIG_PRINTK
25 26 adr r0, strerr
26 27 mrs r1, ipsr
27 28 mov r2, lr
28 29 bl printk
  30 +#endif
29 31 mov r0, sp
30 32 bl show_regs
31 33 1: b 1b
arch/arm/mm/Kconfig
... ... @@ -1012,6 +1012,7 @@
1012 1012  
1013 1013 config ARM_KERNMEM_PERMS
1014 1014 bool "Restrict kernel memory permissions"
  1015 + depends on MMU
1015 1016 help
1016 1017 If this is set, kernel memory other than kernel text (and rodata)
1017 1018 will be made non-executable. The tradeoff is that each region is
arch/arm/mm/context.c
... ... @@ -144,21 +144,17 @@
144 144 /* Update the list of reserved ASIDs and the ASID bitmap. */
145 145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
146 146 for_each_possible_cpu(i) {
147   - if (i == cpu) {
148   - asid = 0;
149   - } else {
150   - asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
151   - /*
152   - * If this CPU has already been through a
153   - * rollover, but hasn't run another task in
154   - * the meantime, we must preserve its reserved
155   - * ASID, as this is the only trace we have of
156   - * the process it is still running.
157   - */
158   - if (asid == 0)
159   - asid = per_cpu(reserved_asids, i);
160   - __set_bit(asid & ~ASID_MASK, asid_map);
161   - }
  147 + asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
  148 + /*
  149 + * If this CPU has already been through a
  150 + * rollover, but hasn't run another task in
  151 + * the meantime, we must preserve its reserved
  152 + * ASID, as this is the only trace we have of
  153 + * the process it is still running.
  154 + */
  155 + if (asid == 0)
  156 + asid = per_cpu(reserved_asids, i);
  157 + __set_bit(asid & ~ASID_MASK, asid_map);
162 158 per_cpu(reserved_asids, i) = asid;
163 159 }
164 160  
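
The hunk above drops the special case for the local CPU so that its active
ASID is handled like every other CPU's on rollover.  A simplified,
self-contained model of that bookkeeping (plain C with hypothetical names,
standing in for the kernel's per-cpu and atomic machinery):

#include <stdint.h>
#include <string.h>

#define NR_CPUS_MODEL   4
#define NUM_ASIDS_MODEL 256

/*
 * Every CPU, including the one driving the rollover, has its active ASID
 * captured, preserved as its reserved ASID, and marked allocated in the
 * bitmap so that a later allocation cannot hand it to a different mm.
 */
static void model_flush_context(uint64_t active[NR_CPUS_MODEL],
				uint64_t reserved[NR_CPUS_MODEL],
				uint8_t asid_map[NUM_ASIDS_MODEL])
{
	memset(asid_map, 0, NUM_ASIDS_MODEL);

	for (int i = 0; i < NR_CPUS_MODEL; i++) {
		/* models atomic64_xchg(&per_cpu(active_asids, i), 0) */
		uint64_t asid = active[i];
		active[i] = 0;

		/*
		 * A CPU that already went through a rollover but has not
		 * run another task since still owns only its reserved ASID.
		 */
		if (asid == 0)
			asid = reserved[i];

		/* models __set_bit(asid & ~ASID_MASK, asid_map) */
		asid_map[asid % NUM_ASIDS_MODEL] = 1;
		reserved[i] = asid;
	}
}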
arch/arm/mm/dma-mapping.c
... ... @@ -2048,6 +2048,9 @@
2048 2048 {
2049 2049 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
2050 2050  
  2051 + if (!mapping)
  2052 + return;
  2053 +
2051 2054 __arm_iommu_detach_device(dev);
2052 2055 arm_iommu_release_mapping(mapping);
2053 2056 }
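
For completeness, the shape of the change above in isolation: teardown
becomes a no-op for devices that were never attached to an IOMMU mapping,
instead of reaching __arm_iommu_detach_device() and triggering its warning.
A minimal sketch (simplified stand-in types, not the kernel structures):

struct dma_iommu_mapping;			/* opaque in this sketch */

struct archdata_model {				/* stand-in for dev->archdata */
	struct dma_iommu_mapping *mapping;	/* NULL if never attached */
};

static void teardown_model(struct archdata_model *archdata)
{
	if (!archdata->mapping)			/* nothing was set up: just return */
		return;

	/* ... detach the device and release the mapping ... */
}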