Commit 0ad4989d6270bec0a42598dd4d804569faedf228

Authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Another round of small ARM fixes.

  restore_user_regs early stack deallocation is buggy in the presence of
  FIQs which switch to SVC mode, and could lead to corrupted registers
  being returned to a user process given an inopportune FIQ event.

  Another bug was spotted in the ARM perf code where it could lose track
  of perf counter overflows, leading to incorrect perf results.

  Lastly, a bug in arm_add_memory() was spotted where the memory sizes
  aren't properly rounded.  As most people pass properly rounded sizes,
  this hasn't been noticed"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8292/1: mm: fix size rounding-down of arm_add_memory() function
  ARM: 8255/1: perf: Prevent wraparound during overflow
  ARM: 8266/1: Remove early stack deallocation from restore_user_regs

Showing 3 changed files

arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 
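The race closed here: the old code's writeback addressing (the `!` in the ldr of the saved pc) and the early `add sp` moved SP while saved registers below it were still being read, so an FIQ arriving in that window and switching to SVC mode could reuse that stack and clobber values still needed. A rough C analogy of the pattern after the fix (illustrative only; the struct and function are invented for this note and are not kernel code):

	/* C analogy, not kernel code: read the saved frame through a
	 * private copy of the stack pointer (like "mov r2, sp") and
	 * pop the real stack pointer only after every load has
	 * completed, so an interrupt arriving mid-sequence cannot
	 * land on memory we still need. */
	struct saved_frame {
		unsigned long r[16];		/* r0-r15; r[15] is the saved pc */
		unsigned long psr;		/* saved status register */
	};

	static unsigned long restore_frame(struct saved_frame **sp)
	{
		struct saved_frame *base = *sp;	/* private copy of sp */
		unsigned long pc = base->r[15];	/* read everything via base... */
		unsigned long psr = base->psr;
		(void)psr;
		*sp = base + 1;			/* ...deallocate only at the very end */
		return pc;
	}
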
arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
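Why halving works: the counter is programmed to -left and counts up toward the wrap point, and as long as left is at most half the counter range, the free-running counter cannot travel far enough during interrupt latency to overtake the previously programmed value, so an overflow cannot go unnoticed. A minimal standalone sketch of the clamping rule (assuming a hypothetical 32-bit up-counter; this is not the kernel's actual helper):

	/* Sketch only: clamp the requested period to half the range of
	 * a hypothetical 32-bit up-counter, as the fix does. */
	#include <stdint.h>

	static const uint64_t max_period = 0xffffffffULL;

	static uint32_t program_counter(int64_t left)
	{
		/* Never ask for more than half the range, leaving
		 * headroom so the hardware counter cannot overtake
		 * prev_count while the overflow IRQ is serviced. */
		if (left > (int64_t)(max_period >> 1))
			left = max_period >> 1;
		return (uint32_t)(-left);  /* overflows after 'left' events */
	}
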
arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@
 
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;
 
 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
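
The old code subtracted only the sub-page offset of start (start & ~PAGE_MASK), but PAGE_ALIGN() rounds start up, so the region actually shrinks by aligned_start - start; worse, a region smaller than that slack made size wrap around. A standalone worked example of the new arithmetic (a user-space sketch with illustrative page macros, not the kernel's definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096ULL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		/* A region starting 1 KiB into a page, only 2 KiB long. */
		uint64_t start = 0x400, size = 0x800;
		uint64_t aligned_start = PAGE_ALIGN(start);

		/* Old code did size -= start & ~PAGE_MASK, i.e.
		 * 0x800 - 0x400 = 0x400, even though aligned_start
		 * (0x1000) already lies past the end of the region
		 * (0xc00). The new code drops the region instead: */
		if (aligned_start > start + size)
			size = 0;		/* nothing page-aligned survives */
		else
			size -= aligned_start - start;

		printf("aligned_start=%#llx size=%#llx\n",
		       (unsigned long long)aligned_start,
		       (unsigned long long)size);
		return 0;
	}

Running this prints aligned_start=0x1000 size=0: the degenerate region vanishes cleanly instead of leaving an understated adjustment or an underflowed size.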