Commit cb4f9a296472ebd1cde88970194888e962150875

Authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Benjamin Herrenschmidt:
 "Here are a few fixes for 3.6 that were piling up while I was away or
  busy (I was mostly MIA a week or two before San Diego).

  Some fixes from Anton fixing up issues with our relatively new DSCR
  control feature, and a few other fixes that are either regressions or
  bugs nasty enough to warrant not waiting."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Don't use __put_user() in patch_instruction
  powerpc: Make sure IPI handlers see data written by IPI senders
  powerpc: Restore correct DSCR in context switch
  powerpc: Fix DSCR inheritance in copy_thread()
  powerpc: Keep thread.dscr and thread.dscr_inherit in sync
  powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
  powerpc/powernv: Always go into nap mode when CPU is offline
  powerpc: Give hypervisor decrementer interrupts their own handler
  powerpc/vphn: Fix arch_update_cpu_topology() return value

Showing 15 changed files:

arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
 
 #ifdef CONFIG_PSERIES_IDLE
 extern void update_smt_snooze_delay(int snooze);
arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(NMI_MASK, NMI_MASK);
 	DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+	DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@
 
 void doorbell_cause_ipi(int cpu, unsigned long data)
 {
+	/* Order previous accesses vs. msgsnd, which is treated as a store */
+	mb();
 	ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
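
The race being closed here: msgsnd carries no ordering with earlier stores, so data the sender wrote for the IPI handler could still be invisible on the target CPU when the doorbell arrives. A minimal sketch of the pairing, with a made-up ipi_payload variable standing in for the real shared state:

	static unsigned long ipi_payload;	/* hypothetical shared data */

	static void example_send(int cpu, unsigned long data)
	{
		ipi_payload = 42;		/* 1. publish the data */
		mb();				/* 2. full barrier, as added above */
		ppc_msgsnd(PPC_DBELL, 0, data);	/* 3. raise the doorbell IPI */
	}

The matching receive-side ordering is handled in smp_muxed_ipi_message_pass()/smp_ipi_demux() in the smp.c hunks below.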
arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@
 	li	r3,0
 	b	syscall_exit
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -509,9 +515,6 @@
 	mr	r1,r8		/* start using new stack pointer */
 	std	r7,PACAKSAVE(r13)
 
-	ld	r6,_CCR(r1)
-	mtcrf	0xFF,r6
-
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	ld	r0,THREAD_VRSAVE(r4)
@@ -520,13 +523,21 @@
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+	lwz	r6,THREAD_DSCR_INHERIT(r4)
+	ld	r7,DSCR_DEFAULT@toc(2)
 	ld	r0,THREAD_DSCR(r4)
-	cmpd	r0,r25
-	beq	1f
+	cmpwi	r6,0
+	bne	1f
+	ld	r0,0(r7)
+1:	cmpd	r0,r25
+	beq	2f
 	mtspr	SPRN_DSCR,r0
-1:
+2:
 END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #endif
+
+	ld	r6,_CCR(r1)
+	mtcrf	0xFF,r6
 
 	/* r3-r13 are destroyed -- Cort */
 	REST_8GPRS(14, r1)
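
In C terms, the DSCR half of this change amounts to roughly the following (a sketch only; prev_dscr stands for the outgoing thread's DSCR value, which the assembly keeps in r25, and next for the incoming task):

	unsigned long dscr = next->thread.dscr;

	if (!next->thread.dscr_inherit)	/* thread never set DSCR itself */
		dscr = dscr_default;	/* so the system-wide default applies */
	if (dscr != prev_dscr)		/* skip the SPR write when unchanged */
		mtspr(SPRN_DSCR, dscr);

Moving the CR restore (ld r6,_CCR(r1); mtcrf 0xFF,r6) below this block also keeps the new cmpwi/cmpd comparisons from clobbering the just-restored condition-register fields.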
arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
 	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@
 
 	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
 	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
 	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
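
With this, the 0x980 vector stops being funneled into the ordinary decrementer path and gets a dedicated hdecrementer exception whose C handler, .hdec_interrupt, is the deliberately empty hdec_interrupt() added in time.c below.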
arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@
 	lwz	r4,ADDROFF(powersave_nap)(r3)
 	cmpwi	0,r4,0
 	beqlr
+	/* fall through */
 
+_GLOBAL(power7_nap)
 	/* NAP is a state loss, we create a regs frame on the
 	 * stack, fill it up with the state we care about and
 	 * stick a pointer to it in PACAR1. We really only
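
The new _GLOBAL(power7_nap) label turns the body of the nap sequence into a callable entry point: power7_idle still honours powersave_nap and falls through, while callers such as the powernv CPU-offline path (see platforms/powernv/smp.c below) can call power7_nap() directly and nap unconditionally.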
arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_DSCR)) {
-		if (current->thread.dscr_inherit) {
-			p->thread.dscr_inherit = 1;
-			p->thread.dscr = current->thread.dscr;
-		} else if (0 != dscr_default) {
-			p->thread.dscr_inherit = 1;
-			p->thread.dscr = dscr_default;
-		} else {
-			p->thread.dscr_inherit = 0;
-			p->thread.dscr = 0;
-		}
+		p->thread.dscr_inherit = current->thread.dscr_inherit;
+		p->thread.dscr = current->thread.dscr;
 	}
 #endif
 
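copy_thread() no longer bakes dscr_default into the child at fork time; it simply copies both fields from the parent. The decision between a thread's own DSCR and the system default now happens at context switch (the entry_64.S hunk above) and in update_dscr() (sysfs.c below), so a later change to dscr_default reaches every thread that never set the register explicitly.
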
arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@
 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 	char *message = (char *)&info->messages;
 
+	/*
+	 * Order previous accesses before accesses in the IPI handler.
+	 */
+	smp_mb();
 	message[msg] = 1;
-	mb();
+	/*
+	 * cause_ipi functions are required to include a full barrier
+	 * before doing whatever causes the IPI.
+	 */
 	smp_ops->cause_ipi(cpu, info->data);
 }
 
@@ -211,7 +218,7 @@
 	mb();	/* order any irq clear */
 
 	do {
-		all = xchg_local(&info->messages, 0);
+		all = xchg(&info->messages, 0);
 
 #ifdef __BIG_ENDIAN
 		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
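
On the receive side, xchg_local() is only atomic with respect to the local CPU and implies no memory barrier, while xchg() on powerpc carries full-barrier semantics; the switch ensures the handler's subsequent loads of IPI data cannot be satisfied before the message word is actually fetched, pairing with the sender's smp_mb() above.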
arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@
 	return sprintf(buf, "%lx\n", dscr_default);
 }
 
+static void update_dscr(void *dummy)
+{
+	if (!current->thread.dscr_inherit) {
+		current->thread.dscr = dscr_default;
+		mtspr(SPRN_DSCR, dscr_default);
+	}
+}
+
 static ssize_t __used store_dscr_default(struct device *dev,
 		struct device_attribute *attr, const char *buf,
 		size_t count)
@@ -205,6 +213,8 @@
 	if (ret != 1)
 		return -EINVAL;
 	dscr_default = val;
+
+	on_each_cpu(update_dscr, NULL, 1);
 
 	return count;
 }
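
update_dscr() runs on every online CPU via on_each_cpu(), so a write to the sysfs attribute (visible as /sys/devices/system/cpu/dscr_default) takes effect immediately rather than at each CPU's next context switch; threads that set the DSCR themselves (dscr_inherit != 0) keep their explicit value.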
arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@
 	trace_timer_interrupt_exit(regs);
 }
 
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest.  We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
 {
arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@
 			cpu_has_feature(CPU_FTR_DSCR)) {
 		PPC_WARN_EMULATED(mtdscr, regs);
 		rd = (instword >> 21) & 0x1f;
-		mtspr(SPRN_DSCR, regs->gpr[rd]);
+		current->thread.dscr = regs->gpr[rd];
 		current->thread.dscr_inherit = 1;
+		mtspr(SPRN_DSCR, current->thread.dscr);
 		return 0;
 	}
 #endif
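
Recording the emulated value in thread.dscr before programming the SPR keeps the two in sync; previously only the register was written while thread.dscr kept its old contents, so the value a task set could be lost at the next context switch and the stale value inherited by forked children.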
arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@
 {
 	int err;
 
-	err = __put_user(instr, addr);
+	__put_user_size(instr, addr, 4, err);
 	if (err)
 		return err;
 	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
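
A note on the substitution: __put_user_size() is the lower-level primitive that expands to just the sized store plus the error assignment, whereas __put_user() wraps additional handling around it; the diff itself doesn't state the motivation, so presumably that extra wrapping is what patch_instruction() needs to avoid here.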
arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@
 
 /*
  * Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
 int arch_update_cpu_topology(void)
 {
-	int cpu, nid, old_nid;
+	int cpu, nid, old_nid, changed = 0;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	struct device *dev;
 
@@ -1466,9 +1466,10 @@
 		dev = get_cpu_device(cpu);
 		if (dev)
 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+		changed = 1;
 	}
 
-	return 1;
+	return changed;
 }
 
 static void topology_work_fn(struct work_struct *work)
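
arch_update_cpu_topology() previously claimed a topology change unconditionally; it now reports 1 only when at least one CPU actually moved to a new node, so the caller (the scheduler's domain-rebuild path) can skip work when VPHN reported no effective change.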
arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@
 {
 	unsigned int cpu;
 
-	/* If powersave_nap is enabled, use NAP mode, else just
-	 * spin aimlessly
-	 */
-	if (!powersave_nap) {
-		generic_mach_cpu_die();
-		return;
-	}
-
 	/* Standard hot unplug procedure */
 	local_irq_disable();
 	idle_task_exit();
@@ -128,7 +120,7 @@
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 	while (!generic_check_cpu_restart(cpu)) {
-		power7_idle();
+		power7_nap();
 		if (!generic_check_cpu_restart(cpu)) {
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 			/* We may be getting an IPI, so we re-enable
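
Combined with the power7_nap() entry point above, this removes the powersave_nap escape hatch for hot unplug: an offlined CPU now always naps, waking only to re-check generic_check_cpu_restart(), instead of spinning in generic_mach_cpu_die() whenever nap was disabled for the idle loop.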
arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
 	int hw_cpu = get_hard_smp_processor_id(n_cpu);
-	long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+	long rc;
+
+	/* Make sure all previous accesses are ordered before IPI sending */
+	mb();
+	rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
 	if (rc != H_SUCCESS) {
 		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
 			"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);