Commit d04c56f73c30a5e593202ecfcf25ed43d42363a2

Authored by Paul Mackerras
1 parent: 284a940675

[POWERPC] Lazy interrupt disabling for 64-bit machines

This implements a lazy strategy for disabling interrupts.  This means
that local_irq_disable() et al. just clear the 'interrupts are
enabled' flag in the paca.  If an interrupt comes along, the interrupt
entry code notices that interrupts are supposed to be disabled, and
clears the EE bit in SRR1, clears the 'interrupts are hard-enabled'
flag in the paca, and returns.  This means that interrupts only
actually get disabled in the processor when an interrupt comes along.
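
At the C level the disable side is nothing more than a flag store; the inline
below is taken from the include/asm-powerpc/hw_irq.h hunk of this patch, with
explanatory comments added.  The interrupt-entry side it relies on is the
masked_interrupt stub added to head_64.S further down.

    #include <asm/paca.h>                   /* get_paca() */

    static inline unsigned long local_irq_disable(void)
    {
            unsigned long flag = get_paca()->soft_enabled;

            /* Soft-disable only: no mtmsrd, MSR_EE is left alone.  If an
             * interrupt arrives, masked_interrupt sees soft_enabled == 0,
             * clears EE in SRR1 and paca->hard_enabled, and returns. */
            get_paca()->soft_enabled = 0;
            barrier();      /* keep the store ahead of the critical section */
            return flag;
    }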

When interrupts are enabled by local_irq_enable() et al., the code
sets the interrupts-enabled flag in the paca, and then checks whether
interrupts got hard-disabled.  If so, it also sets the EE bit in the
MSR to hard-enable the interrupts.
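
That enable path is the new local_irq_restore() in arch/powerpc/kernel/irq.c
(local_irq_enable() is just local_irq_restore(1)); here is the same function
with explanatory comments added.

    void local_irq_restore(unsigned long en)
    {
            get_paca()->soft_enabled = en;
            if (!en)
                    return;

            /* iSeries keeps its existing soft-disable scheme: if the
             * hypervisor queued anything while we were disabled, go and
             * handle it now. */
            if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                    if (get_paca()->lppaca_ptr->int_dword.any_int)
                            iseries_handle_interrupts();
                    return;
            }

            /* If no interrupt came in while we were soft-disabled, EE was
             * never cleared in the MSR and there is nothing more to do. */
            if (get_paca()->hard_enabled)
                    return;

            /* An interrupt was taken and masked; hard-enable again.  If the
             * decrementer went negative in the meantime, reload it so that a
             * decrementer interrupt is raised promptly rather than the tick
             * being lost. */
            get_paca()->hard_enabled = en;
            if ((int)mfspr(SPRN_DEC) < 0)
                    mtspr(SPRN_DEC, 1);
            hard_irq_enable();
    }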

This has the potential to improve performance, and also makes it
easier to make a kernel that can boot on iSeries and on other 64-bit
machines, since this lazy-disable strategy is very similar to the
soft-disable strategy that iSeries already uses.

This version renames paca->proc_enabled to paca->soft_enabled, and
changes a couple of soft-disables in the kexec code to hard-disables,
which should fix the crash that Michael Ellerman saw.  This doesn't
yet use a reserved CR field for the soft_enabled and hard_enabled
flags.  This applies on top of Stephen Rothwell's patches to make it
possible to build a combined iSeries/other kernel.
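
For reference, hard_irq_disable() and hard_irq_enable() bypass the lazy scheme
and flip MSR_EE directly; they are defined in the include/asm-powerpc/hw_irq.h
hunk of this patch:

    #define hard_irq_enable()       __mtmsrd(mfmsr() | MSR_EE, 1)
    #define hard_irq_disable()      __mtmsrd(mfmsr() & ~MSR_EE, 1)

The crash/kexec code uses these because the kernel state is already known to
be broken there, so merely soft-disabling is not enough; the processor must
genuinely stop taking interrupts.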

Signed-off-by: Paul Mackerras <paulus@samba.org>

Showing 12 changed files with 160 additions and 111 deletions

arch/powerpc/kernel/asm-offsets.c
... ... @@ -118,7 +118,8 @@
118 118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
119 119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
120 120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
121   - DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
  121 + DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
  122 + DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
122 123 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
123 124 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
124 125 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
arch/powerpc/kernel/crash.c
... ... @@ -111,7 +111,7 @@
111 111 if (!cpu_online(cpu))
112 112 return;
113 113  
114   - local_irq_disable();
  114 + hard_irq_disable();
115 115 if (!cpu_isset(cpu, cpus_in_crash))
116 116 crash_save_this_cpu(regs, cpu);
117 117 cpu_set(cpu, cpus_in_crash);
... ... @@ -289,7 +289,7 @@
289 289 * an SMP system.
290 290 * The kernel is broken so disable interrupts.
291 291 */
292   - local_irq_disable();
  292 + hard_irq_disable();
293 293  
294 294 for_each_irq(irq) {
295 295 struct irq_desc *desc = irq_desc + irq;
arch/powerpc/kernel/entry_64.S
... ... @@ -87,6 +87,10 @@
87 87 addi r9,r1,STACK_FRAME_OVERHEAD
88 88 ld r11,exception_marker@toc(r2)
89 89 std r11,-16(r9) /* "regshere" marker */
  90 + li r10,1
  91 + stb r10,PACASOFTIRQEN(r13)
  92 + stb r10,PACAHARDIRQEN(r13)
  93 + std r10,SOFTE(r1)
90 94 #ifdef CONFIG_PPC_ISERIES
91 95 BEGIN_FW_FTR_SECTION
92 96 /* Hack for handling interrupts when soft-enabling on iSeries */
... ... @@ -94,8 +98,6 @@
94 98 andi. r10,r12,MSR_PR /* from kernel */
95 99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
96 100 beq hardware_interrupt_entry
97   - lbz r10,PACAPROCENABLED(r13)
98   - std r10,SOFTE(r1)
99 101 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
100 102 #endif
101 103 mfmsr r11
... ... @@ -460,9 +462,9 @@
460 462 #endif
461 463  
462 464 restore:
  465 + ld r5,SOFTE(r1)
463 466 #ifdef CONFIG_PPC_ISERIES
464 467 BEGIN_FW_FTR_SECTION
465   - ld r5,SOFTE(r1)
466 468 cmpdi 0,r5,0
467 469 beq 4f
468 470 /* Check for pending interrupts (iSeries) */
... ... @@ -472,16 +474,16 @@
472 474 beq+ 4f /* skip do_IRQ if no interrupts */
473 475  
474 476 li r3,0
475   - stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
  477 + stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
476 478 ori r10,r10,MSR_EE
477 479 mtmsrd r10 /* hard-enable again */
478 480 addi r3,r1,STACK_FRAME_OVERHEAD
479 481 bl .do_IRQ
480 482 b .ret_from_except_lite /* loop back and handle more */
481   -
482   -4: stb r5,PACAPROCENABLED(r13)
  483 +4:
483 484 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
484 485 #endif
  486 + stb r5,PACASOFTIRQEN(r13)
485 487  
486 488 ld r3,_MSR(r1)
487 489 andi. r0,r3,MSR_RI
... ... @@ -538,25 +540,15 @@
538 540 /* Check that preempt_count() == 0 and interrupts are enabled */
539 541 lwz r8,TI_PREEMPT(r9)
540 542 cmpwi cr1,r8,0
541   -#ifdef CONFIG_PPC_ISERIES
542   -BEGIN_FW_FTR_SECTION
543 543 ld r0,SOFTE(r1)
544 544 cmpdi r0,0
545   -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
546   -#endif
547   -BEGIN_FW_FTR_SECTION
548   - andi. r0,r3,MSR_EE
549   -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
550 545 crandc eq,cr1*4+eq,eq
551 546 bne restore
552 547 /* here we are preempting the current task */
553 548 1:
554   -#ifdef CONFIG_PPC_ISERIES
555   -BEGIN_FW_FTR_SECTION
556 549 li r0,1
557   - stb r0,PACAPROCENABLED(r13)
558   -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
559   -#endif
  550 + stb r0,PACASOFTIRQEN(r13)
  551 + stb r0,PACAHARDIRQEN(r13)
560 552 ori r10,r10,MSR_EE
561 553 mtmsrd r10,1 /* reenable interrupts */
562 554 bl .preempt_schedule
... ... @@ -639,8 +631,7 @@
639 631 /* There is no way it is acceptable to get here with interrupts enabled,
640 632 * check it with the asm equivalent of WARN_ON
641 633 */
642   - mfmsr r6
643   - andi. r0,r6,MSR_EE
  634 + lbz r0,PACASOFTIRQEN(r13)
644 635 1: tdnei r0,0
645 636 .section __bug_table,"a"
646 637 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
... ... @@ -649,7 +640,13 @@
649 640 1: .asciz __FILE__
650 641 2: .asciz "enter_rtas"
651 642 .previous
652   -
  643 +
  644 + /* Hard-disable interrupts */
  645 + mfmsr r6
  646 + rldicl r7,r6,48,1
  647 + rotldi r7,r7,16
  648 + mtmsrd r7,1
  649 +
653 650 /* Unfortunately, the stack pointer and the MSR are also clobbered,
654 651 * so they are saved in the PACA which allows us to restore
655 652 * our original state after RTAS returns.
arch/powerpc/kernel/head_64.S
... ... @@ -35,9 +35,7 @@
35 35 #include <asm/thread_info.h>
36 36 #include <asm/firmware.h>
37 37  
38   -#ifdef CONFIG_PPC_ISERIES
39 38 #define DO_SOFT_DISABLE
40   -#endif
41 39  
42 40 /*
43 41 * We layout physical memory as follows:
... ... @@ -308,7 +306,9 @@
308 306 std r9,_LINK(r1); \
309 307 mfctr r10; /* save CTR in stackframe */ \
310 308 std r10,_CTR(r1); \
  309 + lbz r10,PACASOFTIRQEN(r13); \
311 310 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
  311 + std r10,SOFTE(r1); \
312 312 std r11,_XER(r1); \
313 313 li r9,(n)+1; \
314 314 std r9,_TRAP(r1); /* set trap number */ \
... ... @@ -343,6 +343,34 @@
343 343 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
344 344  
345 345  
  346 +#define MASKABLE_EXCEPTION_PSERIES(n, label) \
  347 + . = n; \
  348 + .globl label##_pSeries; \
  349 +label##_pSeries: \
  350 + HMT_MEDIUM; \
  351 + mtspr SPRN_SPRG1,r13; /* save r13 */ \
  352 + mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
  353 + std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
  354 + std r10,PACA_EXGEN+EX_R10(r13); \
  355 + lbz r10,PACASOFTIRQEN(r13); \
  356 + mfcr r9; \
  357 + cmpwi r10,0; \
  358 + beq masked_interrupt; \
  359 + mfspr r10,SPRN_SPRG1; \
  360 + std r10,PACA_EXGEN+EX_R13(r13); \
  361 + std r11,PACA_EXGEN+EX_R11(r13); \
  362 + std r12,PACA_EXGEN+EX_R12(r13); \
  363 + clrrdi r12,r13,32; /* get high part of &label */ \
  364 + mfmsr r10; \
  365 + mfspr r11,SPRN_SRR0; /* save SRR0 */ \
  366 + LOAD_HANDLER(r12,label##_common) \
  367 + ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
  368 + mtspr SPRN_SRR0,r12; \
  369 + mfspr r12,SPRN_SRR1; /* and SRR1 */ \
  370 + mtspr SPRN_SRR1,r10; \
  371 + rfid; \
  372 + b . /* prevent speculative execution */
  373 +
346 374 #define STD_EXCEPTION_ISERIES(n, label, area) \
347 375 .globl label##_iSeries; \
348 376 label##_iSeries: \
... ... @@ -358,40 +386,32 @@
358 386 HMT_MEDIUM; \
359 387 mtspr SPRN_SPRG1,r13; /* save r13 */ \
360 388 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
361   - lbz r10,PACAPROCENABLED(r13); \
  389 + lbz r10,PACASOFTIRQEN(r13); \
362 390 cmpwi 0,r10,0; \
363 391 beq- label##_iSeries_masked; \
364 392 EXCEPTION_PROLOG_ISERIES_2; \
365 393 b label##_common; \
366 394  
367   -#ifdef DO_SOFT_DISABLE
  395 +#ifdef CONFIG_PPC_ISERIES
368 396 #define DISABLE_INTS \
369   -BEGIN_FW_FTR_SECTION; \
370   - lbz r10,PACAPROCENABLED(r13); \
371 397 li r11,0; \
372   - std r10,SOFTE(r1); \
  398 + stb r11,PACASOFTIRQEN(r13); \
  399 +BEGIN_FW_FTR_SECTION; \
  400 + stb r11,PACAHARDIRQEN(r13); \
  401 +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
  402 +BEGIN_FW_FTR_SECTION; \
373 403 mfmsr r10; \
374   - stb r11,PACAPROCENABLED(r13); \
375 404 ori r10,r10,MSR_EE; \
376 405 mtmsrd r10,1; \
377 406 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
378 407  
379   -#define ENABLE_INTS \
380   -BEGIN_FW_FTR_SECTION; \
381   - lbz r10,PACAPROCENABLED(r13); \
382   - mfmsr r11; \
383   - std r10,SOFTE(r1); \
384   - ori r11,r11,MSR_EE; \
385   -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
386   -BEGIN_FW_FTR_SECTION; \
387   - ld r12,_MSR(r1); \
388   - mfmsr r11; \
389   - rlwimi r11,r12,0,MSR_EE; \
390   -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
391   - mtmsrd r11,1
  408 +#else
  409 +#define DISABLE_INTS \
  410 + li r11,0; \
  411 + stb r11,PACASOFTIRQEN(r13); \
  412 + stb r11,PACAHARDIRQEN(r13)
392 413  
393   -#else /* hard enable/disable interrupts */
394   -#define DISABLE_INTS
  414 +#endif /* CONFIG_PPC_ISERIES */
395 415  
396 416 #define ENABLE_INTS \
397 417 ld r12,_MSR(r1); \
... ... @@ -399,8 +419,6 @@
399 419 rlwimi r11,r12,0,MSR_EE; \
400 420 mtmsrd r11,1
401 421  
402   -#endif
403   -
404 422 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
405 423 .align 7; \
406 424 .globl label##_common; \
... ... @@ -541,11 +559,11 @@
541 559 mfspr r12,SPRN_SRR1 /* and SRR1 */
542 560 b .slb_miss_realmode /* Rel. branch works in real mode */
543 561  
544   - STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
  562 + MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
545 563 STD_EXCEPTION_PSERIES(0x600, alignment)
546 564 STD_EXCEPTION_PSERIES(0x700, program_check)
547 565 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
548   - STD_EXCEPTION_PSERIES(0x900, decrementer)
  566 + MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
549 567 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
550 568 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
551 569  
... ... @@ -597,8 +615,25 @@
597 615 /*** pSeries interrupt support ***/
598 616  
599 617 /* moved from 0xf00 */
600   - STD_EXCEPTION_PSERIES(., performance_monitor)
  618 + MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
601 619  
  620 +/*
  621 + * An interrupt came in while soft-disabled; clear EE in SRR1,
  622 + * clear paca->hard_enabled and return.
  623 + */
  624 +masked_interrupt:
  625 + stb r10,PACAHARDIRQEN(r13)
  626 + mtcrf 0x80,r9
  627 + ld r9,PACA_EXGEN+EX_R9(r13)
  628 + mfspr r10,SPRN_SRR1
  629 + rldicl r10,r10,48,1 /* clear MSR_EE */
  630 + rotldi r10,r10,16
  631 + mtspr SPRN_SRR1,r10
  632 + ld r10,PACA_EXGEN+EX_R10(r13)
  633 + mfspr r13,SPRN_SPRG1
  634 + rfid
  635 + b .
  636 +
602 637 .align 7
603 638 _GLOBAL(do_stab_bolted_pSeries)
604 639 mtcrf 0x80,r12
... ... @@ -952,7 +987,8 @@
952 987 REST_8GPRS(2, r1)
953 988  
954 989 mfmsr r10
955   - clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
  990 + rldicl r10,r10,48,1 /* clear EE */
  991 + rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
956 992 mtmsrd r10,1
957 993  
958 994 mtspr SPRN_SRR1,r12
... ... @@ -1877,11 +1913,16 @@
1877 1913 /* enable MMU and jump to start_secondary */
1878 1914 LOAD_REG_ADDR(r3, .start_secondary_prolog)
1879 1915 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1880   -#ifdef DO_SOFT_DISABLE
  1916 +#ifdef CONFIG_PPC_ISERIES
1881 1917 BEGIN_FW_FTR_SECTION
1882 1918 ori r4,r4,MSR_EE
1883 1919 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1884 1920 #endif
  1921 +BEGIN_FW_FTR_SECTION
  1922 + stb r7,PACASOFTIRQEN(r13)
  1923 + stb r7,PACAHARDIRQEN(r13)
  1924 +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
  1925 +
1885 1926 mtspr SPRN_SRR0,r3
1886 1927 mtspr SPRN_SRR1,r4
1887 1928 rfid
... ... @@ -2019,15 +2060,18 @@
2019 2060  
2020 2061 /* Load up the kernel context */
2021 2062 5:
2022   -#ifdef DO_SOFT_DISABLE
2023   -BEGIN_FW_FTR_SECTION
2024 2063 li r5,0
2025   - stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
  2064 + stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
  2065 +#ifdef CONFIG_PPC_ISERIES
  2066 +BEGIN_FW_FTR_SECTION
2026 2067 mfmsr r5
2027 2068 ori r5,r5,MSR_EE /* Hard Enabled */
2028 2069 mtmsrd r5
2029 2070 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
2030 2071 #endif
  2072 +BEGIN_FW_FTR_SECTION
  2073 + stb r5,PACAHARDIRQEN(r13)
  2074 +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
2031 2075  
2032 2076 bl .start_kernel
2033 2077  
arch/powerpc/kernel/idle_power4.S
... ... @@ -30,6 +30,13 @@
30 30 beqlr
31 31  
32 32 /* Go to NAP now */
  33 + mfmsr r7
  34 + rldicl r0,r7,48,1
  35 + rotldi r0,r0,16
  36 + mtmsrd r0,1 /* hard-disable interrupts */
  37 + li r0,1
  38 + stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
  39 + stb r0,PACAHARDIRQEN(r13)
33 40 BEGIN_FTR_SECTION
34 41 DSSALL
35 42 sync
... ... @@ -38,7 +45,6 @@
38 45 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
39 46 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
40 47 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
41   - mfmsr r7
42 48 ori r7,r7,MSR_EE
43 49 oris r7,r7,MSR_POW@h
44 50 1: sync
arch/powerpc/kernel/irq.c
... ... @@ -64,8 +64,9 @@
64 64 #include <asm/ptrace.h>
65 65 #include <asm/machdep.h>
66 66 #include <asm/udbg.h>
67   -#ifdef CONFIG_PPC_ISERIES
  67 +#ifdef CONFIG_PPC64
68 68 #include <asm/paca.h>
  69 +#include <asm/firmware.h>
69 70 #endif
70 71  
71 72 int __irq_offset_value;
... ... @@ -95,6 +96,27 @@
95 96 EXPORT_SYMBOL(irq_desc);
96 97  
97 98 int distribute_irqs = 1;
  99 +
  100 +void local_irq_restore(unsigned long en)
  101 +{
  102 + get_paca()->soft_enabled = en;
  103 + if (!en)
  104 + return;
  105 +
  106 + if (firmware_has_feature(FW_FEATURE_ISERIES)) {
  107 + if (get_paca()->lppaca_ptr->int_dword.any_int)
  108 + iseries_handle_interrupts();
  109 + return;
  110 + }
  111 +
  112 + if (get_paca()->hard_enabled)
  113 + return;
  114 + /* need to hard-enable interrupts here */
  115 + get_paca()->hard_enabled = en;
  116 + if ((int)mfspr(SPRN_DEC) < 0)
  117 + mtspr(SPRN_DEC, 1);
  118 + hard_irq_enable();
  119 +}
98 120 #endif /* CONFIG_PPC64 */
99 121  
100 122 int show_interrupts(struct seq_file *p, void *v)
arch/powerpc/kernel/ppc_ksyms.c
... ... @@ -49,6 +49,10 @@
49 49 #include <asm/commproc.h>
50 50 #endif
51 51  
  52 +#ifdef CONFIG_PPC64
  53 +EXPORT_SYMBOL(local_irq_restore);
  54 +#endif
  55 +
52 56 #ifdef CONFIG_PPC32
53 57 extern void transfer_to_handler(void);
54 58 extern void do_IRQ(struct pt_regs *regs);
arch/powerpc/kernel/setup_64.c
... ... @@ -223,8 +223,8 @@
223 223 {
224 224 struct paca_struct *lpaca = get_paca();
225 225  
226   - /* Mark enabled in PACA */
227   - lpaca->proc_enabled = 0;
  226 + /* Mark interrupts enabled in PACA */
  227 + lpaca->soft_enabled = 0;
228 228  
229 229 /* Initialize hash table for that CPU */
230 230 htab_initialize_secondary();
arch/powerpc/platforms/iseries/ksyms.c
... ... @@ -19,10 +19,4 @@
19 19 EXPORT_SYMBOL(HvCall5);
20 20 EXPORT_SYMBOL(HvCall6);
21 21 EXPORT_SYMBOL(HvCall7);
22   -
23   -#ifdef CONFIG_SMP
24   -EXPORT_SYMBOL(local_get_flags);
25   -EXPORT_SYMBOL(local_irq_disable);
26   -EXPORT_SYMBOL(local_irq_restore);
27   -#endif
arch/powerpc/platforms/iseries/misc.S
... ... @@ -19,39 +19,8 @@
19 19  
20 20 .text
21 21  
22   -/* unsigned long local_save_flags(void) */
23   -_GLOBAL(local_get_flags)
24   - lbz r3,PACAPROCENABLED(r13)
25   - blr
26   -
27   -/* unsigned long local_irq_disable(void) */
28   -_GLOBAL(local_irq_disable)
29   - lbz r3,PACAPROCENABLED(r13)
30   - li r4,0
31   - stb r4,PACAPROCENABLED(r13)
32   - blr /* Done */
33   -
34   -/* void local_irq_restore(unsigned long flags) */
35   -_GLOBAL(local_irq_restore)
36   - lbz r5,PACAPROCENABLED(r13)
37   - /* Check if things are setup the way we want _already_. */
38   - cmpw 0,r3,r5
39   - beqlr
40   - /* are we enabling interrupts? */
41   - cmpdi 0,r3,0
42   - stb r3,PACAPROCENABLED(r13)
43   - beqlr
44   - /* Check pending interrupts */
45   - /* A decrementer, IPI or PMC interrupt may have occurred
46   - * while we were in the hypervisor (which enables) */
47   - ld r4,PACALPPACAPTR(r13)
48   - ld r4,LPPACAANYINT(r4)
49   - cmpdi r4,0
50   - beqlr
51   -
52   - /*
53   - * Handle pending interrupts in interrupt context
54   - */
  22 +/* Handle pending interrupts in interrupt context */
  23 +_GLOBAL(iseries_handle_interrupts)
55 24 li r0,0x5555
56 25 sc
57 26 blr
include/asm-powerpc/hw_irq.h
... ... @@ -7,16 +7,30 @@
7 7 #ifdef __KERNEL__
8 8  
9 9 #include <linux/errno.h>
  10 +#include <linux/compiler.h>
10 11 #include <asm/ptrace.h>
11 12 #include <asm/processor.h>
12 13  
13 14 extern void timer_interrupt(struct pt_regs *);
14 15  
15   -#ifdef CONFIG_PPC_ISERIES
  16 +#ifdef CONFIG_PPC64
  17 +#include <asm/paca.h>
16 18  
17   -extern unsigned long local_get_flags(void);
18   -extern unsigned long local_irq_disable(void);
  19 +static inline unsigned long local_get_flags(void)
  20 +{
  21 + return get_paca()->soft_enabled;
  22 +}
  23 +
  24 +static inline unsigned long local_irq_disable(void)
  25 +{
  26 + unsigned long flag = get_paca()->soft_enabled;
  27 + get_paca()->soft_enabled = 0;
  28 + barrier();
  29 + return flag;
  30 +}
  31 +
19 32 extern void local_irq_restore(unsigned long);
  33 +extern void iseries_handle_interrupts(void);
20 34  
21 35 #define local_irq_enable() local_irq_restore(1)
22 36 #define local_save_flags(flags) ((flags) = local_get_flags())
... ... @@ -24,17 +38,14 @@
24 38  
25 39 #define irqs_disabled() (local_get_flags() == 0)
26 40  
  41 +#define hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
  42 +#define hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
  43 +
27 44 #else
28 45  
29 46 #if defined(CONFIG_BOOKE)
30 47 #define SET_MSR_EE(x) mtmsr(x)
31 48 #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
32   -#elif defined(__powerpc64__)
33   -#define SET_MSR_EE(x) __mtmsrd(x, 1)
34   -#define local_irq_restore(flags) do { \
35   - __asm__ __volatile__("": : :"memory"); \
36   - __mtmsrd((flags), 1); \
37   -} while(0)
38 49 #else
39 50 #define SET_MSR_EE(x) mtmsr(x)
40 51 #define local_irq_restore(flags) mtmsr(flags)
... ... @@ -81,7 +92,7 @@
81 92 #define local_irq_save(flags) local_irq_save_ptr(&flags)
82 93 #define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
83 94  
84   -#endif /* CONFIG_PPC_ISERIES */
  95 +#endif /* CONFIG_PPC64 */
85 96  
86 97 #define mask_irq(irq) \
87 98 ({ \
include/asm-powerpc/paca.h
... ... @@ -93,7 +93,8 @@
93 93 u64 stab_rr; /* stab/slb round-robin counter */
94 94 u64 saved_r1; /* r1 save for RTAS calls */
95 95 u64 saved_msr; /* MSR saved here by enter_rtas */
96   - u8 proc_enabled; /* irq soft-enable flag */
  96 + u8 soft_enabled; /* irq soft-enable flag */
  97 + u8 hard_enabled; /* set if irqs are enabled in MSR */
97 98 u8 io_sync; /* writel() needs spin_unlock sync */
98 99  
99 100 /* Stuff for accurate time accounting */