Commit be835674b55324c1abe973b15343c3663910c620

Authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/perf_event: Fix oops due to perf_event_do_pending call
  powerpc/swiotlb: Fix off by one in determining boundary of which ops to use

Showing 6 changed files (inline diff)
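
The swiotlb fix turns on a unit mismatch: dma_get_mask() returns the highest
address a device can reach, while lmb_end_of_DRAM() returns the address one
past the last byte of memory, so comparing the two directly is off by one.
A 32-bit device (mask 0xffffffff) on a machine with exactly 4GB of RAM can
address all of DRAM, yet the old test still routed it through the bounce
buffers. A minimal stand-alone sketch of the corrected comparison, with
illustrative values standing in for the kernel helpers:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the kernel helpers; the values are illustrative only. */
    static uint64_t dma_get_mask_example(void)    { return 0xffffffffULL;  } /* 32-bit device */
    static uint64_t lmb_end_of_DRAM_example(void) { return 0x100000000ULL; } /* 4GB of RAM    */

    int main(void)
    {
            uint64_t mask = dma_get_mask_example();
            uint64_t end  = lmb_end_of_DRAM_example();

            /* Old test: off by one, bounces even though the mask covers all of DRAM. */
            printf("old check, needs bounce: %d\n", mask < end);

            /* Fixed test: mask + 1 is the size of the range the device can address. */
            printf("new check, needs bounce: %d\n", (mask + 1) < end);
            return 0;
    }

In the diff below this is the one-line change in ppc_swiotlb_bus_notify(),
from dma_get_mask(dev) < lmb_end_of_DRAM() to (dma_get_mask(dev) + 1) <
lmb_end_of_DRAM().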

arch/powerpc/include/asm/hw_irq.h
1 /* 1 /*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> 2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */ 3 */
4 #ifndef _ASM_POWERPC_HW_IRQ_H 4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H 5 #define _ASM_POWERPC_HW_IRQ_H
6 6
7 #ifdef __KERNEL__ 7 #ifdef __KERNEL__
8 8
9 #include <linux/errno.h> 9 #include <linux/errno.h>
10 #include <linux/compiler.h> 10 #include <linux/compiler.h>
11 #include <asm/ptrace.h> 11 #include <asm/ptrace.h>
12 #include <asm/processor.h> 12 #include <asm/processor.h>
13 13
14 extern void timer_interrupt(struct pt_regs *); 14 extern void timer_interrupt(struct pt_regs *);
15 15
16 #ifdef CONFIG_PPC64 16 #ifdef CONFIG_PPC64
17 #include <asm/paca.h> 17 #include <asm/paca.h>
18 18
19 static inline unsigned long local_get_flags(void) 19 static inline unsigned long local_get_flags(void)
20 { 20 {
21 unsigned long flags; 21 unsigned long flags;
22 22
23 __asm__ __volatile__("lbz %0,%1(13)" 23 __asm__ __volatile__("lbz %0,%1(13)"
24 : "=r" (flags) 24 : "=r" (flags)
25 : "i" (offsetof(struct paca_struct, soft_enabled))); 25 : "i" (offsetof(struct paca_struct, soft_enabled)));
26 26
27 return flags; 27 return flags;
28 } 28 }
29 29
30 static inline unsigned long raw_local_irq_disable(void) 30 static inline unsigned long raw_local_irq_disable(void)
31 { 31 {
32 unsigned long flags, zero; 32 unsigned long flags, zero;
33 33
34 __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" 34 __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
35 : "=r" (flags), "=&r" (zero) 35 : "=r" (flags), "=&r" (zero)
36 : "i" (offsetof(struct paca_struct, soft_enabled)) 36 : "i" (offsetof(struct paca_struct, soft_enabled))
37 : "memory"); 37 : "memory");
38 38
39 return flags; 39 return flags;
40 } 40 }
41 41
42 extern void raw_local_irq_restore(unsigned long); 42 extern void raw_local_irq_restore(unsigned long);
43 extern void iseries_handle_interrupts(void); 43 extern void iseries_handle_interrupts(void);
44 44
45 #define raw_local_irq_enable() raw_local_irq_restore(1) 45 #define raw_local_irq_enable() raw_local_irq_restore(1)
46 #define raw_local_save_flags(flags) ((flags) = local_get_flags()) 46 #define raw_local_save_flags(flags) ((flags) = local_get_flags())
47 #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) 47 #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable())
48 48
49 #define raw_irqs_disabled() (local_get_flags() == 0) 49 #define raw_irqs_disabled() (local_get_flags() == 0)
50 #define raw_irqs_disabled_flags(flags) ((flags) == 0) 50 #define raw_irqs_disabled_flags(flags) ((flags) == 0)
51 51
52 #ifdef CONFIG_PPC_BOOK3E 52 #ifdef CONFIG_PPC_BOOK3E
53 #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); 53 #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory");
54 #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); 54 #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory");
55 #else 55 #else
56 #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 56 #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
57 #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) 57 #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
58 #endif 58 #endif
59 59
60 #define hard_irq_disable() \ 60 #define hard_irq_disable() \
61 do { \ 61 do { \
62 __hard_irq_disable(); \ 62 __hard_irq_disable(); \
63 get_paca()->soft_enabled = 0; \ 63 get_paca()->soft_enabled = 0; \
64 get_paca()->hard_enabled = 0; \ 64 get_paca()->hard_enabled = 0; \
65 } while(0) 65 } while(0)
66 66
67 #else 67 #else
68 68
69 #if defined(CONFIG_BOOKE) 69 #if defined(CONFIG_BOOKE)
70 #define SET_MSR_EE(x) mtmsr(x) 70 #define SET_MSR_EE(x) mtmsr(x)
71 #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 71 #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
72 #else 72 #else
73 #define SET_MSR_EE(x) mtmsr(x) 73 #define SET_MSR_EE(x) mtmsr(x)
74 #define raw_local_irq_restore(flags) mtmsr(flags) 74 #define raw_local_irq_restore(flags) mtmsr(flags)
75 #endif 75 #endif
76 76
77 static inline void raw_local_irq_disable(void) 77 static inline void raw_local_irq_disable(void)
78 { 78 {
79 #ifdef CONFIG_BOOKE 79 #ifdef CONFIG_BOOKE
80 __asm__ __volatile__("wrteei 0": : :"memory"); 80 __asm__ __volatile__("wrteei 0": : :"memory");
81 #else 81 #else
82 unsigned long msr; 82 unsigned long msr;
83 83
84 msr = mfmsr(); 84 msr = mfmsr();
85 SET_MSR_EE(msr & ~MSR_EE); 85 SET_MSR_EE(msr & ~MSR_EE);
86 #endif 86 #endif
87 } 87 }
88 88
89 static inline void raw_local_irq_enable(void) 89 static inline void raw_local_irq_enable(void)
90 { 90 {
91 #ifdef CONFIG_BOOKE 91 #ifdef CONFIG_BOOKE
92 __asm__ __volatile__("wrteei 1": : :"memory"); 92 __asm__ __volatile__("wrteei 1": : :"memory");
93 #else 93 #else
94 unsigned long msr; 94 unsigned long msr;
95 95
96 msr = mfmsr(); 96 msr = mfmsr();
97 SET_MSR_EE(msr | MSR_EE); 97 SET_MSR_EE(msr | MSR_EE);
98 #endif 98 #endif
99 } 99 }
100 100
101 static inline void raw_local_irq_save_ptr(unsigned long *flags) 101 static inline void raw_local_irq_save_ptr(unsigned long *flags)
102 { 102 {
103 unsigned long msr; 103 unsigned long msr;
104 msr = mfmsr(); 104 msr = mfmsr();
105 *flags = msr; 105 *flags = msr;
106 #ifdef CONFIG_BOOKE 106 #ifdef CONFIG_BOOKE
107 __asm__ __volatile__("wrteei 0": : :"memory"); 107 __asm__ __volatile__("wrteei 0": : :"memory");
108 #else 108 #else
109 SET_MSR_EE(msr & ~MSR_EE); 109 SET_MSR_EE(msr & ~MSR_EE);
110 #endif 110 #endif
111 } 111 }
112 112
113 #define raw_local_save_flags(flags) ((flags) = mfmsr()) 113 #define raw_local_save_flags(flags) ((flags) = mfmsr())
114 #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) 114 #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
115 #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) 115 #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
116 #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) 116 #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
117 117
118 #define hard_irq_disable() raw_local_irq_disable() 118 #define hard_irq_disable() raw_local_irq_disable()
119 119
120 static inline int irqs_disabled_flags(unsigned long flags) 120 static inline int irqs_disabled_flags(unsigned long flags)
121 { 121 {
122 return (flags & MSR_EE) == 0; 122 return (flags & MSR_EE) == 0;
123 } 123 }
124 124
125 #endif /* CONFIG_PPC64 */ 125 #endif /* CONFIG_PPC64 */
126 126
127 /* 127 /*
128 * interrupt-retrigger: should we handle this via lost interrupts and IPIs 128 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
129 * or should we not care like we do now ? --BenH. 129 * or should we not care like we do now ? --BenH.
130 */ 130 */
131 struct irq_chip; 131 struct irq_chip;
132 132
133 #ifdef CONFIG_PERF_EVENTS
134
135 #ifdef CONFIG_PPC64
136 static inline unsigned long test_perf_event_pending(void)
137 {
138 unsigned long x;
139
140 asm volatile("lbz %0,%1(13)"
141 : "=r" (x)
142 : "i" (offsetof(struct paca_struct, perf_event_pending)));
143 return x;
144 }
145
146 static inline void set_perf_event_pending(void)
147 {
148 asm volatile("stb %0,%1(13)" : :
149 "r" (1),
150 "i" (offsetof(struct paca_struct, perf_event_pending)));
151 }
152
153 static inline void clear_perf_event_pending(void)
154 {
155 asm volatile("stb %0,%1(13)" : :
156 "r" (0),
157 "i" (offsetof(struct paca_struct, perf_event_pending)));
158 }
159 #endif /* CONFIG_PPC64 */
160
161 #else /* CONFIG_PERF_EVENTS */
162
163 static inline unsigned long test_perf_event_pending(void)
164 {
165 return 0;
166 }
167
168 static inline void clear_perf_event_pending(void) {}
169 #endif /* CONFIG_PERF_EVENTS */
170
171 #endif /* __KERNEL__ */ 133 #endif /* __KERNEL__ */
172 #endif /* _ASM_POWERPC_HW_IRQ_H */ 134 #endif /* _ASM_POWERPC_HW_IRQ_H */
173 135
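
The block removed above (old lines 133-170) held the paca-based perf_event
helpers: test_perf_event_pending(), set_perf_event_pending() and
clear_perf_event_pending() all read or wrote a byte in the paca through r13.
Dropping them is the header-side half of the perf_event oops fix; the
matching PACAPERFPEND offset disappears from asm-offsets.c below. What stays
behind is the 64-bit lazy interrupt-disable scheme: raw_local_irq_disable()
only clears paca->soft_enabled rather than touching MSR[EE], and
raw_local_irq_restore() later re-enables and replays whatever arrived in the
meantime. A rough user-space model of that idea, with all names and the
replay hook hypothetical rather than the kernel's actual implementation:

    #include <stdio.h>
    #include <stdbool.h>

    /* Per-CPU state, modelled as globals for the sketch. */
    static bool soft_enabled = true;   /* stands in for paca->soft_enabled */
    static bool irq_pending  = false;  /* an interrupt arrived while soft-disabled */

    static unsigned long model_irq_disable(void)
    {
            bool old = soft_enabled;
            soft_enabled = false;      /* cheap: no MSR write on the fast path */
            return old;
    }

    static void model_irq_restore(unsigned long flags)
    {
            soft_enabled = flags;
            if (soft_enabled && irq_pending) {
                    irq_pending = false;
                    printf("replaying interrupt that arrived while soft-disabled\n");
            }
    }

    int main(void)
    {
            unsigned long flags = model_irq_disable();
            irq_pending = true;        /* pretend the decrementer fired here */
            model_irq_restore(flags);
            return 0;
    }
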
arch/powerpc/kernel/asm-offsets.c
1 /* 1 /*
2 * This program is used to generate definitions needed by 2 * This program is used to generate definitions needed by
3 * assembly language modules. 3 * assembly language modules.
4 * 4 *
5 * We use the technique used in the OSF Mach kernel code: 5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines, 6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the 7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output. 8 * #defines from the assembly-language output.
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 15
16 #include <linux/signal.h> 16 #include <linux/signal.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/errno.h> 19 #include <linux/errno.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <linux/types.h> 21 #include <linux/types.h>
22 #include <linux/mman.h> 22 #include <linux/mman.h>
23 #include <linux/mm.h> 23 #include <linux/mm.h>
24 #include <linux/suspend.h> 24 #include <linux/suspend.h>
25 #include <linux/hrtimer.h> 25 #include <linux/hrtimer.h>
26 #ifdef CONFIG_PPC64 26 #ifdef CONFIG_PPC64
27 #include <linux/time.h> 27 #include <linux/time.h>
28 #include <linux/hardirq.h> 28 #include <linux/hardirq.h>
29 #endif 29 #endif
30 #include <linux/kbuild.h> 30 #include <linux/kbuild.h>
31 31
32 #include <asm/io.h> 32 #include <asm/io.h>
33 #include <asm/page.h> 33 #include <asm/page.h>
34 #include <asm/pgtable.h> 34 #include <asm/pgtable.h>
35 #include <asm/processor.h> 35 #include <asm/processor.h>
36 #include <asm/cputable.h> 36 #include <asm/cputable.h>
37 #include <asm/thread_info.h> 37 #include <asm/thread_info.h>
38 #include <asm/rtas.h> 38 #include <asm/rtas.h>
39 #include <asm/vdso_datapage.h> 39 #include <asm/vdso_datapage.h>
40 #ifdef CONFIG_PPC64 40 #ifdef CONFIG_PPC64
41 #include <asm/paca.h> 41 #include <asm/paca.h>
42 #include <asm/lppaca.h> 42 #include <asm/lppaca.h>
43 #include <asm/cache.h> 43 #include <asm/cache.h>
44 #include <asm/compat.h> 44 #include <asm/compat.h>
45 #include <asm/mmu.h> 45 #include <asm/mmu.h>
46 #include <asm/hvcall.h> 46 #include <asm/hvcall.h>
47 #endif 47 #endif
48 #ifdef CONFIG_PPC_ISERIES 48 #ifdef CONFIG_PPC_ISERIES
49 #include <asm/iseries/alpaca.h> 49 #include <asm/iseries/alpaca.h>
50 #endif 50 #endif
51 #ifdef CONFIG_KVM 51 #ifdef CONFIG_KVM
52 #include <linux/kvm_host.h> 52 #include <linux/kvm_host.h>
53 #endif 53 #endif
54 54
55 #ifdef CONFIG_PPC32 55 #ifdef CONFIG_PPC32
56 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 56 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
57 #include "head_booke.h" 57 #include "head_booke.h"
58 #endif 58 #endif
59 #endif 59 #endif
60 60
61 #if defined(CONFIG_FSL_BOOKE) 61 #if defined(CONFIG_FSL_BOOKE)
62 #include "../mm/mmu_decl.h" 62 #include "../mm/mmu_decl.h"
63 #endif 63 #endif
64 64
65 int main(void) 65 int main(void)
66 { 66 {
67 DEFINE(THREAD, offsetof(struct task_struct, thread)); 67 DEFINE(THREAD, offsetof(struct task_struct, thread));
68 DEFINE(MM, offsetof(struct task_struct, mm)); 68 DEFINE(MM, offsetof(struct task_struct, mm));
69 DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); 69 DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
70 #ifdef CONFIG_PPC64 70 #ifdef CONFIG_PPC64
71 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); 71 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
72 DEFINE(SIGSEGV, SIGSEGV); 72 DEFINE(SIGSEGV, SIGSEGV);
73 DEFINE(NMI_MASK, NMI_MASK); 73 DEFINE(NMI_MASK, NMI_MASK);
74 #else 74 #else
75 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 75 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
76 #endif /* CONFIG_PPC64 */ 76 #endif /* CONFIG_PPC64 */
77 77
78 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 78 DEFINE(KSP, offsetof(struct thread_struct, ksp));
79 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); 79 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
80 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 80 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
81 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); 81 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
82 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); 82 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
83 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); 83 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
84 #ifdef CONFIG_ALTIVEC 84 #ifdef CONFIG_ALTIVEC
85 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); 85 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
86 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); 86 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
87 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); 87 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
88 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); 88 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
89 #endif /* CONFIG_ALTIVEC */ 89 #endif /* CONFIG_ALTIVEC */
90 #ifdef CONFIG_VSX 90 #ifdef CONFIG_VSX
91 DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr)); 91 DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
92 DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); 92 DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
93 #endif /* CONFIG_VSX */ 93 #endif /* CONFIG_VSX */
94 #ifdef CONFIG_PPC64 94 #ifdef CONFIG_PPC64
95 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); 95 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
96 #else /* CONFIG_PPC64 */ 96 #else /* CONFIG_PPC64 */
97 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); 97 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
98 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 98 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
99 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); 99 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
100 #endif 100 #endif
101 #ifdef CONFIG_SPE 101 #ifdef CONFIG_SPE
102 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); 102 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
103 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); 103 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
104 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); 104 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
105 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); 105 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
106 #endif /* CONFIG_SPE */ 106 #endif /* CONFIG_SPE */
107 #endif /* CONFIG_PPC64 */ 107 #endif /* CONFIG_PPC64 */
108 108
109 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 109 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
110 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 110 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
111 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 111 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
112 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 112 DEFINE(TI_TASK, offsetof(struct thread_info, task));
113 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 113 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
114 114
115 #ifdef CONFIG_PPC64 115 #ifdef CONFIG_PPC64
116 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); 116 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
117 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); 117 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
118 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); 118 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
119 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); 119 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
120 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 120 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
121 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 121 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
122 /* paca */ 122 /* paca */
123 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 123 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
124 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); 124 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
125 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); 125 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
126 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); 126 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
127 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); 127 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
128 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); 128 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
129 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); 129 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
130 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); 130 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
131 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); 131 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
132 DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); 132 DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
133 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); 133 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
134 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); 134 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
135 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); 135 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
136 DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
137 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 136 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
138 #ifdef CONFIG_PPC_MM_SLICES 137 #ifdef CONFIG_PPC_MM_SLICES
139 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, 138 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
140 context.low_slices_psize)); 139 context.low_slices_psize));
141 DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, 140 DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
142 context.high_slices_psize)); 141 context.high_slices_psize));
143 DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); 142 DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
144 #endif /* CONFIG_PPC_MM_SLICES */ 143 #endif /* CONFIG_PPC_MM_SLICES */
145 144
146 #ifdef CONFIG_PPC_BOOK3E 145 #ifdef CONFIG_PPC_BOOK3E
147 DEFINE(PACAPGD, offsetof(struct paca_struct, pgd)); 146 DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
148 DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd)); 147 DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd));
149 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); 148 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
150 DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb)); 149 DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb));
151 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); 150 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
152 DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit)); 151 DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit));
153 DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg)); 152 DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg));
154 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); 153 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
155 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); 154 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
156 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); 155 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
157 #endif /* CONFIG_PPC_BOOK3E */ 156 #endif /* CONFIG_PPC_BOOK3E */
158 157
159 #ifdef CONFIG_PPC_STD_MMU_64 158 #ifdef CONFIG_PPC_STD_MMU_64
160 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); 159 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
161 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); 160 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
162 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 161 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
163 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 162 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
164 DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); 163 DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
165 #ifdef CONFIG_PPC_MM_SLICES 164 #ifdef CONFIG_PPC_MM_SLICES
166 DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); 165 DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
167 #else 166 #else
168 DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp)); 167 DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
169 #endif /* CONFIG_PPC_MM_SLICES */ 168 #endif /* CONFIG_PPC_MM_SLICES */
170 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); 169 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
171 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); 170 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
172 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); 171 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
173 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); 172 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
174 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); 173 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
175 DEFINE(SLBSHADOW_STACKVSID, 174 DEFINE(SLBSHADOW_STACKVSID,
176 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); 175 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
177 DEFINE(SLBSHADOW_STACKESID, 176 DEFINE(SLBSHADOW_STACKESID,
178 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); 177 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
179 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 178 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
180 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); 179 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
181 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); 180 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
182 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); 181 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
183 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); 182 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
184 #endif /* CONFIG_PPC_STD_MMU_64 */ 183 #endif /* CONFIG_PPC_STD_MMU_64 */
185 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 184 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
186 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 185 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
187 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); 186 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
188 DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); 187 DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
189 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 188 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
190 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 189 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
191 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); 190 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
192 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 191 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
193 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER 192 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
194 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest)); 193 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
195 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb)); 194 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
196 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max)); 195 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
197 DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr)); 196 DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
198 DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer)); 197 DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
199 DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0])); 198 DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
200 DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1])); 199 DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
201 DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2])); 200 DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
202 DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3])); 201 DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
203 DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4])); 202 DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
204 DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5])); 203 DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
205 DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6])); 204 DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
206 DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7])); 205 DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
207 DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8])); 206 DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
208 DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9])); 207 DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
209 DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10])); 208 DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
210 DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11])); 209 DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
211 DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12])); 210 DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
212 DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13])); 211 DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
213 DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1)); 212 DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
214 DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2)); 213 DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
215 DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct, 214 DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
216 shadow_vcpu.vmhandler)); 215 shadow_vcpu.vmhandler));
217 DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct, 216 DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
218 shadow_vcpu.scratch0)); 217 shadow_vcpu.scratch0));
219 DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct, 218 DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
220 shadow_vcpu.scratch1)); 219 shadow_vcpu.scratch1));
221 #endif 220 #endif
222 #endif /* CONFIG_PPC64 */ 221 #endif /* CONFIG_PPC64 */
223 222
224 /* RTAS */ 223 /* RTAS */
225 DEFINE(RTASBASE, offsetof(struct rtas_t, base)); 224 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
226 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); 225 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
227 226
228 /* Interrupt register frame */ 227 /* Interrupt register frame */
229 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); 228 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
230 DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); 229 DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
231 #ifdef CONFIG_PPC64 230 #ifdef CONFIG_PPC64
232 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); 231 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
233 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ 232 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
234 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 233 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
235 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 234 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
236 235
237 /* hcall statistics */ 236 /* hcall statistics */
238 DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats)); 237 DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
239 DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls)); 238 DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
240 DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total)); 239 DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
241 DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total)); 240 DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
242 #endif /* CONFIG_PPC64 */ 241 #endif /* CONFIG_PPC64 */
243 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); 242 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
244 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); 243 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
245 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); 244 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
246 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); 245 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
247 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); 246 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
248 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); 247 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
249 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); 248 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
250 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); 249 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
251 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); 250 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
252 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); 251 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
253 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); 252 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
254 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); 253 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
255 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); 254 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
256 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); 255 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
257 #ifndef CONFIG_PPC64 256 #ifndef CONFIG_PPC64
258 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); 257 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
259 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); 258 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
260 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); 259 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
261 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); 260 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
262 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); 261 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
263 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); 262 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
264 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); 263 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
265 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); 264 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
266 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); 265 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
267 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); 266 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
268 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); 267 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
269 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); 268 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
270 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); 269 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
271 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); 270 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
272 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); 271 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
273 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); 272 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
274 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); 273 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
275 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); 274 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
276 #endif /* CONFIG_PPC64 */ 275 #endif /* CONFIG_PPC64 */
277 /* 276 /*
278 * Note: these symbols include _ because they overlap with special 277 * Note: these symbols include _ because they overlap with special
279 * register names 278 * register names
280 */ 279 */
281 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); 280 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
282 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); 281 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
283 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); 282 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
284 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); 283 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
285 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); 284 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
286 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); 285 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
287 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); 286 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
288 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); 287 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
289 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); 288 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
290 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); 289 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
291 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); 290 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
292 #ifndef CONFIG_PPC64 291 #ifndef CONFIG_PPC64
293 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); 292 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
294 /* 293 /*
295 * The PowerPC 400-class & Book-E processors have neither the DAR 294 * The PowerPC 400-class & Book-E processors have neither the DAR
296 * nor the DSISR SPRs. Hence, we overload them to hold the similar 295 * nor the DSISR SPRs. Hence, we overload them to hold the similar
297 * DEAR and ESR SPRs for such processors. For critical interrupts 296 * DEAR and ESR SPRs for such processors. For critical interrupts
298 * we use them to hold SRR0 and SRR1. 297 * we use them to hold SRR0 and SRR1.
299 */ 298 */
300 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); 299 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
301 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); 300 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
302 #else /* CONFIG_PPC64 */ 301 #else /* CONFIG_PPC64 */
303 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); 302 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
304 303
305 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ 304 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
306 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); 305 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
307 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); 306 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
308 #endif /* CONFIG_PPC64 */ 307 #endif /* CONFIG_PPC64 */
309 308
310 #if defined(CONFIG_PPC32) 309 #if defined(CONFIG_PPC32)
311 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 310 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
312 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); 311 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
313 DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); 312 DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
314 /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ 313 /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
315 DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); 314 DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
316 DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); 315 DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
317 DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); 316 DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
318 DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); 317 DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
319 DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); 318 DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
320 DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); 319 DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
321 DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); 320 DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
322 DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); 321 DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
323 DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); 322 DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
324 DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); 323 DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
325 DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); 324 DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
326 DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); 325 DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
327 DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit)); 326 DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
328 #endif 327 #endif
329 #endif 328 #endif
330 DEFINE(CLONE_VM, CLONE_VM); 329 DEFINE(CLONE_VM, CLONE_VM);
331 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); 330 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
332 331
333 #ifndef CONFIG_PPC64 332 #ifndef CONFIG_PPC64
334 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); 333 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
335 #endif /* ! CONFIG_PPC64 */ 334 #endif /* ! CONFIG_PPC64 */
336 335
337 /* About the CPU features table */ 336 /* About the CPU features table */
338 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 337 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
339 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 338 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
340 DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); 339 DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
341 340
342 DEFINE(pbe_address, offsetof(struct pbe, address)); 341 DEFINE(pbe_address, offsetof(struct pbe, address));
343 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 342 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
344 DEFINE(pbe_next, offsetof(struct pbe, next)); 343 DEFINE(pbe_next, offsetof(struct pbe, next));
345 344
346 #ifndef CONFIG_PPC64 345 #ifndef CONFIG_PPC64
347 DEFINE(TASK_SIZE, TASK_SIZE); 346 DEFINE(TASK_SIZE, TASK_SIZE);
348 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 347 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
349 #endif /* ! CONFIG_PPC64 */ 348 #endif /* ! CONFIG_PPC64 */
350 349
351 /* datapage offsets for use by vdso */ 350 /* datapage offsets for use by vdso */
352 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); 351 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
353 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); 352 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
354 DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); 353 DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
355 DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec)); 354 DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
356 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); 355 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
357 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); 356 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
358 DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); 357 DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
359 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); 358 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
360 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 359 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
361 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 360 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
362 DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); 361 DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
363 DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); 362 DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
364 DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); 363 DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
365 DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); 364 DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
366 DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size)); 365 DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
367 #ifdef CONFIG_PPC64 366 #ifdef CONFIG_PPC64
368 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); 367 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
369 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); 368 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
370 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); 369 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
371 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); 370 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
372 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); 371 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
373 DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); 372 DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
374 DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); 373 DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
375 DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); 374 DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
376 DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); 375 DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
377 #else 376 #else
378 DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); 377 DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
379 DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); 378 DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
380 DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); 379 DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
381 DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); 380 DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
382 #endif 381 #endif
383 /* timeval/timezone offsets for use by vdso */ 382 /* timeval/timezone offsets for use by vdso */
384 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); 383 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
385 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); 384 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
386 385
387 /* Other bits used by the vdso */ 386 /* Other bits used by the vdso */
388 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 387 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
389 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 388 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
390 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); 389 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
391 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 390 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
392 391
393 #ifdef CONFIG_BUG 392 #ifdef CONFIG_BUG
394 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); 393 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
395 #endif 394 #endif
396 395
397 #ifdef CONFIG_PPC_ISERIES 396 #ifdef CONFIG_PPC_ISERIES
398 /* the assembler miscalculates the VSID values */ 397 /* the assembler miscalculates the VSID values */
399 DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET)); 398 DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
400 DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET)); 399 DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
401 DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START)); 400 DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
402 DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START)); 401 DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
403 402
404 /* alpaca */ 403 /* alpaca */
405 DEFINE(ALPACA_SIZE, sizeof(struct alpaca)); 404 DEFINE(ALPACA_SIZE, sizeof(struct alpaca));
406 #endif 405 #endif
407 406
408 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); 407 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
409 DEFINE(PTE_SIZE, sizeof(pte_t)); 408 DEFINE(PTE_SIZE, sizeof(pte_t));
410 409
411 #ifdef CONFIG_KVM 410 #ifdef CONFIG_KVM
412 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 411 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
413 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 412 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
414 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 413 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
415 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 414 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
416 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); 415 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
417 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); 416 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
418 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); 417 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
419 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 418 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
420 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 419 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
421 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 420 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
422 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 421 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
423 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 422 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
424 423
425 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 424 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
426 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 425 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
427 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 426 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
428 427
429 /* book3s_64 */ 428 /* book3s_64 */
430 #ifdef CONFIG_PPC64 429 #ifdef CONFIG_PPC64
431 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); 430 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
432 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); 431 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
433 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2)); 432 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
434 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); 433 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
435 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 434 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
436 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); 435 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
437 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); 436 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
438 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); 437 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
439 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); 438 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
440 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); 439 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
441 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 440 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
442 #else 441 #else
443 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 442 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
444 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); 443 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
445 #endif /* CONFIG_PPC64 */ 444 #endif /* CONFIG_PPC64 */
446 #endif 445 #endif
447 #ifdef CONFIG_44x 446 #ifdef CONFIG_44x
448 DEFINE(PGD_T_LOG2, PGD_T_LOG2); 447 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
449 DEFINE(PTE_T_LOG2, PTE_T_LOG2); 448 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
450 #endif 449 #endif
451 450
452 #ifdef CONFIG_KVM_EXIT_TIMING 451 #ifdef CONFIG_KVM_EXIT_TIMING
453 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, 452 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
454 arch.timing_exit.tv32.tbu)); 453 arch.timing_exit.tv32.tbu));
455 DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, 454 DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
456 arch.timing_exit.tv32.tbl)); 455 arch.timing_exit.tv32.tbl));
457 DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, 456 DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
458 arch.timing_last_enter.tv32.tbu)); 457 arch.timing_last_enter.tv32.tbu));
459 DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, 458 DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
460 arch.timing_last_enter.tv32.tbl)); 459 arch.timing_last_enter.tv32.tbl));
461 #endif 460 #endif
462 461
463 return 0; 462 return 0;
464 } 463 }
465 464
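
As the header comment in asm-offsets.c explains, this file is never linked
into the kernel: each DEFINE() expands to an asm statement that embeds the
symbol name and its compile-time value, the file is compiled only as far as
assembly output, and a build script extracts the values into a generated
header so hand-written assembly can use structure offsets such as
PACASOFTIRQEN or GPR1. A minimal stand-alone illustration of the same
pattern; the marker syntax and the example structure are hypothetical, not
the kernel's actual kbuild machinery:

    #include <stddef.h>

    struct example_paca {
            unsigned long kstack;
            unsigned char soft_enabled;
    };

    /*
     * Emit "->NAME value" into the generated assembly. Like the real
     * asm-offsets.c, this is only ever compiled with -S, never assembled.
     */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    int main(void)
    {
            DEFINE(PACAKSAVE, offsetof(struct example_paca, kstack));
            DEFINE(PACASOFTIRQEN, offsetof(struct example_paca, soft_enabled));
            return 0;
    }

Compiling with "gcc -S" and grepping the resulting .s file for lines that
start with "->" recovers each name with its offset as a plain number.
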
arch/powerpc/kernel/dma-swiotlb.c
1 /* 1 /*
2 * Contains routines needed to support swiotlb for ppc. 2 * Contains routines needed to support swiotlb for ppc.
3 * 3 *
4 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor 4 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
5 * Author: Becky Bruce
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your 9 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. 10 * option) any later version.
10 * 11 *
11 */ 12 */
12 13
13 #include <linux/dma-mapping.h> 14 #include <linux/dma-mapping.h>
14 #include <linux/pfn.h> 15 #include <linux/pfn.h>
15 #include <linux/of_platform.h> 16 #include <linux/of_platform.h>
16 #include <linux/platform_device.h> 17 #include <linux/platform_device.h>
17 #include <linux/pci.h> 18 #include <linux/pci.h>
18 19
19 #include <asm/machdep.h> 20 #include <asm/machdep.h>
20 #include <asm/swiotlb.h> 21 #include <asm/swiotlb.h>
21 #include <asm/dma.h> 22 #include <asm/dma.h>
22 #include <asm/abs_addr.h> 23 #include <asm/abs_addr.h>
23 24
24 unsigned int ppc_swiotlb_enable; 25 unsigned int ppc_swiotlb_enable;
25 26
26 /* 27 /*
27 * At the moment, all platforms that use this code only require 28 * At the moment, all platforms that use this code only require
28 * swiotlb to be used if we're operating on HIGHMEM. Since 29 * swiotlb to be used if we're operating on HIGHMEM. Since
29 * we don't ever call anything other than map_sg, unmap_sg, 30 * we don't ever call anything other than map_sg, unmap_sg,
30 * map_page, and unmap_page on highmem, use normal dma_ops 31 * map_page, and unmap_page on highmem, use normal dma_ops
31 * for everything else. 32 * for everything else.
32 */ 33 */
33 struct dma_map_ops swiotlb_dma_ops = { 34 struct dma_map_ops swiotlb_dma_ops = {
34 .alloc_coherent = dma_direct_alloc_coherent, 35 .alloc_coherent = dma_direct_alloc_coherent,
35 .free_coherent = dma_direct_free_coherent, 36 .free_coherent = dma_direct_free_coherent,
36 .map_sg = swiotlb_map_sg_attrs, 37 .map_sg = swiotlb_map_sg_attrs,
37 .unmap_sg = swiotlb_unmap_sg_attrs, 38 .unmap_sg = swiotlb_unmap_sg_attrs,
38 .dma_supported = swiotlb_dma_supported, 39 .dma_supported = swiotlb_dma_supported,
39 .map_page = swiotlb_map_page, 40 .map_page = swiotlb_map_page,
40 .unmap_page = swiotlb_unmap_page, 41 .unmap_page = swiotlb_unmap_page,
41 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 42 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
42 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 43 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
43 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
44 .sync_sg_for_device = swiotlb_sync_sg_for_device, 45 .sync_sg_for_device = swiotlb_sync_sg_for_device,
45 .mapping_error = swiotlb_dma_mapping_error, 46 .mapping_error = swiotlb_dma_mapping_error,
46 }; 47 };
47 48
48 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev) 49 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
49 { 50 {
50 struct pci_controller *hose; 51 struct pci_controller *hose;
51 struct dev_archdata *sd; 52 struct dev_archdata *sd;
52 53
53 hose = pci_bus_to_host(pdev->bus); 54 hose = pci_bus_to_host(pdev->bus);
54 sd = &pdev->dev.archdata; 55 sd = &pdev->dev.archdata;
55 sd->max_direct_dma_addr = 56 sd->max_direct_dma_addr =
56 hose->dma_window_base_cur + hose->dma_window_size; 57 hose->dma_window_base_cur + hose->dma_window_size;
57 } 58 }
58 59
59 static int ppc_swiotlb_bus_notify(struct notifier_block *nb, 60 static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
60 unsigned long action, void *data) 61 unsigned long action, void *data)
61 { 62 {
62 struct device *dev = data; 63 struct device *dev = data;
63 struct dev_archdata *sd; 64 struct dev_archdata *sd;
64 65
65 /* We are only interested in device addition */ 66 /* We are only interested in device addition */
66 if (action != BUS_NOTIFY_ADD_DEVICE) 67 if (action != BUS_NOTIFY_ADD_DEVICE)
67 return 0; 68 return 0;
68 69
69 sd = &dev->archdata; 70 sd = &dev->archdata;
70 sd->max_direct_dma_addr = 0; 71 sd->max_direct_dma_addr = 0;
71 72
72 /* May need to bounce if the device can't address all of DRAM */ 73 /* May need to bounce if the device can't address all of DRAM */
73 if (dma_get_mask(dev) < lmb_end_of_DRAM()) 74 if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
74 set_dma_ops(dev, &swiotlb_dma_ops); 75 set_dma_ops(dev, &swiotlb_dma_ops);
75 76
76 return NOTIFY_DONE; 77 return NOTIFY_DONE;
77 } 78 }
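This is the swiotlb off-by-one fix named in the merge: dma_get_mask() returns the highest address the device can reach (an inclusive mask, e.g. 0xffffffff for a 32-bit device), while lmb_end_of_DRAM() returns the first address past the end of memory (an exclusive bound). Comparing the two directly makes a device whose mask exactly covers DRAM look incapable, so it gets the bounce-buffer ops for no reason; adding 1 puts the mask on the same exclusive convention before the comparison. A minimal standalone sketch of the two tests, using made-up values rather than the kernel calls:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dma_mask    = 0xffffffffULL;   /* device reaches 0 .. 4GB-1 (inclusive) */
            uint64_t end_of_dram = 0x100000000ULL;  /* first byte past 4GB of RAM (exclusive) */

            /* Old test: inclusive mask vs. exclusive end -> bounces needlessly. */
            printf("old: needs swiotlb = %d\n", dma_mask < end_of_dram);        /* prints 1 */

            /* Fixed test: convert the mask to an exclusive bound first. */
            printf("new: needs swiotlb = %d\n", (dma_mask + 1) < end_of_dram);  /* prints 0 */
            return 0;
    }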
78 79
79 static struct notifier_block ppc_swiotlb_plat_bus_notifier = { 80 static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
80 .notifier_call = ppc_swiotlb_bus_notify, 81 .notifier_call = ppc_swiotlb_bus_notify,
81 .priority = 0, 82 .priority = 0,
82 }; 83 };
83 84
84 static struct notifier_block ppc_swiotlb_of_bus_notifier = { 85 static struct notifier_block ppc_swiotlb_of_bus_notifier = {
85 .notifier_call = ppc_swiotlb_bus_notify, 86 .notifier_call = ppc_swiotlb_bus_notify,
86 .priority = 0, 87 .priority = 0,
87 }; 88 };
88 89
89 int __init swiotlb_setup_bus_notifier(void) 90 int __init swiotlb_setup_bus_notifier(void)
90 { 91 {
91 bus_register_notifier(&platform_bus_type, 92 bus_register_notifier(&platform_bus_type,
92 &ppc_swiotlb_plat_bus_notifier); 93 &ppc_swiotlb_plat_bus_notifier);
93 bus_register_notifier(&of_platform_bus_type, 94 bus_register_notifier(&of_platform_bus_type,
94 &ppc_swiotlb_of_bus_notifier); 95 &ppc_swiotlb_of_bus_notifier);
95 96
96 return 0; 97 return 0;
97 } 98 }
98 99
arch/powerpc/kernel/entry_64.S
1 /* 1 /*
2 * PowerPC version 2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras. 6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support 7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras. 8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras. 9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). 10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 * 11 *
12 * This file contains the system call entry code, context switch 12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC. 13 * code, and exception/interrupt return code for PowerPC.
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License 16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21 #include <linux/errno.h> 21 #include <linux/errno.h>
22 #include <asm/unistd.h> 22 #include <asm/unistd.h>
23 #include <asm/processor.h> 23 #include <asm/processor.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/mmu.h> 25 #include <asm/mmu.h>
26 #include <asm/thread_info.h> 26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h> 27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h> 28 #include <asm/asm-offsets.h>
29 #include <asm/cputable.h> 29 #include <asm/cputable.h>
30 #include <asm/firmware.h> 30 #include <asm/firmware.h>
31 #include <asm/bug.h> 31 #include <asm/bug.h>
32 #include <asm/ptrace.h> 32 #include <asm/ptrace.h>
33 #include <asm/irqflags.h> 33 #include <asm/irqflags.h>
34 #include <asm/ftrace.h> 34 #include <asm/ftrace.h>
35 35
36 /* 36 /*
37 * System calls. 37 * System calls.
38 */ 38 */
39 .section ".toc","aw" 39 .section ".toc","aw"
40 .SYS_CALL_TABLE: 40 .SYS_CALL_TABLE:
41 .tc .sys_call_table[TC],.sys_call_table 41 .tc .sys_call_table[TC],.sys_call_table
42 42
43 /* This value is used to mark exception frames on the stack. */ 43 /* This value is used to mark exception frames on the stack. */
44 exception_marker: 44 exception_marker:
45 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER 45 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
46 46
47 .section ".text" 47 .section ".text"
48 .align 7 48 .align 7
49 49
50 #undef SHOW_SYSCALLS 50 #undef SHOW_SYSCALLS
51 51
52 .globl system_call_common 52 .globl system_call_common
53 system_call_common: 53 system_call_common:
54 andi. r10,r12,MSR_PR 54 andi. r10,r12,MSR_PR
55 mr r10,r1 55 mr r10,r1
56 addi r1,r1,-INT_FRAME_SIZE 56 addi r1,r1,-INT_FRAME_SIZE
57 beq- 1f 57 beq- 1f
58 ld r1,PACAKSAVE(r13) 58 ld r1,PACAKSAVE(r13)
59 1: std r10,0(r1) 59 1: std r10,0(r1)
60 std r11,_NIP(r1) 60 std r11,_NIP(r1)
61 std r12,_MSR(r1) 61 std r12,_MSR(r1)
62 std r0,GPR0(r1) 62 std r0,GPR0(r1)
63 std r10,GPR1(r1) 63 std r10,GPR1(r1)
64 ACCOUNT_CPU_USER_ENTRY(r10, r11) 64 ACCOUNT_CPU_USER_ENTRY(r10, r11)
65 /* 65 /*
66 * This "crclr so" clears CR0.SO, which is the error indication on 66 * This "crclr so" clears CR0.SO, which is the error indication on
67 * return from this system call. There must be no cmp instruction 67 * return from this system call. There must be no cmp instruction
68 * between it and the "mfcr r9" below, otherwise if XER.SO is set, 68 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
69 * CR0.SO will get set, causing all system calls to appear to fail. 69 * CR0.SO will get set, causing all system calls to appear to fail.
70 */ 70 */
71 crclr so 71 crclr so
72 std r2,GPR2(r1) 72 std r2,GPR2(r1)
73 std r3,GPR3(r1) 73 std r3,GPR3(r1)
74 std r4,GPR4(r1) 74 std r4,GPR4(r1)
75 std r5,GPR5(r1) 75 std r5,GPR5(r1)
76 std r6,GPR6(r1) 76 std r6,GPR6(r1)
77 std r7,GPR7(r1) 77 std r7,GPR7(r1)
78 std r8,GPR8(r1) 78 std r8,GPR8(r1)
79 li r11,0 79 li r11,0
80 std r11,GPR9(r1) 80 std r11,GPR9(r1)
81 std r11,GPR10(r1) 81 std r11,GPR10(r1)
82 std r11,GPR11(r1) 82 std r11,GPR11(r1)
83 std r11,GPR12(r1) 83 std r11,GPR12(r1)
84 std r9,GPR13(r1) 84 std r9,GPR13(r1)
85 mfcr r9 85 mfcr r9
86 mflr r10 86 mflr r10
87 li r11,0xc01 87 li r11,0xc01
88 std r9,_CCR(r1) 88 std r9,_CCR(r1)
89 std r10,_LINK(r1) 89 std r10,_LINK(r1)
90 std r11,_TRAP(r1) 90 std r11,_TRAP(r1)
91 mfxer r9 91 mfxer r9
92 mfctr r10 92 mfctr r10
93 std r9,_XER(r1) 93 std r9,_XER(r1)
94 std r10,_CTR(r1) 94 std r10,_CTR(r1)
95 std r3,ORIG_GPR3(r1) 95 std r3,ORIG_GPR3(r1)
96 ld r2,PACATOC(r13) 96 ld r2,PACATOC(r13)
97 addi r9,r1,STACK_FRAME_OVERHEAD 97 addi r9,r1,STACK_FRAME_OVERHEAD
98 ld r11,exception_marker@toc(r2) 98 ld r11,exception_marker@toc(r2)
99 std r11,-16(r9) /* "regshere" marker */ 99 std r11,-16(r9) /* "regshere" marker */
100 #ifdef CONFIG_TRACE_IRQFLAGS 100 #ifdef CONFIG_TRACE_IRQFLAGS
101 bl .trace_hardirqs_on 101 bl .trace_hardirqs_on
102 REST_GPR(0,r1) 102 REST_GPR(0,r1)
103 REST_4GPRS(3,r1) 103 REST_4GPRS(3,r1)
104 REST_2GPRS(7,r1) 104 REST_2GPRS(7,r1)
105 addi r9,r1,STACK_FRAME_OVERHEAD 105 addi r9,r1,STACK_FRAME_OVERHEAD
106 ld r12,_MSR(r1) 106 ld r12,_MSR(r1)
107 #endif /* CONFIG_TRACE_IRQFLAGS */ 107 #endif /* CONFIG_TRACE_IRQFLAGS */
108 li r10,1 108 li r10,1
109 stb r10,PACASOFTIRQEN(r13) 109 stb r10,PACASOFTIRQEN(r13)
110 stb r10,PACAHARDIRQEN(r13) 110 stb r10,PACAHARDIRQEN(r13)
111 std r10,SOFTE(r1) 111 std r10,SOFTE(r1)
112 #ifdef CONFIG_PPC_ISERIES 112 #ifdef CONFIG_PPC_ISERIES
113 BEGIN_FW_FTR_SECTION 113 BEGIN_FW_FTR_SECTION
114 /* Hack for handling interrupts when soft-enabling on iSeries */ 114 /* Hack for handling interrupts when soft-enabling on iSeries */
115 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ 115 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
116 andi. r10,r12,MSR_PR /* from kernel */ 116 andi. r10,r12,MSR_PR /* from kernel */
117 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq 117 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
118 bne 2f 118 bne 2f
119 b hardware_interrupt_entry 119 b hardware_interrupt_entry
120 2: 120 2:
121 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 121 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
122 #endif /* CONFIG_PPC_ISERIES */ 122 #endif /* CONFIG_PPC_ISERIES */
123 123
124 /* Hard enable interrupts */ 124 /* Hard enable interrupts */
125 #ifdef CONFIG_PPC_BOOK3E 125 #ifdef CONFIG_PPC_BOOK3E
126 wrteei 1 126 wrteei 1
127 #else 127 #else
128 mfmsr r11 128 mfmsr r11
129 ori r11,r11,MSR_EE 129 ori r11,r11,MSR_EE
130 mtmsrd r11,1 130 mtmsrd r11,1
131 #endif /* CONFIG_PPC_BOOK3E */ 131 #endif /* CONFIG_PPC_BOOK3E */
132 132
133 #ifdef SHOW_SYSCALLS 133 #ifdef SHOW_SYSCALLS
134 bl .do_show_syscall 134 bl .do_show_syscall
135 REST_GPR(0,r1) 135 REST_GPR(0,r1)
136 REST_4GPRS(3,r1) 136 REST_4GPRS(3,r1)
137 REST_2GPRS(7,r1) 137 REST_2GPRS(7,r1)
138 addi r9,r1,STACK_FRAME_OVERHEAD 138 addi r9,r1,STACK_FRAME_OVERHEAD
139 #endif 139 #endif
140 clrrdi r11,r1,THREAD_SHIFT 140 clrrdi r11,r1,THREAD_SHIFT
141 ld r10,TI_FLAGS(r11) 141 ld r10,TI_FLAGS(r11)
142 andi. r11,r10,_TIF_SYSCALL_T_OR_A 142 andi. r11,r10,_TIF_SYSCALL_T_OR_A
143 bne- syscall_dotrace 143 bne- syscall_dotrace
144 syscall_dotrace_cont: 144 syscall_dotrace_cont:
145 cmpldi 0,r0,NR_syscalls 145 cmpldi 0,r0,NR_syscalls
146 bge- syscall_enosys 146 bge- syscall_enosys
147 147
148 system_call: /* label this so stack traces look sane */ 148 system_call: /* label this so stack traces look sane */
149 /* 149 /*
150 * Need to vector to 32 Bit or default sys_call_table here, 150 * Need to vector to 32 Bit or default sys_call_table here,
151 * based on caller's run-mode / personality. 151 * based on caller's run-mode / personality.
152 */ 152 */
153 ld r11,.SYS_CALL_TABLE@toc(2) 153 ld r11,.SYS_CALL_TABLE@toc(2)
154 andi. r10,r10,_TIF_32BIT 154 andi. r10,r10,_TIF_32BIT
155 beq 15f 155 beq 15f
156 addi r11,r11,8 /* use 32-bit syscall entries */ 156 addi r11,r11,8 /* use 32-bit syscall entries */
157 clrldi r3,r3,32 157 clrldi r3,r3,32
158 clrldi r4,r4,32 158 clrldi r4,r4,32
159 clrldi r5,r5,32 159 clrldi r5,r5,32
160 clrldi r6,r6,32 160 clrldi r6,r6,32
161 clrldi r7,r7,32 161 clrldi r7,r7,32
162 clrldi r8,r8,32 162 clrldi r8,r8,32
163 15: 163 15:
164 slwi r0,r0,4 164 slwi r0,r0,4
165 ldx r10,r11,r0 /* Fetch system call handler [ptr] */ 165 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
166 mtctr r10 166 mtctr r10
167 bctrl /* Call handler */ 167 bctrl /* Call handler */
168 168
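The dispatch above scales the syscall number by 16 (slwi r0,r0,4) and adds 8 to the table base for 32-bit tasks, which implies each sys_call_table slot holds a pair of 64-bit handler pointers: the native entry first, the compat entry second. A small sketch of that inferred layout; the type and function names here are illustrative, not the kernel's:

    #include <stdint.h>

    /* Layout inferred from the asm: 16 bytes per syscall, 64-bit handler
     * at offset 0, 32-bit (compat) handler at offset 8. */
    struct syscall_slot {
            uintptr_t native;
            uintptr_t compat;
    };

    static uintptr_t pick_handler(const struct syscall_slot *table,
                                  unsigned long nr, int is_32bit_task)
    {
            return is_32bit_task ? table[nr].compat : table[nr].native;
    }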
169 syscall_exit: 169 syscall_exit:
170 std r3,RESULT(r1) 170 std r3,RESULT(r1)
171 #ifdef SHOW_SYSCALLS 171 #ifdef SHOW_SYSCALLS
172 bl .do_show_syscall_exit 172 bl .do_show_syscall_exit
173 ld r3,RESULT(r1) 173 ld r3,RESULT(r1)
174 #endif 174 #endif
175 clrrdi r12,r1,THREAD_SHIFT 175 clrrdi r12,r1,THREAD_SHIFT
176 176
177 ld r8,_MSR(r1) 177 ld r8,_MSR(r1)
178 #ifdef CONFIG_PPC_BOOK3S 178 #ifdef CONFIG_PPC_BOOK3S
179 /* No MSR:RI on BookE */ 179 /* No MSR:RI on BookE */
180 andi. r10,r8,MSR_RI 180 andi. r10,r8,MSR_RI
181 beq- unrecov_restore 181 beq- unrecov_restore
182 #endif 182 #endif
183 183
184 /* Disable interrupts so current_thread_info()->flags can't change, 184 /* Disable interrupts so current_thread_info()->flags can't change,
185 * and so that we don't get interrupted after loading SRR0/1. 185 * and so that we don't get interrupted after loading SRR0/1.
186 */ 186 */
187 #ifdef CONFIG_PPC_BOOK3E 187 #ifdef CONFIG_PPC_BOOK3E
188 wrteei 0 188 wrteei 0
189 #else 189 #else
190 mfmsr r10 190 mfmsr r10
191 rldicl r10,r10,48,1 191 rldicl r10,r10,48,1
192 rotldi r10,r10,16 192 rotldi r10,r10,16
193 mtmsrd r10,1 193 mtmsrd r10,1
194 #endif /* CONFIG_PPC_BOOK3E */ 194 #endif /* CONFIG_PPC_BOOK3E */
195 195
196 ld r9,TI_FLAGS(r12) 196 ld r9,TI_FLAGS(r12)
197 li r11,-_LAST_ERRNO 197 li r11,-_LAST_ERRNO
198 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) 198 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
199 bne- syscall_exit_work 199 bne- syscall_exit_work
200 cmpld r3,r11 200 cmpld r3,r11
201 ld r5,_CCR(r1) 201 ld r5,_CCR(r1)
202 bge- syscall_error 202 bge- syscall_error
203 syscall_error_cont: 203 syscall_error_cont:
204 ld r7,_NIP(r1) 204 ld r7,_NIP(r1)
205 stdcx. r0,0,r1 /* to clear the reservation */ 205 stdcx. r0,0,r1 /* to clear the reservation */
206 andi. r6,r8,MSR_PR 206 andi. r6,r8,MSR_PR
207 ld r4,_LINK(r1) 207 ld r4,_LINK(r1)
208 /* 208 /*
209 * Clear RI before restoring r13. If we are returning to 209 * Clear RI before restoring r13. If we are returning to
210 * userspace and we take an exception after restoring r13, 210 * userspace and we take an exception after restoring r13,
211 * we end up corrupting the userspace r13 value. 211 * we end up corrupting the userspace r13 value.
212 */ 212 */
213 #ifdef CONFIG_PPC_BOOK3S 213 #ifdef CONFIG_PPC_BOOK3S
214 /* No MSR:RI on BookE */ 214 /* No MSR:RI on BookE */
215 li r12,MSR_RI 215 li r12,MSR_RI
216 andc r11,r10,r12 216 andc r11,r10,r12
217 mtmsrd r11,1 /* clear MSR.RI */ 217 mtmsrd r11,1 /* clear MSR.RI */
218 #endif /* CONFIG_PPC_BOOK3S */ 218 #endif /* CONFIG_PPC_BOOK3S */
219 219
220 beq- 1f 220 beq- 1f
221 ACCOUNT_CPU_USER_EXIT(r11, r12) 221 ACCOUNT_CPU_USER_EXIT(r11, r12)
222 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 222 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
223 1: ld r2,GPR2(r1) 223 1: ld r2,GPR2(r1)
224 ld r1,GPR1(r1) 224 ld r1,GPR1(r1)
225 mtlr r4 225 mtlr r4
226 mtcr r5 226 mtcr r5
227 mtspr SPRN_SRR0,r7 227 mtspr SPRN_SRR0,r7
228 mtspr SPRN_SRR1,r8 228 mtspr SPRN_SRR1,r8
229 RFI 229 RFI
230 b . /* prevent speculative execution */ 230 b . /* prevent speculative execution */
231 231
232 syscall_error: 232 syscall_error:
233 oris r5,r5,0x1000 /* Set SO bit in CR */ 233 oris r5,r5,0x1000 /* Set SO bit in CR */
234 neg r3,r3 234 neg r3,r3
235 std r5,_CCR(r1) 235 std r5,_CCR(r1)
236 b syscall_error_cont 236 b syscall_error_cont
237 237
238 /* Traced system call support */ 238 /* Traced system call support */
239 syscall_dotrace: 239 syscall_dotrace:
240 bl .save_nvgprs 240 bl .save_nvgprs
241 addi r3,r1,STACK_FRAME_OVERHEAD 241 addi r3,r1,STACK_FRAME_OVERHEAD
242 bl .do_syscall_trace_enter 242 bl .do_syscall_trace_enter
243 /* 243 /*
244 * Restore argument registers possibly just changed. 244 * Restore argument registers possibly just changed.
245 * We use the return value of do_syscall_trace_enter 245 * We use the return value of do_syscall_trace_enter
246 * for the call number to look up in the table (r0). 246 * for the call number to look up in the table (r0).
247 */ 247 */
248 mr r0,r3 248 mr r0,r3
249 ld r3,GPR3(r1) 249 ld r3,GPR3(r1)
250 ld r4,GPR4(r1) 250 ld r4,GPR4(r1)
251 ld r5,GPR5(r1) 251 ld r5,GPR5(r1)
252 ld r6,GPR6(r1) 252 ld r6,GPR6(r1)
253 ld r7,GPR7(r1) 253 ld r7,GPR7(r1)
254 ld r8,GPR8(r1) 254 ld r8,GPR8(r1)
255 addi r9,r1,STACK_FRAME_OVERHEAD 255 addi r9,r1,STACK_FRAME_OVERHEAD
256 clrrdi r10,r1,THREAD_SHIFT 256 clrrdi r10,r1,THREAD_SHIFT
257 ld r10,TI_FLAGS(r10) 257 ld r10,TI_FLAGS(r10)
258 b syscall_dotrace_cont 258 b syscall_dotrace_cont
259 259
260 syscall_enosys: 260 syscall_enosys:
261 li r3,-ENOSYS 261 li r3,-ENOSYS
262 b syscall_exit 262 b syscall_exit
263 263
264 syscall_exit_work: 264 syscall_exit_work:
265 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. 265 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
266 If TIF_NOERROR is set, just save r3 as it is. */ 266 If TIF_NOERROR is set, just save r3 as it is. */
267 267
268 andi. r0,r9,_TIF_RESTOREALL 268 andi. r0,r9,_TIF_RESTOREALL
269 beq+ 0f 269 beq+ 0f
270 REST_NVGPRS(r1) 270 REST_NVGPRS(r1)
271 b 2f 271 b 2f
272 0: cmpld r3,r11 /* r11 is -LAST_ERRNO */ 272 0: cmpld r3,r11 /* r11 is -LAST_ERRNO */
273 blt+ 1f 273 blt+ 1f
274 andi. r0,r9,_TIF_NOERROR 274 andi. r0,r9,_TIF_NOERROR
275 bne- 1f 275 bne- 1f
276 ld r5,_CCR(r1) 276 ld r5,_CCR(r1)
277 neg r3,r3 277 neg r3,r3
278 oris r5,r5,0x1000 /* Set SO bit in CR */ 278 oris r5,r5,0x1000 /* Set SO bit in CR */
279 std r5,_CCR(r1) 279 std r5,_CCR(r1)
280 1: std r3,GPR3(r1) 280 1: std r3,GPR3(r1)
281 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) 281 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
282 beq 4f 282 beq 4f
283 283
284 /* Clear per-syscall TIF flags if any are set. */ 284 /* Clear per-syscall TIF flags if any are set. */
285 285
286 li r11,_TIF_PERSYSCALL_MASK 286 li r11,_TIF_PERSYSCALL_MASK
287 addi r12,r12,TI_FLAGS 287 addi r12,r12,TI_FLAGS
288 3: ldarx r10,0,r12 288 3: ldarx r10,0,r12
289 andc r10,r10,r11 289 andc r10,r10,r11
290 stdcx. r10,0,r12 290 stdcx. r10,0,r12
291 bne- 3b 291 bne- 3b
292 subi r12,r12,TI_FLAGS 292 subi r12,r12,TI_FLAGS
293 293
294 4: /* Anything else left to do? */ 294 4: /* Anything else left to do? */
295 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) 295 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
296 beq .ret_from_except_lite 296 beq .ret_from_except_lite
297 297
298 /* Re-enable interrupts */ 298 /* Re-enable interrupts */
299 #ifdef CONFIG_PPC_BOOK3E 299 #ifdef CONFIG_PPC_BOOK3E
300 wrteei 1 300 wrteei 1
301 #else 301 #else
302 mfmsr r10 302 mfmsr r10
303 ori r10,r10,MSR_EE 303 ori r10,r10,MSR_EE
304 mtmsrd r10,1 304 mtmsrd r10,1
305 #endif /* CONFIG_PPC_BOOK3E */ 305 #endif /* CONFIG_PPC_BOOK3E */
306 306
307 bl .save_nvgprs 307 bl .save_nvgprs
308 addi r3,r1,STACK_FRAME_OVERHEAD 308 addi r3,r1,STACK_FRAME_OVERHEAD
309 bl .do_syscall_trace_leave 309 bl .do_syscall_trace_leave
310 b .ret_from_except 310 b .ret_from_except
311 311
312 /* Save non-volatile GPRs, if not already saved. */ 312 /* Save non-volatile GPRs, if not already saved. */
313 _GLOBAL(save_nvgprs) 313 _GLOBAL(save_nvgprs)
314 ld r11,_TRAP(r1) 314 ld r11,_TRAP(r1)
315 andi. r0,r11,1 315 andi. r0,r11,1
316 beqlr- 316 beqlr-
317 SAVE_NVGPRS(r1) 317 SAVE_NVGPRS(r1)
318 clrrdi r0,r11,1 318 clrrdi r0,r11,1
319 std r0,_TRAP(r1) 319 std r0,_TRAP(r1)
320 blr 320 blr
321 321
322 322
323 /* 323 /*
324 * The sigsuspend and rt_sigsuspend system calls can call do_signal 324 * The sigsuspend and rt_sigsuspend system calls can call do_signal
325 * and thus put the process into the stopped state where we might 325 * and thus put the process into the stopped state where we might
326 * want to examine its user state with ptrace. Therefore we need 326 * want to examine its user state with ptrace. Therefore we need
327 * to save all the nonvolatile registers (r14 - r31) before calling 327 * to save all the nonvolatile registers (r14 - r31) before calling
328 * the C code. Similarly, fork, vfork and clone need the full 328 * the C code. Similarly, fork, vfork and clone need the full
329 * register state on the stack so that it can be copied to the child. 329 * register state on the stack so that it can be copied to the child.
330 */ 330 */
331 331
332 _GLOBAL(ppc_fork) 332 _GLOBAL(ppc_fork)
333 bl .save_nvgprs 333 bl .save_nvgprs
334 bl .sys_fork 334 bl .sys_fork
335 b syscall_exit 335 b syscall_exit
336 336
337 _GLOBAL(ppc_vfork) 337 _GLOBAL(ppc_vfork)
338 bl .save_nvgprs 338 bl .save_nvgprs
339 bl .sys_vfork 339 bl .sys_vfork
340 b syscall_exit 340 b syscall_exit
341 341
342 _GLOBAL(ppc_clone) 342 _GLOBAL(ppc_clone)
343 bl .save_nvgprs 343 bl .save_nvgprs
344 bl .sys_clone 344 bl .sys_clone
345 b syscall_exit 345 b syscall_exit
346 346
347 _GLOBAL(ppc32_swapcontext) 347 _GLOBAL(ppc32_swapcontext)
348 bl .save_nvgprs 348 bl .save_nvgprs
349 bl .compat_sys_swapcontext 349 bl .compat_sys_swapcontext
350 b syscall_exit 350 b syscall_exit
351 351
352 _GLOBAL(ppc64_swapcontext) 352 _GLOBAL(ppc64_swapcontext)
353 bl .save_nvgprs 353 bl .save_nvgprs
354 bl .sys_swapcontext 354 bl .sys_swapcontext
355 b syscall_exit 355 b syscall_exit
356 356
357 _GLOBAL(ret_from_fork) 357 _GLOBAL(ret_from_fork)
358 bl .schedule_tail 358 bl .schedule_tail
359 REST_NVGPRS(r1) 359 REST_NVGPRS(r1)
360 li r3,0 360 li r3,0
361 b syscall_exit 361 b syscall_exit
362 362
363 /* 363 /*
364 * This routine switches between two different tasks. The process 364 * This routine switches between two different tasks. The process
365 * state of one is saved on its kernel stack. Then the state 365 * state of one is saved on its kernel stack. Then the state
366 * of the other is restored from its kernel stack. The memory 366 * of the other is restored from its kernel stack. The memory
367 * management hardware is updated to the second process's state. 367 * management hardware is updated to the second process's state.
368 * Finally, we can return to the second process, via ret_from_except. 368 * Finally, we can return to the second process, via ret_from_except.
369 * On entry, r3 points to the THREAD for the current task, r4 369 * On entry, r3 points to the THREAD for the current task, r4
370 * points to the THREAD for the new task. 370 * points to the THREAD for the new task.
371 * 371 *
372 * Note: there are two ways to get to the "going out" portion 372 * Note: there are two ways to get to the "going out" portion
373 * of this code; either by coming in via the entry (_switch) 373 * of this code; either by coming in via the entry (_switch)
374 * or via "fork" which must set up an environment equivalent 374 * or via "fork" which must set up an environment equivalent
375 * to the "_switch" path. If you change this you'll have to change 375 * to the "_switch" path. If you change this you'll have to change
376 * the fork code also. 376 * the fork code also.
377 * 377 *
378 * The code which creates the new task context is in 'copy_thread' 378 * The code which creates the new task context is in 'copy_thread'
379 * in arch/powerpc/kernel/process.c 379 * in arch/powerpc/kernel/process.c
380 */ 380 */
381 .align 7 381 .align 7
382 _GLOBAL(_switch) 382 _GLOBAL(_switch)
383 mflr r0 383 mflr r0
384 std r0,16(r1) 384 std r0,16(r1)
385 stdu r1,-SWITCH_FRAME_SIZE(r1) 385 stdu r1,-SWITCH_FRAME_SIZE(r1)
386 /* r3-r13 are caller saved -- Cort */ 386 /* r3-r13 are caller saved -- Cort */
387 SAVE_8GPRS(14, r1) 387 SAVE_8GPRS(14, r1)
388 SAVE_10GPRS(22, r1) 388 SAVE_10GPRS(22, r1)
389 mflr r20 /* Return to switch caller */ 389 mflr r20 /* Return to switch caller */
390 mfmsr r22 390 mfmsr r22
391 li r0, MSR_FP 391 li r0, MSR_FP
392 #ifdef CONFIG_VSX 392 #ifdef CONFIG_VSX
393 BEGIN_FTR_SECTION 393 BEGIN_FTR_SECTION
394 oris r0,r0,MSR_VSX@h /* Disable VSX */ 394 oris r0,r0,MSR_VSX@h /* Disable VSX */
395 END_FTR_SECTION_IFSET(CPU_FTR_VSX) 395 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
396 #endif /* CONFIG_VSX */ 396 #endif /* CONFIG_VSX */
397 #ifdef CONFIG_ALTIVEC 397 #ifdef CONFIG_ALTIVEC
398 BEGIN_FTR_SECTION 398 BEGIN_FTR_SECTION
399 oris r0,r0,MSR_VEC@h /* Disable altivec */ 399 oris r0,r0,MSR_VEC@h /* Disable altivec */
400 mfspr r24,SPRN_VRSAVE /* save vrsave register value */ 400 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
401 std r24,THREAD_VRSAVE(r3) 401 std r24,THREAD_VRSAVE(r3)
402 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 402 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
403 #endif /* CONFIG_ALTIVEC */ 403 #endif /* CONFIG_ALTIVEC */
404 and. r0,r0,r22 404 and. r0,r0,r22
405 beq+ 1f 405 beq+ 1f
406 andc r22,r22,r0 406 andc r22,r22,r0
407 MTMSRD(r22) 407 MTMSRD(r22)
408 isync 408 isync
409 1: std r20,_NIP(r1) 409 1: std r20,_NIP(r1)
410 mfcr r23 410 mfcr r23
411 std r23,_CCR(r1) 411 std r23,_CCR(r1)
412 std r1,KSP(r3) /* Set old stack pointer */ 412 std r1,KSP(r3) /* Set old stack pointer */
413 413
414 #ifdef CONFIG_SMP 414 #ifdef CONFIG_SMP
415 /* We need a sync somewhere here to make sure that if the 415 /* We need a sync somewhere here to make sure that if the
416 * previous task gets rescheduled on another CPU, it sees all 416 * previous task gets rescheduled on another CPU, it sees all
417 * stores it has performed on this one. 417 * stores it has performed on this one.
418 */ 418 */
419 sync 419 sync
420 #endif /* CONFIG_SMP */ 420 #endif /* CONFIG_SMP */
421 421
422 addi r6,r4,-THREAD /* Convert THREAD to 'current' */ 422 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
423 std r6,PACACURRENT(r13) /* Set new 'current' */ 423 std r6,PACACURRENT(r13) /* Set new 'current' */
424 424
425 ld r8,KSP(r4) /* new stack pointer */ 425 ld r8,KSP(r4) /* new stack pointer */
426 #ifdef CONFIG_PPC_BOOK3S 426 #ifdef CONFIG_PPC_BOOK3S
427 BEGIN_FTR_SECTION 427 BEGIN_FTR_SECTION
428 BEGIN_FTR_SECTION_NESTED(95) 428 BEGIN_FTR_SECTION_NESTED(95)
429 clrrdi r6,r8,28 /* get its ESID */ 429 clrrdi r6,r8,28 /* get its ESID */
430 clrrdi r9,r1,28 /* get current sp ESID */ 430 clrrdi r9,r1,28 /* get current sp ESID */
431 FTR_SECTION_ELSE_NESTED(95) 431 FTR_SECTION_ELSE_NESTED(95)
432 clrrdi r6,r8,40 /* get its 1T ESID */ 432 clrrdi r6,r8,40 /* get its 1T ESID */
433 clrrdi r9,r1,40 /* get current sp 1T ESID */ 433 clrrdi r9,r1,40 /* get current sp 1T ESID */
434 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) 434 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
435 FTR_SECTION_ELSE 435 FTR_SECTION_ELSE
436 b 2f 436 b 2f
437 ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) 437 ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
438 clrldi. r0,r6,2 /* is new ESID c00000000? */ 438 clrldi. r0,r6,2 /* is new ESID c00000000? */
439 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 439 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
440 cror eq,4*cr1+eq,eq 440 cror eq,4*cr1+eq,eq
441 beq 2f /* if yes, don't slbie it */ 441 beq 2f /* if yes, don't slbie it */
442 442
443 /* Bolt in the new stack SLB entry */ 443 /* Bolt in the new stack SLB entry */
444 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 444 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
445 oris r0,r6,(SLB_ESID_V)@h 445 oris r0,r6,(SLB_ESID_V)@h
446 ori r0,r0,(SLB_NUM_BOLTED-1)@l 446 ori r0,r0,(SLB_NUM_BOLTED-1)@l
447 BEGIN_FTR_SECTION 447 BEGIN_FTR_SECTION
448 li r9,MMU_SEGSIZE_1T /* insert B field */ 448 li r9,MMU_SEGSIZE_1T /* insert B field */
449 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h 449 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
450 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 450 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
451 END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 451 END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
452 452
453 /* Update the last bolted SLB. No write barriers are needed 453 /* Update the last bolted SLB. No write barriers are needed
454 * here, provided we only update the current CPU's SLB shadow 454 * here, provided we only update the current CPU's SLB shadow
455 * buffer. 455 * buffer.
456 */ 456 */
457 ld r9,PACA_SLBSHADOWPTR(r13) 457 ld r9,PACA_SLBSHADOWPTR(r13)
458 li r12,0 458 li r12,0
459 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ 459 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
460 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ 460 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
461 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ 461 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
462 462
463 /* No need to check for CPU_FTR_NO_SLBIE_B here, since when 463 /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
464 * we have 1TB segments, the only CPUs known to have the errata 464 * we have 1TB segments, the only CPUs known to have the errata
465 * only support less than 1TB of system memory and we'll never 465 * only support less than 1TB of system memory and we'll never
466 * actually hit this code path. 466 * actually hit this code path.
467 */ 467 */
468 468
469 slbie r6 469 slbie r6
470 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 470 slbie r6 /* Workaround POWER5 < DD2.1 issue */
471 slbmte r7,r0 471 slbmte r7,r0
472 isync 472 isync
473 2: 473 2:
474 #endif /* !CONFIG_PPC_BOOK3S */ 474 #endif /* !CONFIG_PPC_BOOK3S */
475 475
476 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ 476 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
477 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE 477 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
478 because we don't need to leave the 288-byte ABI gap at the 478 because we don't need to leave the 288-byte ABI gap at the
479 top of the kernel stack. */ 479 top of the kernel stack. */
480 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE 480 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
481 481
482 mr r1,r8 /* start using new stack pointer */ 482 mr r1,r8 /* start using new stack pointer */
483 std r7,PACAKSAVE(r13) 483 std r7,PACAKSAVE(r13)
484 484
485 ld r6,_CCR(r1) 485 ld r6,_CCR(r1)
486 mtcrf 0xFF,r6 486 mtcrf 0xFF,r6
487 487
488 #ifdef CONFIG_ALTIVEC 488 #ifdef CONFIG_ALTIVEC
489 BEGIN_FTR_SECTION 489 BEGIN_FTR_SECTION
490 ld r0,THREAD_VRSAVE(r4) 490 ld r0,THREAD_VRSAVE(r4)
491 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ 491 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
492 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 492 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
493 #endif /* CONFIG_ALTIVEC */ 493 #endif /* CONFIG_ALTIVEC */
494 494
495 /* r3-r13 are destroyed -- Cort */ 495 /* r3-r13 are destroyed -- Cort */
496 REST_8GPRS(14, r1) 496 REST_8GPRS(14, r1)
497 REST_10GPRS(22, r1) 497 REST_10GPRS(22, r1)
498 498
499 /* convert old thread to its task_struct for return value */ 499 /* convert old thread to its task_struct for return value */
500 addi r3,r3,-THREAD 500 addi r3,r3,-THREAD
501 ld r7,_NIP(r1) /* Return to _switch caller in new task */ 501 ld r7,_NIP(r1) /* Return to _switch caller in new task */
502 mtlr r7 502 mtlr r7
503 addi r1,r1,SWITCH_FRAME_SIZE 503 addi r1,r1,SWITCH_FRAME_SIZE
504 blr 504 blr
505 505
506 .align 7 506 .align 7
507 _GLOBAL(ret_from_except) 507 _GLOBAL(ret_from_except)
508 ld r11,_TRAP(r1) 508 ld r11,_TRAP(r1)
509 andi. r0,r11,1 509 andi. r0,r11,1
510 bne .ret_from_except_lite 510 bne .ret_from_except_lite
511 REST_NVGPRS(r1) 511 REST_NVGPRS(r1)
512 512
513 _GLOBAL(ret_from_except_lite) 513 _GLOBAL(ret_from_except_lite)
514 /* 514 /*
515 * Disable interrupts so that current_thread_info()->flags 515 * Disable interrupts so that current_thread_info()->flags
516 * can't change between when we test it and when we return 516 * can't change between when we test it and when we return
517 * from the interrupt. 517 * from the interrupt.
518 */ 518 */
519 #ifdef CONFIG_PPC_BOOK3E 519 #ifdef CONFIG_PPC_BOOK3E
520 wrteei 0 520 wrteei 0
521 #else 521 #else
522 mfmsr r10 /* Get current interrupt state */ 522 mfmsr r10 /* Get current interrupt state */
523 rldicl r9,r10,48,1 /* clear MSR_EE */ 523 rldicl r9,r10,48,1 /* clear MSR_EE */
524 rotldi r9,r9,16 524 rotldi r9,r9,16
525 mtmsrd r9,1 /* Update machine state */ 525 mtmsrd r9,1 /* Update machine state */
526 #endif /* CONFIG_PPC_BOOK3E */ 526 #endif /* CONFIG_PPC_BOOK3E */
527 527
528 #ifdef CONFIG_PREEMPT 528 #ifdef CONFIG_PREEMPT
529 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ 529 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
530 li r0,_TIF_NEED_RESCHED /* bits to check */ 530 li r0,_TIF_NEED_RESCHED /* bits to check */
531 ld r3,_MSR(r1) 531 ld r3,_MSR(r1)
532 ld r4,TI_FLAGS(r9) 532 ld r4,TI_FLAGS(r9)
533 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ 533 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
534 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING 534 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
535 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */ 535 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
536 bne do_work 536 bne do_work
537 537
538 #else /* !CONFIG_PREEMPT */ 538 #else /* !CONFIG_PREEMPT */
539 ld r3,_MSR(r1) /* Returning to user mode? */ 539 ld r3,_MSR(r1) /* Returning to user mode? */
540 andi. r3,r3,MSR_PR 540 andi. r3,r3,MSR_PR
541 beq restore /* if not, just restore regs and return */ 541 beq restore /* if not, just restore regs and return */
542 542
543 /* Check current_thread_info()->flags */ 543 /* Check current_thread_info()->flags */
544 clrrdi r9,r1,THREAD_SHIFT 544 clrrdi r9,r1,THREAD_SHIFT
545 ld r4,TI_FLAGS(r9) 545 ld r4,TI_FLAGS(r9)
546 andi. r0,r4,_TIF_USER_WORK_MASK 546 andi. r0,r4,_TIF_USER_WORK_MASK
547 bne do_work 547 bne do_work
548 #endif 548 #endif
549 549
550 restore: 550 restore:
551 BEGIN_FW_FTR_SECTION 551 BEGIN_FW_FTR_SECTION
552 ld r5,SOFTE(r1) 552 ld r5,SOFTE(r1)
553 FW_FTR_SECTION_ELSE 553 FW_FTR_SECTION_ELSE
554 b .Liseries_check_pending_irqs 554 b .Liseries_check_pending_irqs
555 ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 555 ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
556 2: 556 2:
557 TRACE_AND_RESTORE_IRQ(r5); 557 TRACE_AND_RESTORE_IRQ(r5);
558 558
559 #ifdef CONFIG_PERF_EVENTS
560 /* check paca->perf_event_pending if we're enabling ints */
561 lbz r3,PACAPERFPEND(r13)
562 and. r3,r3,r5
563 beq 27f
564 bl .perf_event_do_pending
565 27:
566 #endif /* CONFIG_PERF_EVENTS */
567
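These removed lines are the perf_event half of the merge: while restoring the soft-enable state, the low-level return path loaded the PACA's pending flag, ANDed it with the value being restored in r5, and called perf_event_do_pending() directly, a call the commit message says could oops, so it is dropped from this assembly path. A rough C paraphrase of what the removed hunk did, assuming the field behind PACAPERFPEND is the paca's perf_event_pending byte (the helper name here is made up):

    /* perf_event_do_pending() is the real kernel function the hunk called. */
    void perf_event_do_pending(void);

    static inline void check_perf_pending(unsigned char pending, unsigned long soft_enabled)
    {
            /* The asm used "and." of the two values, so both must be non-zero. */
            if (pending & soft_enabled)
                    perf_event_do_pending();
    }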
568 /* extract EE bit and use it to restore paca->hard_enabled */ 559 /* extract EE bit and use it to restore paca->hard_enabled */
569 ld r3,_MSR(r1) 560 ld r3,_MSR(r1)
570 rldicl r4,r3,49,63 /* r4 = (r3 >> 15) & 1 */ 561 rldicl r4,r3,49,63 /* r4 = (r3 >> 15) & 1 */
571 stb r4,PACAHARDIRQEN(r13) 562 stb r4,PACAHARDIRQEN(r13)
572 563
573 #ifdef CONFIG_PPC_BOOK3E 564 #ifdef CONFIG_PPC_BOOK3E
574 b .exception_return_book3e 565 b .exception_return_book3e
575 #else 566 #else
576 ld r4,_CTR(r1) 567 ld r4,_CTR(r1)
577 ld r0,_LINK(r1) 568 ld r0,_LINK(r1)
578 mtctr r4 569 mtctr r4
579 mtlr r0 570 mtlr r0
580 ld r4,_XER(r1) 571 ld r4,_XER(r1)
581 mtspr SPRN_XER,r4 572 mtspr SPRN_XER,r4
582 573
583 REST_8GPRS(5, r1) 574 REST_8GPRS(5, r1)
584 575
585 andi. r0,r3,MSR_RI 576 andi. r0,r3,MSR_RI
586 beq- unrecov_restore 577 beq- unrecov_restore
587 578
588 stdcx. r0,0,r1 /* to clear the reservation */ 579 stdcx. r0,0,r1 /* to clear the reservation */
589 580
590 /* 581 /*
591 * Clear RI before restoring r13. If we are returning to 582 * Clear RI before restoring r13. If we are returning to
592 * userspace and we take an exception after restoring r13, 583 * userspace and we take an exception after restoring r13,
593 * we end up corrupting the userspace r13 value. 584 * we end up corrupting the userspace r13 value.
594 */ 585 */
595 mfmsr r4 586 mfmsr r4
596 andc r4,r4,r0 /* r0 contains MSR_RI here */ 587 andc r4,r4,r0 /* r0 contains MSR_RI here */
597 mtmsrd r4,1 588 mtmsrd r4,1
598 589
599 /* 590 /*
600 * r13 is our per cpu area, only restore it if we are returning to 591 * r13 is our per cpu area, only restore it if we are returning to
601 * userspace 592 * userspace
602 */ 593 */
603 andi. r0,r3,MSR_PR 594 andi. r0,r3,MSR_PR
604 beq 1f 595 beq 1f
605 ACCOUNT_CPU_USER_EXIT(r2, r4) 596 ACCOUNT_CPU_USER_EXIT(r2, r4)
606 REST_GPR(13, r1) 597 REST_GPR(13, r1)
607 1: 598 1:
608 mtspr SPRN_SRR1,r3 599 mtspr SPRN_SRR1,r3
609 600
610 ld r2,_CCR(r1) 601 ld r2,_CCR(r1)
611 mtcrf 0xFF,r2 602 mtcrf 0xFF,r2
612 ld r2,_NIP(r1) 603 ld r2,_NIP(r1)
613 mtspr SPRN_SRR0,r2 604 mtspr SPRN_SRR0,r2
614 605
615 ld r0,GPR0(r1) 606 ld r0,GPR0(r1)
616 ld r2,GPR2(r1) 607 ld r2,GPR2(r1)
617 ld r3,GPR3(r1) 608 ld r3,GPR3(r1)
618 ld r4,GPR4(r1) 609 ld r4,GPR4(r1)
619 ld r1,GPR1(r1) 610 ld r1,GPR1(r1)
620 611
621 rfid 612 rfid
622 b . /* prevent speculative execution */ 613 b . /* prevent speculative execution */
623 614
624 #endif /* CONFIG_PPC_BOOK3E */ 615 #endif /* CONFIG_PPC_BOOK3E */
625 616
626 .Liseries_check_pending_irqs: 617 .Liseries_check_pending_irqs:
627 #ifdef CONFIG_PPC_ISERIES 618 #ifdef CONFIG_PPC_ISERIES
628 ld r5,SOFTE(r1) 619 ld r5,SOFTE(r1)
629 cmpdi 0,r5,0 620 cmpdi 0,r5,0
630 beq 2b 621 beq 2b
631 /* Check for pending interrupts (iSeries) */ 622 /* Check for pending interrupts (iSeries) */
632 ld r3,PACALPPACAPTR(r13) 623 ld r3,PACALPPACAPTR(r13)
633 ld r3,LPPACAANYINT(r3) 624 ld r3,LPPACAANYINT(r3)
634 cmpdi r3,0 625 cmpdi r3,0
635 beq+ 2b /* skip do_IRQ if no interrupts */ 626 beq+ 2b /* skip do_IRQ if no interrupts */
636 627
637 li r3,0 628 li r3,0
638 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ 629 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
639 #ifdef CONFIG_TRACE_IRQFLAGS 630 #ifdef CONFIG_TRACE_IRQFLAGS
640 bl .trace_hardirqs_off 631 bl .trace_hardirqs_off
641 mfmsr r10 632 mfmsr r10
642 #endif 633 #endif
643 ori r10,r10,MSR_EE 634 ori r10,r10,MSR_EE
644 mtmsrd r10 /* hard-enable again */ 635 mtmsrd r10 /* hard-enable again */
645 addi r3,r1,STACK_FRAME_OVERHEAD 636 addi r3,r1,STACK_FRAME_OVERHEAD
646 bl .do_IRQ 637 bl .do_IRQ
647 b .ret_from_except_lite /* loop back and handle more */ 638 b .ret_from_except_lite /* loop back and handle more */
648 #endif 639 #endif
649 640
650 do_work: 641 do_work:
651 #ifdef CONFIG_PREEMPT 642 #ifdef CONFIG_PREEMPT
652 andi. r0,r3,MSR_PR /* Returning to user mode? */ 643 andi. r0,r3,MSR_PR /* Returning to user mode? */
653 bne user_work 644 bne user_work
654 /* Check that preempt_count() == 0 and interrupts are enabled */ 645 /* Check that preempt_count() == 0 and interrupts are enabled */
655 lwz r8,TI_PREEMPT(r9) 646 lwz r8,TI_PREEMPT(r9)
656 cmpwi cr1,r8,0 647 cmpwi cr1,r8,0
657 ld r0,SOFTE(r1) 648 ld r0,SOFTE(r1)
658 cmpdi r0,0 649 cmpdi r0,0
659 crandc eq,cr1*4+eq,eq 650 crandc eq,cr1*4+eq,eq
660 bne restore 651 bne restore
661 652
662 /* Here we are preempting the current task. 653 /* Here we are preempting the current task.
663 * 654 *
664 * Ensure interrupts are soft-disabled. We also properly mark 655 * Ensure interrupts are soft-disabled. We also properly mark
665 * the PACA to reflect the fact that they are hard-disabled 656 * the PACA to reflect the fact that they are hard-disabled
666 * and trace the change 657 * and trace the change
667 */ 658 */
668 li r0,0 659 li r0,0
669 stb r0,PACASOFTIRQEN(r13) 660 stb r0,PACASOFTIRQEN(r13)
670 stb r0,PACAHARDIRQEN(r13) 661 stb r0,PACAHARDIRQEN(r13)
671 TRACE_DISABLE_INTS 662 TRACE_DISABLE_INTS
672 663
673 /* Call the scheduler with soft IRQs off */ 664 /* Call the scheduler with soft IRQs off */
674 1: bl .preempt_schedule_irq 665 1: bl .preempt_schedule_irq
675 666
676 /* Hard-disable interrupts again (and update PACA) */ 667 /* Hard-disable interrupts again (and update PACA) */
677 #ifdef CONFIG_PPC_BOOK3E 668 #ifdef CONFIG_PPC_BOOK3E
678 wrteei 0 669 wrteei 0
679 #else 670 #else
680 mfmsr r10 671 mfmsr r10
681 rldicl r10,r10,48,1 672 rldicl r10,r10,48,1
682 rotldi r10,r10,16 673 rotldi r10,r10,16
683 mtmsrd r10,1 674 mtmsrd r10,1
684 #endif /* CONFIG_PPC_BOOK3E */ 675 #endif /* CONFIG_PPC_BOOK3E */
685 li r0,0 676 li r0,0
686 stb r0,PACAHARDIRQEN(r13) 677 stb r0,PACAHARDIRQEN(r13)
687 678
688 /* Re-test flags and eventually loop */ 679 /* Re-test flags and eventually loop */
689 clrrdi r9,r1,THREAD_SHIFT 680 clrrdi r9,r1,THREAD_SHIFT
690 ld r4,TI_FLAGS(r9) 681 ld r4,TI_FLAGS(r9)
691 andi. r0,r4,_TIF_NEED_RESCHED 682 andi. r0,r4,_TIF_NEED_RESCHED
692 bne 1b 683 bne 1b
693 b restore 684 b restore
694 685
695 user_work: 686 user_work:
696 #endif /* CONFIG_PREEMPT */ 687 #endif /* CONFIG_PREEMPT */
697 688
698 /* Enable interrupts */ 689 /* Enable interrupts */
699 #ifdef CONFIG_PPC_BOOK3E 690 #ifdef CONFIG_PPC_BOOK3E
700 wrteei 1 691 wrteei 1
701 #else 692 #else
702 ori r10,r10,MSR_EE 693 ori r10,r10,MSR_EE
703 mtmsrd r10,1 694 mtmsrd r10,1
704 #endif /* CONFIG_PPC_BOOK3E */ 695 #endif /* CONFIG_PPC_BOOK3E */
705 696
706 andi. r0,r4,_TIF_NEED_RESCHED 697 andi. r0,r4,_TIF_NEED_RESCHED
707 beq 1f 698 beq 1f
708 bl .schedule 699 bl .schedule
709 b .ret_from_except_lite 700 b .ret_from_except_lite
710 701
711 1: bl .save_nvgprs 702 1: bl .save_nvgprs
712 addi r3,r1,STACK_FRAME_OVERHEAD 703 addi r3,r1,STACK_FRAME_OVERHEAD
713 bl .do_signal 704 bl .do_signal
714 b .ret_from_except 705 b .ret_from_except
715 706
716 unrecov_restore: 707 unrecov_restore:
717 addi r3,r1,STACK_FRAME_OVERHEAD 708 addi r3,r1,STACK_FRAME_OVERHEAD
718 bl .unrecoverable_exception 709 bl .unrecoverable_exception
719 b unrecov_restore 710 b unrecov_restore
720 711
721 #ifdef CONFIG_PPC_RTAS 712 #ifdef CONFIG_PPC_RTAS
722 /* 713 /*
723 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be 714 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
724 * called with the MMU off. 715 * called with the MMU off.
725 * 716 *
726 * In addition, we need to be in 32b mode, at least for now. 717 * In addition, we need to be in 32b mode, at least for now.
727 * 718 *
728 * Note: r3 is an input parameter to rtas, so don't trash it... 719 * Note: r3 is an input parameter to rtas, so don't trash it...
729 */ 720 */
730 _GLOBAL(enter_rtas) 721 _GLOBAL(enter_rtas)
731 mflr r0 722 mflr r0
732 std r0,16(r1) 723 std r0,16(r1)
733 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ 724 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
734 725
735 /* Because RTAS is running in 32b mode, it clobbers the high order half 726 /* Because RTAS is running in 32b mode, it clobbers the high order half
736 * of all registers that it saves. We therefore save those registers 727 * of all registers that it saves. We therefore save those registers
737 * RTAS might touch to the stack. (r0, r3-r13 are caller saved) 728 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
738 */ 729 */
739 SAVE_GPR(2, r1) /* Save the TOC */ 730 SAVE_GPR(2, r1) /* Save the TOC */
740 SAVE_GPR(13, r1) /* Save paca */ 731 SAVE_GPR(13, r1) /* Save paca */
741 SAVE_8GPRS(14, r1) /* Save the non-volatiles */ 732 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
742 SAVE_10GPRS(22, r1) /* ditto */ 733 SAVE_10GPRS(22, r1) /* ditto */
743 734
744 mfcr r4 735 mfcr r4
745 std r4,_CCR(r1) 736 std r4,_CCR(r1)
746 mfctr r5 737 mfctr r5
747 std r5,_CTR(r1) 738 std r5,_CTR(r1)
748 mfspr r6,SPRN_XER 739 mfspr r6,SPRN_XER
749 std r6,_XER(r1) 740 std r6,_XER(r1)
750 mfdar r7 741 mfdar r7
751 std r7,_DAR(r1) 742 std r7,_DAR(r1)
752 mfdsisr r8 743 mfdsisr r8
753 std r8,_DSISR(r1) 744 std r8,_DSISR(r1)
754 745
755 /* Temporary workaround to clear CR until RTAS can be modified to 746 /* Temporary workaround to clear CR until RTAS can be modified to
756 * ignore all bits. 747 * ignore all bits.
757 */ 748 */
758 li r0,0 749 li r0,0
759 mtcr r0 750 mtcr r0
760 751
761 #ifdef CONFIG_BUG 752 #ifdef CONFIG_BUG
762 /* There is no way it is acceptable to get here with interrupts enabled, 753 /* There is no way it is acceptable to get here with interrupts enabled,
763 * check it with the asm equivalent of WARN_ON 754 * check it with the asm equivalent of WARN_ON
764 */ 755 */
765 lbz r0,PACASOFTIRQEN(r13) 756 lbz r0,PACASOFTIRQEN(r13)
766 1: tdnei r0,0 757 1: tdnei r0,0
767 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 758 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
768 #endif 759 #endif
769 760
770 /* Hard-disable interrupts */ 761 /* Hard-disable interrupts */
771 mfmsr r6 762 mfmsr r6
772 rldicl r7,r6,48,1 763 rldicl r7,r6,48,1
773 rotldi r7,r7,16 764 rotldi r7,r7,16
774 mtmsrd r7,1 765 mtmsrd r7,1
775 766
776 /* Unfortunately, the stack pointer and the MSR are also clobbered, 767 /* Unfortunately, the stack pointer and the MSR are also clobbered,
777 * so they are saved in the PACA which allows us to restore 768 * so they are saved in the PACA which allows us to restore
778 * our original state after RTAS returns. 769 * our original state after RTAS returns.
779 */ 770 */
780 std r1,PACAR1(r13) 771 std r1,PACAR1(r13)
781 std r6,PACASAVEDMSR(r13) 772 std r6,PACASAVEDMSR(r13)
782 773
783 /* Setup our real return addr */ 774 /* Setup our real return addr */
784 LOAD_REG_ADDR(r4,.rtas_return_loc) 775 LOAD_REG_ADDR(r4,.rtas_return_loc)
785 clrldi r4,r4,2 /* convert to realmode address */ 776 clrldi r4,r4,2 /* convert to realmode address */
786 mtlr r4 777 mtlr r4
787 778
788 li r0,0 779 li r0,0
789 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI 780 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
790 andc r0,r6,r0 781 andc r0,r6,r0
791 782
792 li r9,1 783 li r9,1
793 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) 784 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
794 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI 785 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
795 andc r6,r0,r9 786 andc r6,r0,r9
796 sync /* disable interrupts so SRR0/1 */ 787 sync /* disable interrupts so SRR0/1 */
797 mtmsrd r0 /* don't get trashed */ 788 mtmsrd r0 /* don't get trashed */
798 789
799 LOAD_REG_ADDR(r4, rtas) 790 LOAD_REG_ADDR(r4, rtas)
800 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 791 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
801 ld r4,RTASBASE(r4) /* get the rtas->base value */ 792 ld r4,RTASBASE(r4) /* get the rtas->base value */
802 793
803 mtspr SPRN_SRR0,r5 794 mtspr SPRN_SRR0,r5
804 mtspr SPRN_SRR1,r6 795 mtspr SPRN_SRR1,r6
805 rfid 796 rfid
806 b . /* prevent speculative execution */ 797 b . /* prevent speculative execution */
807 798
808 _STATIC(rtas_return_loc) 799 _STATIC(rtas_return_loc)
809 /* relocation is off at this point */ 800 /* relocation is off at this point */
810 mfspr r4,SPRN_SPRG_PACA /* Get PACA */ 801 mfspr r4,SPRN_SPRG_PACA /* Get PACA */
811 clrldi r4,r4,2 /* convert to realmode address */ 802 clrldi r4,r4,2 /* convert to realmode address */
812 803
813 bcl 20,31,$+4 804 bcl 20,31,$+4
814 0: mflr r3 805 0: mflr r3
815 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ 806 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
816 807
817 mfmsr r6 808 mfmsr r6
818 li r0,MSR_RI 809 li r0,MSR_RI
819 andc r6,r6,r0 810 andc r6,r6,r0
820 sync 811 sync
821 mtmsrd r6 812 mtmsrd r6
822 813
823 ld r1,PACAR1(r4) /* Restore our SP */ 814 ld r1,PACAR1(r4) /* Restore our SP */
824 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 815 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
825 816
826 mtspr SPRN_SRR0,r3 817 mtspr SPRN_SRR0,r3
827 mtspr SPRN_SRR1,r4 818 mtspr SPRN_SRR1,r4
828 rfid 819 rfid
829 b . /* prevent speculative execution */ 820 b . /* prevent speculative execution */
830 821
831 .align 3 822 .align 3
832 1: .llong .rtas_restore_regs 823 1: .llong .rtas_restore_regs
833 824
834 _STATIC(rtas_restore_regs) 825 _STATIC(rtas_restore_regs)
835 /* relocation is on at this point */ 826 /* relocation is on at this point */
836 REST_GPR(2, r1) /* Restore the TOC */ 827 REST_GPR(2, r1) /* Restore the TOC */
837 REST_GPR(13, r1) /* Restore paca */ 828 REST_GPR(13, r1) /* Restore paca */
838 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 829 REST_8GPRS(14, r1) /* Restore the non-volatiles */
839 REST_10GPRS(22, r1) /* ditto */ 830 REST_10GPRS(22, r1) /* ditto */
840 831
841 mfspr r13,SPRN_SPRG_PACA 832 mfspr r13,SPRN_SPRG_PACA
842 833
843 ld r4,_CCR(r1) 834 ld r4,_CCR(r1)
844 mtcr r4 835 mtcr r4
845 ld r5,_CTR(r1) 836 ld r5,_CTR(r1)
846 mtctr r5 837 mtctr r5
847 ld r6,_XER(r1) 838 ld r6,_XER(r1)
848 mtspr SPRN_XER,r6 839 mtspr SPRN_XER,r6
849 ld r7,_DAR(r1) 840 ld r7,_DAR(r1)
850 mtdar r7 841 mtdar r7
851 ld r8,_DSISR(r1) 842 ld r8,_DSISR(r1)
852 mtdsisr r8 843 mtdsisr r8
853 844
854 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ 845 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
855 ld r0,16(r1) /* get return address */ 846 ld r0,16(r1) /* get return address */
856 847
857 mtlr r0 848 mtlr r0
858 blr /* return to caller */ 849 blr /* return to caller */
859 850
860 #endif /* CONFIG_PPC_RTAS */ 851 #endif /* CONFIG_PPC_RTAS */
861 852
862 _GLOBAL(enter_prom) 853 _GLOBAL(enter_prom)
863 mflr r0 854 mflr r0
864 std r0,16(r1) 855 std r0,16(r1)
865 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ 856 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
866 857
867 /* Because PROM is running in 32b mode, it clobbers the high order half 858 /* Because PROM is running in 32b mode, it clobbers the high order half
868 * of all registers that it saves. We therefore save those registers 859 * of all registers that it saves. We therefore save those registers
869 * PROM might touch to the stack. (r0, r3-r13 are caller saved) 860 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
870 */ 861 */
871 SAVE_GPR(2, r1) 862 SAVE_GPR(2, r1)
872 SAVE_GPR(13, r1) 863 SAVE_GPR(13, r1)
873 SAVE_8GPRS(14, r1) 864 SAVE_8GPRS(14, r1)
874 SAVE_10GPRS(22, r1) 865 SAVE_10GPRS(22, r1)
875 mfcr r10 866 mfcr r10
876 mfmsr r11 867 mfmsr r11
877 std r10,_CCR(r1) 868 std r10,_CCR(r1)
878 std r11,_MSR(r1) 869 std r11,_MSR(r1)
879 870
880 /* Get the PROM entrypoint */ 871 /* Get the PROM entrypoint */
881 mtlr r4 872 mtlr r4
882 873
883 /* Switch MSR to 32 bits mode 874 /* Switch MSR to 32 bits mode
884 */ 875 */
885 #ifdef CONFIG_PPC_BOOK3E 876 #ifdef CONFIG_PPC_BOOK3E
886 rlwinm r11,r11,0,1,31 877 rlwinm r11,r11,0,1,31
887 mtmsr r11 878 mtmsr r11
888 #else /* CONFIG_PPC_BOOK3E */ 879 #else /* CONFIG_PPC_BOOK3E */
889 mfmsr r11 880 mfmsr r11
890 li r12,1 881 li r12,1
891 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 882 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
892 andc r11,r11,r12 883 andc r11,r11,r12
893 li r12,1 884 li r12,1
894 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) 885 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
895 andc r11,r11,r12 886 andc r11,r11,r12
896 mtmsrd r11 887 mtmsrd r11
897 #endif /* CONFIG_PPC_BOOK3E */ 888 #endif /* CONFIG_PPC_BOOK3E */
898 isync 889 isync
899 890
900 /* Enter PROM here... */ 891 /* Enter PROM here... */
901 blrl 892 blrl
902 893
903 /* Just make sure that r1 top 32 bits didn't get 894 /* Just make sure that r1 top 32 bits didn't get
904 * corrupted by OF 895 * corrupted by OF
905 */ 896 */
906 rldicl r1,r1,0,32 897 rldicl r1,r1,0,32
907 898
908 /* Restore the MSR (back to 64 bits) */ 899 /* Restore the MSR (back to 64 bits) */
909 ld r0,_MSR(r1) 900 ld r0,_MSR(r1)
910 MTMSRD(r0) 901 MTMSRD(r0)
911 isync 902 isync
912 903
913 /* Restore other registers */ 904 /* Restore other registers */
914 REST_GPR(2, r1) 905 REST_GPR(2, r1)
915 REST_GPR(13, r1) 906 REST_GPR(13, r1)
916 REST_8GPRS(14, r1) 907 REST_8GPRS(14, r1)
917 REST_10GPRS(22, r1) 908 REST_10GPRS(22, r1)
918 ld r4,_CCR(r1) 909 ld r4,_CCR(r1)
919 mtcr r4 910 mtcr r4
920 911
921 addi r1,r1,PROM_FRAME_SIZE 912 addi r1,r1,PROM_FRAME_SIZE
922 ld r0,16(r1) 913 ld r0,16(r1)
923 mtlr r0 914 mtlr r0
924 blr 915 blr
925 916
926 #ifdef CONFIG_FUNCTION_TRACER 917 #ifdef CONFIG_FUNCTION_TRACER
927 #ifdef CONFIG_DYNAMIC_FTRACE 918 #ifdef CONFIG_DYNAMIC_FTRACE
928 _GLOBAL(mcount) 919 _GLOBAL(mcount)
929 _GLOBAL(_mcount) 920 _GLOBAL(_mcount)
930 blr 921 blr
931 922
932 _GLOBAL(ftrace_caller) 923 _GLOBAL(ftrace_caller)
933 /* Taken from output of objdump from lib64/glibc */ 924 /* Taken from output of objdump from lib64/glibc */
934 mflr r3 925 mflr r3
935 ld r11, 0(r1) 926 ld r11, 0(r1)
936 stdu r1, -112(r1) 927 stdu r1, -112(r1)
937 std r3, 128(r1) 928 std r3, 128(r1)
938 ld r4, 16(r11) 929 ld r4, 16(r11)
939 subi r3, r3, MCOUNT_INSN_SIZE 930 subi r3, r3, MCOUNT_INSN_SIZE
940 .globl ftrace_call 931 .globl ftrace_call
941 ftrace_call: 932 ftrace_call:
942 bl ftrace_stub 933 bl ftrace_stub
943 nop 934 nop
944 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 935 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
945 .globl ftrace_graph_call 936 .globl ftrace_graph_call
946 ftrace_graph_call: 937 ftrace_graph_call:
947 b ftrace_graph_stub 938 b ftrace_graph_stub
948 _GLOBAL(ftrace_graph_stub) 939 _GLOBAL(ftrace_graph_stub)
949 #endif 940 #endif
950 ld r0, 128(r1) 941 ld r0, 128(r1)
951 mtlr r0 942 mtlr r0
952 addi r1, r1, 112 943 addi r1, r1, 112
953 _GLOBAL(ftrace_stub) 944 _GLOBAL(ftrace_stub)
954 blr 945 blr
955 #else 946 #else
956 _GLOBAL(mcount) 947 _GLOBAL(mcount)
957 blr 948 blr
958 949
959 _GLOBAL(_mcount) 950 _GLOBAL(_mcount)
960 /* Taken from output of objdump from lib64/glibc */ 951 /* Taken from output of objdump from lib64/glibc */
961 mflr r3 952 mflr r3
962 ld r11, 0(r1) 953 ld r11, 0(r1)
963 stdu r1, -112(r1) 954 stdu r1, -112(r1)
964 std r3, 128(r1) 955 std r3, 128(r1)
965 ld r4, 16(r11) 956 ld r4, 16(r11)
966 957
967 subi r3, r3, MCOUNT_INSN_SIZE 958 subi r3, r3, MCOUNT_INSN_SIZE
968 LOAD_REG_ADDR(r5,ftrace_trace_function) 959 LOAD_REG_ADDR(r5,ftrace_trace_function)
969 ld r5,0(r5) 960 ld r5,0(r5)
970 ld r5,0(r5) 961 ld r5,0(r5)
971 mtctr r5 962 mtctr r5
972 bctrl 963 bctrl
973 nop 964 nop
974 965
975 966
976 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 967 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
977 b ftrace_graph_caller 968 b ftrace_graph_caller
978 #endif 969 #endif
979 ld r0, 128(r1) 970 ld r0, 128(r1)
980 mtlr r0 971 mtlr r0
981 addi r1, r1, 112 972 addi r1, r1, 112
982 _GLOBAL(ftrace_stub) 973 _GLOBAL(ftrace_stub)
983 blr 974 blr
984 975
985 #endif /* CONFIG_DYNAMIC_FTRACE */ 976 #endif /* CONFIG_DYNAMIC_FTRACE */
986 977
987 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 978 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
988 _GLOBAL(ftrace_graph_caller) 979 _GLOBAL(ftrace_graph_caller)
989 /* load r4 with local address */ 980 /* load r4 with local address */
990 ld r4, 128(r1) 981 ld r4, 128(r1)
991 subi r4, r4, MCOUNT_INSN_SIZE 982 subi r4, r4, MCOUNT_INSN_SIZE
992 983
993 /* get the parent address */ 984 /* get the parent address */
994 ld r11, 112(r1) 985 ld r11, 112(r1)
995 addi r3, r11, 16 986 addi r3, r11, 16
996 987
997 bl .prepare_ftrace_return 988 bl .prepare_ftrace_return
998 nop 989 nop
999 990
1000 ld r0, 128(r1) 991 ld r0, 128(r1)
1001 mtlr r0 992 mtlr r0
1002 addi r1, r1, 112 993 addi r1, r1, 112
1003 blr 994 blr
1004 995
1005 _GLOBAL(return_to_handler) 996 _GLOBAL(return_to_handler)
1006 /* need to save return values */ 997 /* need to save return values */
1007 std r4, -24(r1) 998 std r4, -24(r1)
1008 std r3, -16(r1) 999 std r3, -16(r1)
1009 std r31, -8(r1) 1000 std r31, -8(r1)
1010 mr r31, r1 1001 mr r31, r1
1011 stdu r1, -112(r1) 1002 stdu r1, -112(r1)
1012 1003
1013 bl .ftrace_return_to_handler 1004 bl .ftrace_return_to_handler
1014 nop 1005 nop
1015 1006
1016 /* return value has real return address */ 1007 /* return value has real return address */
1017 mtlr r3 1008 mtlr r3
1018 1009
1019 ld r1, 0(r1) 1010 ld r1, 0(r1)
1020 ld r4, -24(r1) 1011 ld r4, -24(r1)
1021 ld r3, -16(r1) 1012 ld r3, -16(r1)
1022 ld r31, -8(r1) 1013 ld r31, -8(r1)
1023 1014
1024 /* Jump back to real return address */ 1015 /* Jump back to real return address */
1025 blr 1016 blr
1026 1017
1027 _GLOBAL(mod_return_to_handler) 1018 _GLOBAL(mod_return_to_handler)
1028 /* need to save return values */ 1019 /* need to save return values */
1029 std r4, -32(r1) 1020 std r4, -32(r1)
1030 std r3, -24(r1) 1021 std r3, -24(r1)
1031 /* save TOC */ 1022 /* save TOC */
1032 std r2, -16(r1) 1023 std r2, -16(r1)
1033 std r31, -8(r1) 1024 std r31, -8(r1)
1034 mr r31, r1 1025 mr r31, r1
1035 stdu r1, -112(r1) 1026 stdu r1, -112(r1)
1036 1027
1037 /* 1028 /*
1038 * We are in a module using the module's TOC. 1029 * We are in a module using the module's TOC.
1039 * Switch to our TOC to run inside the core kernel. 1030 * Switch to our TOC to run inside the core kernel.
1040 */ 1031 */
1041 ld r2, PACATOC(r13) 1032 ld r2, PACATOC(r13)
1042 1033
1043 bl .ftrace_return_to_handler 1034 bl .ftrace_return_to_handler
1044 nop 1035 nop
1045 1036
1046 /* return value has real return address */ 1037 /* return value has real return address */
1047 mtlr r3 1038 mtlr r3
1048 1039
1049 ld r1, 0(r1) 1040 ld r1, 0(r1)
1050 ld r4, -32(r1) 1041 ld r4, -32(r1)
1051 ld r3, -24(r1) 1042 ld r3, -24(r1)
1052 ld r2, -16(r1) 1043 ld r2, -16(r1)
1053 ld r31, -8(r1) 1044 ld r31, -8(r1)
1054 1045
1055 /* Jump back to real return address */ 1046 /* Jump back to real return address */
1056 blr 1047 blr
1057 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1048 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1058 #endif /* CONFIG_FUNCTION_TRACER */ 1049 #endif /* CONFIG_FUNCTION_TRACER */
1059 1050
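In C terms, the _mcount entry path shown earlier in this file boils down to the following free-standing sketch. It is only a model (the real code must preserve the caller's registers and stack frame exactly as the assembly does), and MCOUNT_INSN_SIZE is hard-coded here purely for illustration.

/* Simplified model of the _mcount path: the traced function's address
 * (minus the size of the "bl _mcount" call) and its caller's address
 * are handed to whichever tracer is currently installed. */
#define MCOUNT_INSN_SIZE 4      /* one "bl" instruction on powerpc */

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
extern ftrace_func_t ftrace_trace_function;

static void mcount_sketch(unsigned long ip, unsigned long parent_ip)
{
        ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip);
}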
arch/powerpc/kernel/irq.c
1 /* 1 /*
2 * Derived from arch/i386/kernel/irq.c 2 * Derived from arch/i386/kernel/irq.c
3 * Copyright (C) 1992 Linus Torvalds 3 * Copyright (C) 1992 Linus Torvalds
4 * Adapted from arch/i386 by Gary Thomas 4 * Adapted from arch/i386 by Gary Thomas
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 6 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
7 * Copyright (C) 1996-2001 Cort Dougan 7 * Copyright (C) 1996-2001 Cort Dougan
8 * Adapted for Power Macintosh by Paul Mackerras 8 * Adapted for Power Macintosh by Paul Mackerras
9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 * 15 *
16 * This file contains the code used by various IRQ handling routines: 16 * This file contains the code used by various IRQ handling routines:
17 * asking for different IRQ's should be done through these routines 17 * asking for different IRQ's should be done through these routines
18 * instead of just grabbing them. Thus setups with different IRQ numbers 18 * instead of just grabbing them. Thus setups with different IRQ numbers
19 * shouldn't result in any weird surprises, and installing new handlers 19 * shouldn't result in any weird surprises, and installing new handlers
20 * should be easier. 20 * should be easier.
21 * 21 *
22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
24 * mask register (of which only 16 are defined), hence the weird shifting 24 * mask register (of which only 16 are defined), hence the weird shifting
25 * and complement of the cached_irq_mask. I want to be able to stuff 25 * and complement of the cached_irq_mask. I want to be able to stuff
26 * this right into the SIU SMASK register. 26 * this right into the SIU SMASK register.
27 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx 27 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28 * to reduce code space and undefined function references. 28 * to reduce code space and undefined function references.
29 */ 29 */
30 30
31 #undef DEBUG 31 #undef DEBUG
32 32
33 #include <linux/module.h> 33 #include <linux/module.h>
34 #include <linux/threads.h> 34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h> 35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h> 36 #include <linux/signal.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/ptrace.h> 38 #include <linux/ptrace.h>
39 #include <linux/ioport.h> 39 #include <linux/ioport.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/timex.h> 41 #include <linux/timex.h>
42 #include <linux/init.h> 42 #include <linux/init.h>
43 #include <linux/slab.h> 43 #include <linux/slab.h>
44 #include <linux/delay.h> 44 #include <linux/delay.h>
45 #include <linux/irq.h> 45 #include <linux/irq.h>
46 #include <linux/seq_file.h> 46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h> 47 #include <linux/cpumask.h>
48 #include <linux/profile.h> 48 #include <linux/profile.h>
49 #include <linux/bitops.h> 49 #include <linux/bitops.h>
50 #include <linux/list.h> 50 #include <linux/list.h>
51 #include <linux/radix-tree.h> 51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h> 52 #include <linux/mutex.h>
53 #include <linux/bootmem.h> 53 #include <linux/bootmem.h>
54 #include <linux/pci.h> 54 #include <linux/pci.h>
55 #include <linux/debugfs.h> 55 #include <linux/debugfs.h>
56 #include <linux/perf_event.h>
57 56
58 #include <asm/uaccess.h> 57 #include <asm/uaccess.h>
59 #include <asm/system.h> 58 #include <asm/system.h>
60 #include <asm/io.h> 59 #include <asm/io.h>
61 #include <asm/pgtable.h> 60 #include <asm/pgtable.h>
62 #include <asm/irq.h> 61 #include <asm/irq.h>
63 #include <asm/cache.h> 62 #include <asm/cache.h>
64 #include <asm/prom.h> 63 #include <asm/prom.h>
65 #include <asm/ptrace.h> 64 #include <asm/ptrace.h>
66 #include <asm/machdep.h> 65 #include <asm/machdep.h>
67 #include <asm/udbg.h> 66 #include <asm/udbg.h>
68 #ifdef CONFIG_PPC64 67 #ifdef CONFIG_PPC64
69 #include <asm/paca.h> 68 #include <asm/paca.h>
70 #include <asm/firmware.h> 69 #include <asm/firmware.h>
71 #include <asm/lv1call.h> 70 #include <asm/lv1call.h>
72 #endif 71 #endif
73 #define CREATE_TRACE_POINTS 72 #define CREATE_TRACE_POINTS
74 #include <asm/trace.h> 73 #include <asm/trace.h>
75 74
76 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 75 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
77 EXPORT_PER_CPU_SYMBOL(irq_stat); 76 EXPORT_PER_CPU_SYMBOL(irq_stat);
78 77
79 int __irq_offset_value; 78 int __irq_offset_value;
80 79
81 #ifdef CONFIG_PPC32 80 #ifdef CONFIG_PPC32
82 EXPORT_SYMBOL(__irq_offset_value); 81 EXPORT_SYMBOL(__irq_offset_value);
83 atomic_t ppc_n_lost_interrupts; 82 atomic_t ppc_n_lost_interrupts;
84 83
85 #ifdef CONFIG_TAU_INT 84 #ifdef CONFIG_TAU_INT
86 extern int tau_initialized; 85 extern int tau_initialized;
87 extern int tau_interrupts(int); 86 extern int tau_interrupts(int);
88 #endif 87 #endif
89 #endif /* CONFIG_PPC32 */ 88 #endif /* CONFIG_PPC32 */
90 89
91 #ifdef CONFIG_PPC64 90 #ifdef CONFIG_PPC64
92 91
93 #ifndef CONFIG_SPARSE_IRQ 92 #ifndef CONFIG_SPARSE_IRQ
94 EXPORT_SYMBOL(irq_desc); 93 EXPORT_SYMBOL(irq_desc);
95 #endif 94 #endif
96 95
97 int distribute_irqs = 1; 96 int distribute_irqs = 1;
98 97
99 static inline notrace unsigned long get_hard_enabled(void) 98 static inline notrace unsigned long get_hard_enabled(void)
100 { 99 {
101 unsigned long enabled; 100 unsigned long enabled;
102 101
103 __asm__ __volatile__("lbz %0,%1(13)" 102 __asm__ __volatile__("lbz %0,%1(13)"
104 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 103 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
105 104
106 return enabled; 105 return enabled;
107 } 106 }
108 107
109 static inline notrace void set_soft_enabled(unsigned long enable) 108 static inline notrace void set_soft_enabled(unsigned long enable)
110 { 109 {
111 __asm__ __volatile__("stb %0,%1(13)" 110 __asm__ __volatile__("stb %0,%1(13)"
112 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 111 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
113 } 112 }
114 113
115 notrace void raw_local_irq_restore(unsigned long en) 114 notrace void raw_local_irq_restore(unsigned long en)
116 { 115 {
117 /* 116 /*
118 * get_paca()->soft_enabled = en; 117 * get_paca()->soft_enabled = en;
119 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 118 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
120 * That was allowed before, and in such a case we do need to take care 119 * That was allowed before, and in such a case we do need to take care
121 * that gcc will set soft_enabled directly via r13, not choose to use 120 * that gcc will set soft_enabled directly via r13, not choose to use
122 * an intermediate register, lest we're preempted to a different cpu. 121 * an intermediate register, lest we're preempted to a different cpu.
123 */ 122 */
124 set_soft_enabled(en); 123 set_soft_enabled(en);
125 if (!en) 124 if (!en)
126 return; 125 return;
127 126
128 #ifdef CONFIG_PPC_STD_MMU_64 127 #ifdef CONFIG_PPC_STD_MMU_64
129 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 128 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
130 /* 129 /*
131 * Do we need to disable preemption here? Not really: in the 130 * Do we need to disable preemption here? Not really: in the
132 * unlikely event that we're preempted to a different cpu in 131 * unlikely event that we're preempted to a different cpu in
133 * between getting r13, loading its lppaca_ptr, and loading 132 * between getting r13, loading its lppaca_ptr, and loading
134 * its any_int, we might call iseries_handle_interrupts without 133 * its any_int, we might call iseries_handle_interrupts without
135 * an interrupt pending on the new cpu, but that's no disaster, 134 * an interrupt pending on the new cpu, but that's no disaster,
136 * is it? And the business of preempting us off the old cpu 135 * is it? And the business of preempting us off the old cpu
137 * would itself involve a local_irq_restore which handles the 136 * would itself involve a local_irq_restore which handles the
138 * interrupt to that cpu. 137 * interrupt to that cpu.
139 * 138 *
140 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" 139 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
141 * to avoid any preemption checking added into get_paca(). 140 * to avoid any preemption checking added into get_paca().
142 */ 141 */
143 if (local_paca->lppaca_ptr->int_dword.any_int) 142 if (local_paca->lppaca_ptr->int_dword.any_int)
144 iseries_handle_interrupts(); 143 iseries_handle_interrupts();
145 } 144 }
146 #endif /* CONFIG_PPC_STD_MMU_64 */ 145 #endif /* CONFIG_PPC_STD_MMU_64 */
147
148 if (test_perf_event_pending()) {
149 clear_perf_event_pending();
150 perf_event_do_pending();
151 }
152 146
153 /* 147 /*
154 * if (get_paca()->hard_enabled) return; 148 * if (get_paca()->hard_enabled) return;
155 * But again we need to take care that gcc gets hard_enabled directly 149 * But again we need to take care that gcc gets hard_enabled directly
156 * via r13, not choose to use an intermediate register, lest we're 150 * via r13, not choose to use an intermediate register, lest we're
157 * preempted to a different cpu in between the two instructions. 151 * preempted to a different cpu in between the two instructions.
158 */ 152 */
159 if (get_hard_enabled()) 153 if (get_hard_enabled())
160 return; 154 return;
161 155
162 /* 156 /*
163 * Need to hard-enable interrupts here. Since currently disabled, 157 * Need to hard-enable interrupts here. Since currently disabled,
164 * no need to take further asm precautions against preemption; but 158 * no need to take further asm precautions against preemption; but
165 * use local_paca instead of get_paca() to avoid preemption checking. 159 * use local_paca instead of get_paca() to avoid preemption checking.
166 */ 160 */
167 local_paca->hard_enabled = en; 161 local_paca->hard_enabled = en;
168 if ((int)mfspr(SPRN_DEC) < 0) 162 if ((int)mfspr(SPRN_DEC) < 0)
169 mtspr(SPRN_DEC, 1); 163 mtspr(SPRN_DEC, 1);
170 164
171 /* 165 /*
172 * Force the delivery of pending soft-disabled interrupts on PS3. 166 * Force the delivery of pending soft-disabled interrupts on PS3.
173 * Any HV call will have this side effect. 167 * Any HV call will have this side effect.
174 */ 168 */
175 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 169 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
176 u64 tmp; 170 u64 tmp;
177 lv1_get_version_info(&tmp); 171 lv1_get_version_info(&tmp);
178 } 172 }
179 173
180 __hard_irq_enable(); 174 __hard_irq_enable();
181 } 175 }
182 EXPORT_SYMBOL(raw_local_irq_restore); 176 EXPORT_SYMBOL(raw_local_irq_restore);
183 #endif /* CONFIG_PPC64 */ 177 #endif /* CONFIG_PPC64 */
184 178
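The control flow implemented by raw_local_irq_restore() above can be summarized by this simplified sketch. It only models the logic: the real code must reach the PACA fields through r13 without an intermediate register (as the comments explain), and the iSeries and PS3 special cases are left out.

/* Simplified model of the 64-bit lazy interrupt-enable logic. */
static unsigned long soft_enabled_model;        /* stands in for paca->soft_enabled */
static unsigned long hard_enabled_model;        /* stands in for paca->hard_enabled */

static void irq_restore_sketch(unsigned long en)
{
        soft_enabled_model = en;
        if (!en)
                return;                 /* still soft-disabled: nothing more to do */

        if (hard_enabled_model)
                return;                 /* MSR:EE was never actually cleared */

        /* Interrupts were hard-disabled behind our back while we were
         * soft-disabled; mark ourselves hard-enabled, re-arm an expired
         * decrementer and re-enable MSR:EE.  In the real code the last
         * two steps are mtspr(SPRN_DEC, 1) and __hard_irq_enable(). */
        hard_enabled_model = en;
}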
185 static int show_other_interrupts(struct seq_file *p, int prec) 179 static int show_other_interrupts(struct seq_file *p, int prec)
186 { 180 {
187 int j; 181 int j;
188 182
189 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 183 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
190 if (tau_initialized) { 184 if (tau_initialized) {
191 seq_printf(p, "%*s: ", prec, "TAU"); 185 seq_printf(p, "%*s: ", prec, "TAU");
192 for_each_online_cpu(j) 186 for_each_online_cpu(j)
193 seq_printf(p, "%10u ", tau_interrupts(j)); 187 seq_printf(p, "%10u ", tau_interrupts(j));
194 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 188 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
195 } 189 }
196 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ 190 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
197 191
198 seq_printf(p, "%*s: ", prec, "LOC"); 192 seq_printf(p, "%*s: ", prec, "LOC");
199 for_each_online_cpu(j) 193 for_each_online_cpu(j)
200 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 194 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
201 seq_printf(p, " Local timer interrupts\n"); 195 seq_printf(p, " Local timer interrupts\n");
202 196
203 seq_printf(p, "%*s: ", prec, "SPU"); 197 seq_printf(p, "%*s: ", prec, "SPU");
204 for_each_online_cpu(j) 198 for_each_online_cpu(j)
205 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 199 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
206 seq_printf(p, " Spurious interrupts\n"); 200 seq_printf(p, " Spurious interrupts\n");
207 201
208 seq_printf(p, "%*s: ", prec, "CNT"); 202 seq_printf(p, "%*s: ", prec, "CNT");
209 for_each_online_cpu(j) 203 for_each_online_cpu(j)
210 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 204 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
211 seq_printf(p, " Performance monitoring interrupts\n"); 205 seq_printf(p, " Performance monitoring interrupts\n");
212 206
213 seq_printf(p, "%*s: ", prec, "MCE"); 207 seq_printf(p, "%*s: ", prec, "MCE");
214 for_each_online_cpu(j) 208 for_each_online_cpu(j)
215 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 209 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
216 seq_printf(p, " Machine check exceptions\n"); 210 seq_printf(p, " Machine check exceptions\n");
217 211
218 return 0; 212 return 0;
219 } 213 }
220 214
221 int show_interrupts(struct seq_file *p, void *v) 215 int show_interrupts(struct seq_file *p, void *v)
222 { 216 {
223 unsigned long flags, any_count = 0; 217 unsigned long flags, any_count = 0;
224 int i = *(loff_t *) v, j, prec; 218 int i = *(loff_t *) v, j, prec;
225 struct irqaction *action; 219 struct irqaction *action;
226 struct irq_desc *desc; 220 struct irq_desc *desc;
227 221
228 if (i > nr_irqs) 222 if (i > nr_irqs)
229 return 0; 223 return 0;
230 224
231 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) 225 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
232 j *= 10; 226 j *= 10;
233 227
234 if (i == nr_irqs) 228 if (i == nr_irqs)
235 return show_other_interrupts(p, prec); 229 return show_other_interrupts(p, prec);
236 230
237 /* print header */ 231 /* print header */
238 if (i == 0) { 232 if (i == 0) {
239 seq_printf(p, "%*s", prec + 8, ""); 233 seq_printf(p, "%*s", prec + 8, "");
240 for_each_online_cpu(j) 234 for_each_online_cpu(j)
241 seq_printf(p, "CPU%-8d", j); 235 seq_printf(p, "CPU%-8d", j);
242 seq_putc(p, '\n'); 236 seq_putc(p, '\n');
243 } 237 }
244 238
245 desc = irq_to_desc(i); 239 desc = irq_to_desc(i);
246 if (!desc) 240 if (!desc)
247 return 0; 241 return 0;
248 242
249 raw_spin_lock_irqsave(&desc->lock, flags); 243 raw_spin_lock_irqsave(&desc->lock, flags);
250 for_each_online_cpu(j) 244 for_each_online_cpu(j)
251 any_count |= kstat_irqs_cpu(i, j); 245 any_count |= kstat_irqs_cpu(i, j);
252 action = desc->action; 246 action = desc->action;
253 if (!action && !any_count) 247 if (!action && !any_count)
254 goto out; 248 goto out;
255 249
256 seq_printf(p, "%*d: ", prec, i); 250 seq_printf(p, "%*d: ", prec, i);
257 for_each_online_cpu(j) 251 for_each_online_cpu(j)
258 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 252 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
259 253
260 if (desc->chip) 254 if (desc->chip)
261 seq_printf(p, " %-16s", desc->chip->name); 255 seq_printf(p, " %-16s", desc->chip->name);
262 else 256 else
263 seq_printf(p, " %-16s", "None"); 257 seq_printf(p, " %-16s", "None");
264 seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); 258 seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
265 259
266 if (action) { 260 if (action) {
267 seq_printf(p, " %s", action->name); 261 seq_printf(p, " %s", action->name);
268 while ((action = action->next) != NULL) 262 while ((action = action->next) != NULL)
269 seq_printf(p, ", %s", action->name); 263 seq_printf(p, ", %s", action->name);
270 } 264 }
271 265
272 seq_putc(p, '\n'); 266 seq_putc(p, '\n');
273 out: 267 out:
274 raw_spin_unlock_irqrestore(&desc->lock, flags); 268 raw_spin_unlock_irqrestore(&desc->lock, flags);
275 return 0; 269 return 0;
276 } 270 }
277 271
278 /* 272 /*
279 * /proc/stat helpers 273 * /proc/stat helpers
280 */ 274 */
281 u64 arch_irq_stat_cpu(unsigned int cpu) 275 u64 arch_irq_stat_cpu(unsigned int cpu)
282 { 276 {
283 u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 277 u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
284 278
285 sum += per_cpu(irq_stat, cpu).pmu_irqs; 279 sum += per_cpu(irq_stat, cpu).pmu_irqs;
286 sum += per_cpu(irq_stat, cpu).mce_exceptions; 280 sum += per_cpu(irq_stat, cpu).mce_exceptions;
287 sum += per_cpu(irq_stat, cpu).spurious_irqs; 281 sum += per_cpu(irq_stat, cpu).spurious_irqs;
288 282
289 return sum; 283 return sum;
290 } 284 }
291 285
292 #ifdef CONFIG_HOTPLUG_CPU 286 #ifdef CONFIG_HOTPLUG_CPU
293 void fixup_irqs(cpumask_t map) 287 void fixup_irqs(cpumask_t map)
294 { 288 {
295 struct irq_desc *desc; 289 struct irq_desc *desc;
296 unsigned int irq; 290 unsigned int irq;
297 static int warned; 291 static int warned;
298 292
299 for_each_irq(irq) { 293 for_each_irq(irq) {
300 cpumask_t mask; 294 cpumask_t mask;
301 295
302 desc = irq_to_desc(irq); 296 desc = irq_to_desc(irq);
303 if (desc && desc->status & IRQ_PER_CPU) 297 if (desc && desc->status & IRQ_PER_CPU)
304 continue; 298 continue;
305 299
306 cpumask_and(&mask, desc->affinity, &map); 300 cpumask_and(&mask, desc->affinity, &map);
307 if (any_online_cpu(mask) == NR_CPUS) { 301 if (any_online_cpu(mask) == NR_CPUS) {
308 printk("Breaking affinity for irq %i\n", irq); 302 printk("Breaking affinity for irq %i\n", irq);
309 mask = map; 303 mask = map;
310 } 304 }
311 if (desc->chip->set_affinity) 305 if (desc->chip->set_affinity)
312 desc->chip->set_affinity(irq, &mask); 306 desc->chip->set_affinity(irq, &mask);
313 else if (desc->action && !(warned++)) 307 else if (desc->action && !(warned++))
314 printk("Cannot set affinity for irq %i\n", irq); 308 printk("Cannot set affinity for irq %i\n", irq);
315 } 309 }
316 310
317 local_irq_enable(); 311 local_irq_enable();
318 mdelay(1); 312 mdelay(1);
319 local_irq_disable(); 313 local_irq_disable();
320 } 314 }
321 #endif 315 #endif
322 316
323 #ifdef CONFIG_IRQSTACKS 317 #ifdef CONFIG_IRQSTACKS
324 static inline void handle_one_irq(unsigned int irq) 318 static inline void handle_one_irq(unsigned int irq)
325 { 319 {
326 struct thread_info *curtp, *irqtp; 320 struct thread_info *curtp, *irqtp;
327 unsigned long saved_sp_limit; 321 unsigned long saved_sp_limit;
328 struct irq_desc *desc; 322 struct irq_desc *desc;
329 323
330 /* Switch to the irq stack to handle this */ 324 /* Switch to the irq stack to handle this */
331 curtp = current_thread_info(); 325 curtp = current_thread_info();
332 irqtp = hardirq_ctx[smp_processor_id()]; 326 irqtp = hardirq_ctx[smp_processor_id()];
333 327
334 if (curtp == irqtp) { 328 if (curtp == irqtp) {
335 /* We're already on the irq stack, just handle it */ 329 /* We're already on the irq stack, just handle it */
336 generic_handle_irq(irq); 330 generic_handle_irq(irq);
337 return; 331 return;
338 } 332 }
339 333
340 desc = irq_to_desc(irq); 334 desc = irq_to_desc(irq);
341 saved_sp_limit = current->thread.ksp_limit; 335 saved_sp_limit = current->thread.ksp_limit;
342 336
343 irqtp->task = curtp->task; 337 irqtp->task = curtp->task;
344 irqtp->flags = 0; 338 irqtp->flags = 0;
345 339
346 /* Copy the softirq bits in preempt_count so that the 340 /* Copy the softirq bits in preempt_count so that the
347 * softirq checks work in the hardirq context. */ 341 * softirq checks work in the hardirq context. */
348 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | 342 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
349 (curtp->preempt_count & SOFTIRQ_MASK); 343 (curtp->preempt_count & SOFTIRQ_MASK);
350 344
351 current->thread.ksp_limit = (unsigned long)irqtp + 345 current->thread.ksp_limit = (unsigned long)irqtp +
352 _ALIGN_UP(sizeof(struct thread_info), 16); 346 _ALIGN_UP(sizeof(struct thread_info), 16);
353 347
354 call_handle_irq(irq, desc, irqtp, desc->handle_irq); 348 call_handle_irq(irq, desc, irqtp, desc->handle_irq);
355 current->thread.ksp_limit = saved_sp_limit; 349 current->thread.ksp_limit = saved_sp_limit;
356 irqtp->task = NULL; 350 irqtp->task = NULL;
357 351
358 /* Set any flag that may have been set on the 352 /* Set any flag that may have been set on the
359 * alternate stack 353 * alternate stack
360 */ 354 */
361 if (irqtp->flags) 355 if (irqtp->flags)
362 set_bits(irqtp->flags, &curtp->flags); 356 set_bits(irqtp->flags, &curtp->flags);
363 } 357 }
364 #else 358 #else
365 static inline void handle_one_irq(unsigned int irq) 359 static inline void handle_one_irq(unsigned int irq)
366 { 360 {
367 generic_handle_irq(irq); 361 generic_handle_irq(irq);
368 } 362 }
369 #endif 363 #endif
370 364
371 static inline void check_stack_overflow(void) 365 static inline void check_stack_overflow(void)
372 { 366 {
373 #ifdef CONFIG_DEBUG_STACKOVERFLOW 367 #ifdef CONFIG_DEBUG_STACKOVERFLOW
374 long sp; 368 long sp;
375 369
376 sp = __get_SP() & (THREAD_SIZE-1); 370 sp = __get_SP() & (THREAD_SIZE-1);
377 371
378 /* check for stack overflow: is there less than 2KB free? */ 372 /* check for stack overflow: is there less than 2KB free? */
379 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 373 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
380 printk("do_IRQ: stack overflow: %ld\n", 374 printk("do_IRQ: stack overflow: %ld\n",
381 sp - sizeof(struct thread_info)); 375 sp - sizeof(struct thread_info));
382 dump_stack(); 376 dump_stack();
383 } 377 }
384 #endif 378 #endif
385 } 379 }
386 380
387 void do_IRQ(struct pt_regs *regs) 381 void do_IRQ(struct pt_regs *regs)
388 { 382 {
389 struct pt_regs *old_regs = set_irq_regs(regs); 383 struct pt_regs *old_regs = set_irq_regs(regs);
390 unsigned int irq; 384 unsigned int irq;
391 385
392 trace_irq_entry(regs); 386 trace_irq_entry(regs);
393 387
394 irq_enter(); 388 irq_enter();
395 389
396 check_stack_overflow(); 390 check_stack_overflow();
397 391
398 irq = ppc_md.get_irq(); 392 irq = ppc_md.get_irq();
399 393
400 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 394 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
401 handle_one_irq(irq); 395 handle_one_irq(irq);
402 else if (irq != NO_IRQ_IGNORE) 396 else if (irq != NO_IRQ_IGNORE)
403 __get_cpu_var(irq_stat).spurious_irqs++; 397 __get_cpu_var(irq_stat).spurious_irqs++;
404 398
405 irq_exit(); 399 irq_exit();
406 set_irq_regs(old_regs); 400 set_irq_regs(old_regs);
407 401
408 #ifdef CONFIG_PPC_ISERIES 402 #ifdef CONFIG_PPC_ISERIES
409 if (firmware_has_feature(FW_FEATURE_ISERIES) && 403 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
410 get_lppaca()->int_dword.fields.decr_int) { 404 get_lppaca()->int_dword.fields.decr_int) {
411 get_lppaca()->int_dword.fields.decr_int = 0; 405 get_lppaca()->int_dword.fields.decr_int = 0;
412 /* Signal a fake decrementer interrupt */ 406 /* Signal a fake decrementer interrupt */
413 timer_interrupt(regs); 407 timer_interrupt(regs);
414 } 408 }
415 #endif 409 #endif
416 410
417 trace_irq_exit(regs); 411 trace_irq_exit(regs);
418 } 412 }
419 413
420 void __init init_IRQ(void) 414 void __init init_IRQ(void)
421 { 415 {
422 if (ppc_md.init_IRQ) 416 if (ppc_md.init_IRQ)
423 ppc_md.init_IRQ(); 417 ppc_md.init_IRQ();
424 418
425 exc_lvl_ctx_init(); 419 exc_lvl_ctx_init();
426 420
427 irq_ctx_init(); 421 irq_ctx_init();
428 } 422 }
429 423
430 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 424 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
431 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; 425 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
432 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; 426 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
433 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; 427 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
434 428
435 void exc_lvl_ctx_init(void) 429 void exc_lvl_ctx_init(void)
436 { 430 {
437 struct thread_info *tp; 431 struct thread_info *tp;
438 int i; 432 int i;
439 433
440 for_each_possible_cpu(i) { 434 for_each_possible_cpu(i) {
441 memset((void *)critirq_ctx[i], 0, THREAD_SIZE); 435 memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
442 tp = critirq_ctx[i]; 436 tp = critirq_ctx[i];
443 tp->cpu = i; 437 tp->cpu = i;
444 tp->preempt_count = 0; 438 tp->preempt_count = 0;
445 439
446 #ifdef CONFIG_BOOKE 440 #ifdef CONFIG_BOOKE
447 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); 441 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
448 tp = dbgirq_ctx[i]; 442 tp = dbgirq_ctx[i];
449 tp->cpu = i; 443 tp->cpu = i;
450 tp->preempt_count = 0; 444 tp->preempt_count = 0;
451 445
452 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); 446 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
453 tp = mcheckirq_ctx[i]; 447 tp = mcheckirq_ctx[i];
454 tp->cpu = i; 448 tp->cpu = i;
455 tp->preempt_count = HARDIRQ_OFFSET; 449 tp->preempt_count = HARDIRQ_OFFSET;
456 #endif 450 #endif
457 } 451 }
458 } 452 }
459 #endif 453 #endif
460 454
461 #ifdef CONFIG_IRQSTACKS 455 #ifdef CONFIG_IRQSTACKS
462 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 456 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
463 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 457 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
464 458
465 void irq_ctx_init(void) 459 void irq_ctx_init(void)
466 { 460 {
467 struct thread_info *tp; 461 struct thread_info *tp;
468 int i; 462 int i;
469 463
470 for_each_possible_cpu(i) { 464 for_each_possible_cpu(i) {
471 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 465 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
472 tp = softirq_ctx[i]; 466 tp = softirq_ctx[i];
473 tp->cpu = i; 467 tp->cpu = i;
474 tp->preempt_count = 0; 468 tp->preempt_count = 0;
475 469
476 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 470 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
477 tp = hardirq_ctx[i]; 471 tp = hardirq_ctx[i];
478 tp->cpu = i; 472 tp->cpu = i;
479 tp->preempt_count = HARDIRQ_OFFSET; 473 tp->preempt_count = HARDIRQ_OFFSET;
480 } 474 }
481 } 475 }
482 476
483 static inline void do_softirq_onstack(void) 477 static inline void do_softirq_onstack(void)
484 { 478 {
485 struct thread_info *curtp, *irqtp; 479 struct thread_info *curtp, *irqtp;
486 unsigned long saved_sp_limit = current->thread.ksp_limit; 480 unsigned long saved_sp_limit = current->thread.ksp_limit;
487 481
488 curtp = current_thread_info(); 482 curtp = current_thread_info();
489 irqtp = softirq_ctx[smp_processor_id()]; 483 irqtp = softirq_ctx[smp_processor_id()];
490 irqtp->task = curtp->task; 484 irqtp->task = curtp->task;
491 current->thread.ksp_limit = (unsigned long)irqtp + 485 current->thread.ksp_limit = (unsigned long)irqtp +
492 _ALIGN_UP(sizeof(struct thread_info), 16); 486 _ALIGN_UP(sizeof(struct thread_info), 16);
493 call_do_softirq(irqtp); 487 call_do_softirq(irqtp);
494 current->thread.ksp_limit = saved_sp_limit; 488 current->thread.ksp_limit = saved_sp_limit;
495 irqtp->task = NULL; 489 irqtp->task = NULL;
496 } 490 }
497 491
498 #else 492 #else
499 #define do_softirq_onstack() __do_softirq() 493 #define do_softirq_onstack() __do_softirq()
500 #endif /* CONFIG_IRQSTACKS */ 494 #endif /* CONFIG_IRQSTACKS */
501 495
502 void do_softirq(void) 496 void do_softirq(void)
503 { 497 {
504 unsigned long flags; 498 unsigned long flags;
505 499
506 if (in_interrupt()) 500 if (in_interrupt())
507 return; 501 return;
508 502
509 local_irq_save(flags); 503 local_irq_save(flags);
510 504
511 if (local_softirq_pending()) 505 if (local_softirq_pending())
512 do_softirq_onstack(); 506 do_softirq_onstack();
513 507
514 local_irq_restore(flags); 508 local_irq_restore(flags);
515 } 509 }
516 510
517 511
518 /* 512 /*
519 * IRQ controller and virtual interrupts 513 * IRQ controller and virtual interrupts
520 */ 514 */
521 515
522 static LIST_HEAD(irq_hosts); 516 static LIST_HEAD(irq_hosts);
523 static DEFINE_RAW_SPINLOCK(irq_big_lock); 517 static DEFINE_RAW_SPINLOCK(irq_big_lock);
524 static unsigned int revmap_trees_allocated; 518 static unsigned int revmap_trees_allocated;
525 static DEFINE_MUTEX(revmap_trees_mutex); 519 static DEFINE_MUTEX(revmap_trees_mutex);
526 struct irq_map_entry irq_map[NR_IRQS]; 520 struct irq_map_entry irq_map[NR_IRQS];
527 static unsigned int irq_virq_count = NR_IRQS; 521 static unsigned int irq_virq_count = NR_IRQS;
528 static struct irq_host *irq_default_host; 522 static struct irq_host *irq_default_host;
529 523
530 irq_hw_number_t virq_to_hw(unsigned int virq) 524 irq_hw_number_t virq_to_hw(unsigned int virq)
531 { 525 {
532 return irq_map[virq].hwirq; 526 return irq_map[virq].hwirq;
533 } 527 }
534 EXPORT_SYMBOL_GPL(virq_to_hw); 528 EXPORT_SYMBOL_GPL(virq_to_hw);
535 529
536 static int default_irq_host_match(struct irq_host *h, struct device_node *np) 530 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
537 { 531 {
538 return h->of_node != NULL && h->of_node == np; 532 return h->of_node != NULL && h->of_node == np;
539 } 533 }
540 534
541 struct irq_host *irq_alloc_host(struct device_node *of_node, 535 struct irq_host *irq_alloc_host(struct device_node *of_node,
542 unsigned int revmap_type, 536 unsigned int revmap_type,
543 unsigned int revmap_arg, 537 unsigned int revmap_arg,
544 struct irq_host_ops *ops, 538 struct irq_host_ops *ops,
545 irq_hw_number_t inval_irq) 539 irq_hw_number_t inval_irq)
546 { 540 {
547 struct irq_host *host; 541 struct irq_host *host;
548 unsigned int size = sizeof(struct irq_host); 542 unsigned int size = sizeof(struct irq_host);
549 unsigned int i; 543 unsigned int i;
550 unsigned int *rmap; 544 unsigned int *rmap;
551 unsigned long flags; 545 unsigned long flags;
552 546
553 /* Allocate structure and revmap table if using linear mapping */ 547 /* Allocate structure and revmap table if using linear mapping */
554 if (revmap_type == IRQ_HOST_MAP_LINEAR) 548 if (revmap_type == IRQ_HOST_MAP_LINEAR)
555 size += revmap_arg * sizeof(unsigned int); 549 size += revmap_arg * sizeof(unsigned int);
556 host = zalloc_maybe_bootmem(size, GFP_KERNEL); 550 host = zalloc_maybe_bootmem(size, GFP_KERNEL);
557 if (host == NULL) 551 if (host == NULL)
558 return NULL; 552 return NULL;
559 553
560 /* Fill structure */ 554 /* Fill structure */
561 host->revmap_type = revmap_type; 555 host->revmap_type = revmap_type;
562 host->inval_irq = inval_irq; 556 host->inval_irq = inval_irq;
563 host->ops = ops; 557 host->ops = ops;
564 host->of_node = of_node_get(of_node); 558 host->of_node = of_node_get(of_node);
565 559
566 if (host->ops->match == NULL) 560 if (host->ops->match == NULL)
567 host->ops->match = default_irq_host_match; 561 host->ops->match = default_irq_host_match;
568 562
569 raw_spin_lock_irqsave(&irq_big_lock, flags); 563 raw_spin_lock_irqsave(&irq_big_lock, flags);
570 564
571 /* If it's a legacy controller, check for duplicates and 565 /* If it's a legacy controller, check for duplicates and
572 * mark it as allocated (we use irq 0 host pointer for that 566 * mark it as allocated (we use irq 0 host pointer for that
573 */ 567 */
574 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 568 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
575 if (irq_map[0].host != NULL) { 569 if (irq_map[0].host != NULL) {
576 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 570 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
577 /* If we are early boot, we can't free the structure, 571 /* If we are early boot, we can't free the structure,
578 * too bad... 572 * too bad...
579 * this will be fixed once slab is made available early 573 * this will be fixed once slab is made available early
580 * instead of the current cruft 574 * instead of the current cruft
581 */ 575 */
582 if (mem_init_done) 576 if (mem_init_done)
583 kfree(host); 577 kfree(host);
584 return NULL; 578 return NULL;
585 } 579 }
586 irq_map[0].host = host; 580 irq_map[0].host = host;
587 } 581 }
588 582
589 list_add(&host->link, &irq_hosts); 583 list_add(&host->link, &irq_hosts);
590 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 584 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
591 585
592 /* Additional setups per revmap type */ 586 /* Additional setups per revmap type */
593 switch(revmap_type) { 587 switch(revmap_type) {
594 case IRQ_HOST_MAP_LEGACY: 588 case IRQ_HOST_MAP_LEGACY:
595 /* 0 is always the invalid number for legacy */ 589 /* 0 is always the invalid number for legacy */
596 host->inval_irq = 0; 590 host->inval_irq = 0;
597 /* setup us as the host for all legacy interrupts */ 591 /* setup us as the host for all legacy interrupts */
598 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 592 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
599 irq_map[i].hwirq = i; 593 irq_map[i].hwirq = i;
600 smp_wmb(); 594 smp_wmb();
601 irq_map[i].host = host; 595 irq_map[i].host = host;
602 smp_wmb(); 596 smp_wmb();
603 597
604 /* Clear norequest flags */ 598 /* Clear norequest flags */
605 irq_to_desc(i)->status &= ~IRQ_NOREQUEST; 599 irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
606 600
607 /* Legacy flags are left to default at this point, 601 /* Legacy flags are left to default at this point,
608 * one can then use irq_create_mapping() to 602 * one can then use irq_create_mapping() to
609 * explicitly change them 603 * explicitly change them
610 */ 604 */
611 ops->map(host, i, i); 605 ops->map(host, i, i);
612 } 606 }
613 break; 607 break;
614 case IRQ_HOST_MAP_LINEAR: 608 case IRQ_HOST_MAP_LINEAR:
615 rmap = (unsigned int *)(host + 1); 609 rmap = (unsigned int *)(host + 1);
616 for (i = 0; i < revmap_arg; i++) 610 for (i = 0; i < revmap_arg; i++)
617 rmap[i] = NO_IRQ; 611 rmap[i] = NO_IRQ;
618 host->revmap_data.linear.size = revmap_arg; 612 host->revmap_data.linear.size = revmap_arg;
619 smp_wmb(); 613 smp_wmb();
620 host->revmap_data.linear.revmap = rmap; 614 host->revmap_data.linear.revmap = rmap;
621 break; 615 break;
622 default: 616 default:
623 break; 617 break;
624 } 618 }
625 619
626 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 620 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
627 621
628 return host; 622 return host;
629 } 623 }
630 624
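As a usage illustration (not part of this commit), a platform interrupt controller with, say, 32 hardware sources could register an irq_host roughly as follows; my_pic_chip, the my_pic_* names and np are placeholders for the controller's own irq_chip, callbacks and device node.

static struct irq_chip my_pic_chip;     /* mask/unmask/ack callbacks omitted */

static int my_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw)
{
        /* Attach the controller's irq_chip and a level-type flow handler. */
        set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
        return 0;
}

static struct irq_host_ops my_pic_ops = {
        .map = my_pic_map,
};

static struct irq_host *my_pic_host;

static void __init my_pic_init(struct device_node *np)
{
        my_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
                                     32,            /* linear revmap size */
                                     &my_pic_ops,
                                     0);            /* invalid hwirq number */
}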
631 struct irq_host *irq_find_host(struct device_node *node) 625 struct irq_host *irq_find_host(struct device_node *node)
632 { 626 {
633 struct irq_host *h, *found = NULL; 627 struct irq_host *h, *found = NULL;
634 unsigned long flags; 628 unsigned long flags;
635 629
636 /* We might want to match the legacy controller last since 630 /* We might want to match the legacy controller last since
637 * it might potentially be set to match all interrupts in 631 * it might potentially be set to match all interrupts in
638 * the absence of a device node. This isn't a problem so far 632 * the absence of a device node. This isn't a problem so far
639 * yet though... 633 * yet though...
640 */ 634 */
641 raw_spin_lock_irqsave(&irq_big_lock, flags); 635 raw_spin_lock_irqsave(&irq_big_lock, flags);
642 list_for_each_entry(h, &irq_hosts, link) 636 list_for_each_entry(h, &irq_hosts, link)
643 if (h->ops->match(h, node)) { 637 if (h->ops->match(h, node)) {
644 found = h; 638 found = h;
645 break; 639 break;
646 } 640 }
647 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 641 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
648 return found; 642 return found;
649 } 643 }
650 EXPORT_SYMBOL_GPL(irq_find_host); 644 EXPORT_SYMBOL_GPL(irq_find_host);
651 645
652 void irq_set_default_host(struct irq_host *host) 646 void irq_set_default_host(struct irq_host *host)
653 { 647 {
654 pr_debug("irq: Default host set to @0x%p\n", host); 648 pr_debug("irq: Default host set to @0x%p\n", host);
655 649
656 irq_default_host = host; 650 irq_default_host = host;
657 } 651 }
658 652
659 void irq_set_virq_count(unsigned int count) 653 void irq_set_virq_count(unsigned int count)
660 { 654 {
661 pr_debug("irq: Trying to set virq count to %d\n", count); 655 pr_debug("irq: Trying to set virq count to %d\n", count);
662 656
663 BUG_ON(count < NUM_ISA_INTERRUPTS); 657 BUG_ON(count < NUM_ISA_INTERRUPTS);
664 if (count < NR_IRQS) 658 if (count < NR_IRQS)
665 irq_virq_count = count; 659 irq_virq_count = count;
666 } 660 }
667 661
668 static int irq_setup_virq(struct irq_host *host, unsigned int virq, 662 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
669 irq_hw_number_t hwirq) 663 irq_hw_number_t hwirq)
670 { 664 {
671 struct irq_desc *desc; 665 struct irq_desc *desc;
672 666
673 desc = irq_to_desc_alloc_node(virq, 0); 667 desc = irq_to_desc_alloc_node(virq, 0);
674 if (!desc) { 668 if (!desc) {
675 pr_debug("irq: -> allocating desc failed\n"); 669 pr_debug("irq: -> allocating desc failed\n");
676 goto error; 670 goto error;
677 } 671 }
678 672
679 /* Clear IRQ_NOREQUEST flag */ 673 /* Clear IRQ_NOREQUEST flag */
680 desc->status &= ~IRQ_NOREQUEST; 674 desc->status &= ~IRQ_NOREQUEST;
681 675
682 /* map it */ 676 /* map it */
683 smp_wmb(); 677 smp_wmb();
684 irq_map[virq].hwirq = hwirq; 678 irq_map[virq].hwirq = hwirq;
685 smp_mb(); 679 smp_mb();
686 680
687 if (host->ops->map(host, virq, hwirq)) { 681 if (host->ops->map(host, virq, hwirq)) {
688 pr_debug("irq: -> mapping failed, freeing\n"); 682 pr_debug("irq: -> mapping failed, freeing\n");
689 goto error; 683 goto error;
690 } 684 }
691 685
692 return 0; 686 return 0;
693 687
694 error: 688 error:
695 irq_free_virt(virq, 1); 689 irq_free_virt(virq, 1);
696 return -1; 690 return -1;
697 } 691 }
698 692
699 unsigned int irq_create_direct_mapping(struct irq_host *host) 693 unsigned int irq_create_direct_mapping(struct irq_host *host)
700 { 694 {
701 unsigned int virq; 695 unsigned int virq;
702 696
703 if (host == NULL) 697 if (host == NULL)
704 host = irq_default_host; 698 host = irq_default_host;
705 699
706 BUG_ON(host == NULL); 700 BUG_ON(host == NULL);
707 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); 701 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
708 702
709 virq = irq_alloc_virt(host, 1, 0); 703 virq = irq_alloc_virt(host, 1, 0);
710 if (virq == NO_IRQ) { 704 if (virq == NO_IRQ) {
711 pr_debug("irq: create_direct virq allocation failed\n"); 705 pr_debug("irq: create_direct virq allocation failed\n");
712 return NO_IRQ; 706 return NO_IRQ;
713 } 707 }
714 708
715 pr_debug("irq: create_direct obtained virq %d\n", virq); 709 pr_debug("irq: create_direct obtained virq %d\n", virq);
716 710
717 if (irq_setup_virq(host, virq, virq)) 711 if (irq_setup_virq(host, virq, virq))
718 return NO_IRQ; 712 return NO_IRQ;
719 713
720 return virq; 714 return virq;
721 } 715 }
722 716
723 unsigned int irq_create_mapping(struct irq_host *host, 717 unsigned int irq_create_mapping(struct irq_host *host,
724 irq_hw_number_t hwirq) 718 irq_hw_number_t hwirq)
725 { 719 {
726 unsigned int virq, hint; 720 unsigned int virq, hint;
727 721
728 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); 722 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
729 723
730 /* Look for default host if necessary */ 724 /* Look for default host if necessary */
731 if (host == NULL) 725 if (host == NULL)
732 host = irq_default_host; 726 host = irq_default_host;
733 if (host == NULL) { 727 if (host == NULL) {
734 printk(KERN_WARNING "irq_create_mapping called for" 728 printk(KERN_WARNING "irq_create_mapping called for"
735 " NULL host, hwirq=%lx\n", hwirq); 729 " NULL host, hwirq=%lx\n", hwirq);
736 WARN_ON(1); 730 WARN_ON(1);
737 return NO_IRQ; 731 return NO_IRQ;
738 } 732 }
739 pr_debug("irq: -> using host @%p\n", host); 733 pr_debug("irq: -> using host @%p\n", host);
740 734
741 /* Check if mapping already exists, if it does, call 735 /* Check if mapping already exists, if it does, call
742 * host->ops->map() to update the flags 736 * host->ops->map() to update the flags
743 */ 737 */
744 virq = irq_find_mapping(host, hwirq); 738 virq = irq_find_mapping(host, hwirq);
745 if (virq != NO_IRQ) { 739 if (virq != NO_IRQ) {
746 if (host->ops->remap) 740 if (host->ops->remap)
747 host->ops->remap(host, virq, hwirq); 741 host->ops->remap(host, virq, hwirq);
748 pr_debug("irq: -> existing mapping on virq %d\n", virq); 742 pr_debug("irq: -> existing mapping on virq %d\n", virq);
749 return virq; 743 return virq;
750 } 744 }
751 745
752 /* Get a virtual interrupt number */ 746 /* Get a virtual interrupt number */
753 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 747 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
754 /* Handle legacy */ 748 /* Handle legacy */
755 virq = (unsigned int)hwirq; 749 virq = (unsigned int)hwirq;
756 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 750 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
757 return NO_IRQ; 751 return NO_IRQ;
758 return virq; 752 return virq;
759 } else { 753 } else {
760 /* Allocate a virtual interrupt number */ 754 /* Allocate a virtual interrupt number */
761 hint = hwirq % irq_virq_count; 755 hint = hwirq % irq_virq_count;
762 virq = irq_alloc_virt(host, 1, hint); 756 virq = irq_alloc_virt(host, 1, hint);
763 if (virq == NO_IRQ) { 757 if (virq == NO_IRQ) {
764 pr_debug("irq: -> virq allocation failed\n"); 758 pr_debug("irq: -> virq allocation failed\n");
765 return NO_IRQ; 759 return NO_IRQ;
766 } 760 }
767 } 761 }
768 762
769 if (irq_setup_virq(host, virq, hwirq)) 763 if (irq_setup_virq(host, virq, hwirq))
770 return NO_IRQ; 764 return NO_IRQ;
771 765
772 printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", 766 printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
773 hwirq, host->of_node ? host->of_node->full_name : "null", virq); 767 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
774 768
775 return virq; 769 return virq;
776 } 770 }
777 EXPORT_SYMBOL_GPL(irq_create_mapping); 771 EXPORT_SYMBOL_GPL(irq_create_mapping);
778 772
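Once a host is registered, controller or platform code asks for the Linux interrupt number of a given hardware source with irq_create_mapping(). The sketch below reuses the hypothetical my_pic_host from the registration example above.

static void my_pic_setup_source(void)
{
        unsigned int virq = irq_create_mapping(my_pic_host, 5); /* hw source 5 */

        if (virq == NO_IRQ)
                return;                                  /* mapping failed */

        set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);         /* optional trigger setup */
}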
779 unsigned int irq_create_of_mapping(struct device_node *controller, 773 unsigned int irq_create_of_mapping(struct device_node *controller,
780 const u32 *intspec, unsigned int intsize) 774 const u32 *intspec, unsigned int intsize)
781 { 775 {
782 struct irq_host *host; 776 struct irq_host *host;
783 irq_hw_number_t hwirq; 777 irq_hw_number_t hwirq;
784 unsigned int type = IRQ_TYPE_NONE; 778 unsigned int type = IRQ_TYPE_NONE;
785 unsigned int virq; 779 unsigned int virq;
786 780
787 if (controller == NULL) 781 if (controller == NULL)
788 host = irq_default_host; 782 host = irq_default_host;
789 else 783 else
790 host = irq_find_host(controller); 784 host = irq_find_host(controller);
791 if (host == NULL) { 785 if (host == NULL) {
792 printk(KERN_WARNING "irq: no irq host found for %s !\n", 786 printk(KERN_WARNING "irq: no irq host found for %s !\n",
793 controller->full_name); 787 controller->full_name);
794 return NO_IRQ; 788 return NO_IRQ;
795 } 789 }
796 790
797 /* If host has no translation, then we assume interrupt line */ 791 /* If host has no translation, then we assume interrupt line */
798 if (host->ops->xlate == NULL) 792 if (host->ops->xlate == NULL)
799 hwirq = intspec[0]; 793 hwirq = intspec[0];
800 else { 794 else {
801 if (host->ops->xlate(host, controller, intspec, intsize, 795 if (host->ops->xlate(host, controller, intspec, intsize,
802 &hwirq, &type)) 796 &hwirq, &type))
803 return NO_IRQ; 797 return NO_IRQ;
804 } 798 }
805 799
806 /* Create mapping */ 800 /* Create mapping */
807 virq = irq_create_mapping(host, hwirq); 801 virq = irq_create_mapping(host, hwirq);
808 if (virq == NO_IRQ) 802 if (virq == NO_IRQ)
809 return virq; 803 return virq;
810 804
811 /* Set type if specified and different than the current one */ 805 /* Set type if specified and different than the current one */
812 if (type != IRQ_TYPE_NONE && 806 if (type != IRQ_TYPE_NONE &&
813 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) 807 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
814 set_irq_type(virq, type); 808 set_irq_type(virq, type);
815 return virq; 809 return virq;
816 } 810 }
817 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 811 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
818 812
819 unsigned int irq_of_parse_and_map(struct device_node *dev, int index) 813 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
820 { 814 {
821 struct of_irq oirq; 815 struct of_irq oirq;
822 816
823 if (of_irq_map_one(dev, index, &oirq)) 817 if (of_irq_map_one(dev, index, &oirq))
824 return NO_IRQ; 818 return NO_IRQ;
825 819
826 return irq_create_of_mapping(oirq.controller, oirq.specifier, 820 return irq_create_of_mapping(oirq.controller, oirq.specifier,
827 oirq.size); 821 oirq.size);
828 } 822 }
829 EXPORT_SYMBOL_GPL(irq_of_parse_and_map); 823 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
830 824
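A typical consumer is a device driver that translates its device-tree interrupt specifier and then requests the resulting virq; my_isr, my_dev and np are placeholders.

static irqreturn_t my_isr(int irq, void *dev_id);        /* placeholder handler */

static int my_device_setup_irq(struct device_node *np, void *my_dev)
{
        unsigned int virq = irq_of_parse_and_map(np, 0);  /* first interrupt */

        if (virq == NO_IRQ)
                return -ENODEV;

        return request_irq(virq, my_isr, 0, "my-device", my_dev);
}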
831 void irq_dispose_mapping(unsigned int virq) 825 void irq_dispose_mapping(unsigned int virq)
832 { 826 {
833 struct irq_host *host; 827 struct irq_host *host;
834 irq_hw_number_t hwirq; 828 irq_hw_number_t hwirq;
835 829
836 if (virq == NO_IRQ) 830 if (virq == NO_IRQ)
837 return; 831 return;
838 832
839 host = irq_map[virq].host; 833 host = irq_map[virq].host;
840 WARN_ON (host == NULL); 834 WARN_ON (host == NULL);
841 if (host == NULL) 835 if (host == NULL)
842 return; 836 return;
843 837
844 /* Never unmap legacy interrupts */ 838 /* Never unmap legacy interrupts */
845 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 839 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
846 return; 840 return;
847 841
848 /* remove chip and handler */ 842 /* remove chip and handler */
849 set_irq_chip_and_handler(virq, NULL, NULL); 843 set_irq_chip_and_handler(virq, NULL, NULL);
850 844
851 /* Make sure it's completed */ 845 /* Make sure it's completed */
852 synchronize_irq(virq); 846 synchronize_irq(virq);
853 847
854 /* Tell the PIC about it */ 848 /* Tell the PIC about it */
855 if (host->ops->unmap) 849 if (host->ops->unmap)
856 host->ops->unmap(host, virq); 850 host->ops->unmap(host, virq);
857 smp_mb(); 851 smp_mb();
858 852
859 /* Clear reverse map */ 853 /* Clear reverse map */
860 hwirq = irq_map[virq].hwirq; 854 hwirq = irq_map[virq].hwirq;
861 switch(host->revmap_type) { 855 switch(host->revmap_type) {
862 case IRQ_HOST_MAP_LINEAR: 856 case IRQ_HOST_MAP_LINEAR:
863 if (hwirq < host->revmap_data.linear.size) 857 if (hwirq < host->revmap_data.linear.size)
864 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 858 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
865 break; 859 break;
866 case IRQ_HOST_MAP_TREE: 860 case IRQ_HOST_MAP_TREE:
867 /* 861 /*
868 * Check if radix tree allocated yet, if not then nothing to 862 * Check if radix tree allocated yet, if not then nothing to
869 * remove. 863 * remove.
870 */ 864 */
871 smp_rmb(); 865 smp_rmb();
872 if (revmap_trees_allocated < 1) 866 if (revmap_trees_allocated < 1)
873 break; 867 break;
874 mutex_lock(&revmap_trees_mutex); 868 mutex_lock(&revmap_trees_mutex);
875 radix_tree_delete(&host->revmap_data.tree, hwirq); 869 radix_tree_delete(&host->revmap_data.tree, hwirq);
876 mutex_unlock(&revmap_trees_mutex); 870 mutex_unlock(&revmap_trees_mutex);
877 break; 871 break;
878 } 872 }
879 873
880 /* Destroy map */ 874 /* Destroy map */
881 smp_mb(); 875 smp_mb();
882 irq_map[virq].hwirq = host->inval_irq; 876 irq_map[virq].hwirq = host->inval_irq;
883 877
884 /* Set some flags */ 878 /* Set some flags */
885 irq_to_desc(virq)->status |= IRQ_NOREQUEST; 879 irq_to_desc(virq)->status |= IRQ_NOREQUEST;
886 880
887 /* Free it */ 881 /* Free it */
888 irq_free_virt(virq, 1); 882 irq_free_virt(virq, 1);
889 } 883 }
890 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 884 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
891 885
892 unsigned int irq_find_mapping(struct irq_host *host, 886 unsigned int irq_find_mapping(struct irq_host *host,
893 irq_hw_number_t hwirq) 887 irq_hw_number_t hwirq)
894 { 888 {
895 unsigned int i; 889 unsigned int i;
896 unsigned int hint = hwirq % irq_virq_count; 890 unsigned int hint = hwirq % irq_virq_count;
897 891
899 /* Look for default host if necessary */ 893 /* Look for default host if necessary */
899 if (host == NULL) 893 if (host == NULL)
900 host = irq_default_host; 894 host = irq_default_host;
901 if (host == NULL) 895 if (host == NULL)
902 return NO_IRQ; 896 return NO_IRQ;
903 897
904 /* legacy -> bail early */ 898 /* legacy -> bail early */
905 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 899 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
906 return hwirq; 900 return hwirq;
907 901
908 /* Slow path does a linear search of the map */ 902 /* Slow path does a linear search of the map */
909 if (hint < NUM_ISA_INTERRUPTS) 903 if (hint < NUM_ISA_INTERRUPTS)
910 hint = NUM_ISA_INTERRUPTS; 904 hint = NUM_ISA_INTERRUPTS;
911 i = hint; 905 i = hint;
912 do { 906 do {
913 if (irq_map[i].host == host && 907 if (irq_map[i].host == host &&
914 irq_map[i].hwirq == hwirq) 908 irq_map[i].hwirq == hwirq)
915 return i; 909 return i;
916 i++; 910 i++;
917 if (i >= irq_virq_count) 911 if (i >= irq_virq_count)
918 i = NUM_ISA_INTERRUPTS; 912 i = NUM_ISA_INTERRUPTS;
919 } while(i != hint); 913 } while(i != hint);
920 return NO_IRQ; 914 return NO_IRQ;
921 } 915 }
922 EXPORT_SYMBOL_GPL(irq_find_mapping); 916 EXPORT_SYMBOL_GPL(irq_find_mapping);
923 917
924 918
925 unsigned int irq_radix_revmap_lookup(struct irq_host *host, 919 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
926 irq_hw_number_t hwirq) 920 irq_hw_number_t hwirq)
927 { 921 {
928 struct irq_map_entry *ptr; 922 struct irq_map_entry *ptr;
929 unsigned int virq; 923 unsigned int virq;
930 924
931 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 925 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
932 926
933 /* 927 /*
934 * Check if the radix tree exists and has been initialized. 928 * Check if the radix tree exists and has been initialized.
935 * If not, we fall back to slow mode 929 * If not, we fall back to slow mode
936 */ 930 */
937 if (revmap_trees_allocated < 2) 931 if (revmap_trees_allocated < 2)
938 return irq_find_mapping(host, hwirq); 932 return irq_find_mapping(host, hwirq);
939 933
940 /* Now try to resolve */ 934 /* Now try to resolve */
941 /* 935 /*
942 * No rcu_read_lock(ing) needed, the ptr returned can't go under us 936 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
943 * as it's referencing an entry in the static irq_map table. 937 * as it's referencing an entry in the static irq_map table.
944 */ 938 */
945 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); 939 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
946 940
947 /* 941 /*
948 * If found in radix tree, then fine. 942 * If found in radix tree, then fine.
949 * Else fall back to linear lookup - this should not happen in practice 943 * Else fall back to linear lookup - this should not happen in practice
950 * as it means that we failed to insert the node in the radix tree. 944 * as it means that we failed to insert the node in the radix tree.
951 */ 945 */
952 if (ptr) 946 if (ptr)
953 virq = ptr - irq_map; 947 virq = ptr - irq_map;
954 else 948 else
955 virq = irq_find_mapping(host, hwirq); 949 virq = irq_find_mapping(host, hwirq);
956 950
957 return virq; 951 return virq;
958 } 952 }
959 953
960 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 954 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
961 irq_hw_number_t hwirq) 955 irq_hw_number_t hwirq)
962 { 956 {
963 957
964 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 958 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
965 959
966 /* 960 /*
967 * Check if the radix tree exists yet. 961 * Check if the radix tree exists yet.
968 * If not, then the irq will be inserted into the tree when it gets 962 * If not, then the irq will be inserted into the tree when it gets
969 * initialized. 963 * initialized.
970 */ 964 */
971 smp_rmb(); 965 smp_rmb();
972 if (revmap_trees_allocated < 1) 966 if (revmap_trees_allocated < 1)
973 return; 967 return;
974 968
975 if (virq != NO_IRQ) { 969 if (virq != NO_IRQ) {
976 mutex_lock(&revmap_trees_mutex); 970 mutex_lock(&revmap_trees_mutex);
977 radix_tree_insert(&host->revmap_data.tree, hwirq, 971 radix_tree_insert(&host->revmap_data.tree, hwirq,
978 &irq_map[virq]); 972 &irq_map[virq]);
979 mutex_unlock(&revmap_trees_mutex); 973 mutex_unlock(&revmap_trees_mutex);
980 } 974 }
981 } 975 }
982 976
983 unsigned int irq_linear_revmap(struct irq_host *host, 977 unsigned int irq_linear_revmap(struct irq_host *host,
984 irq_hw_number_t hwirq) 978 irq_hw_number_t hwirq)
985 { 979 {
986 unsigned int *revmap; 980 unsigned int *revmap;
987 981
988 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 982 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
989 983
990 /* Check revmap bounds */ 984 /* Check revmap bounds */
991 if (unlikely(hwirq >= host->revmap_data.linear.size)) 985 if (unlikely(hwirq >= host->revmap_data.linear.size))
992 return irq_find_mapping(host, hwirq); 986 return irq_find_mapping(host, hwirq);
993 987
994 /* Check if revmap was allocated */ 988 /* Check if revmap was allocated */
995 revmap = host->revmap_data.linear.revmap; 989 revmap = host->revmap_data.linear.revmap;
996 if (unlikely(revmap == NULL)) 990 if (unlikely(revmap == NULL))
997 return irq_find_mapping(host, hwirq); 991 return irq_find_mapping(host, hwirq);
998 992
999 /* Fill up revmap with slow path if no mapping found */ 993 /* Fill up revmap with slow path if no mapping found */
1000 if (unlikely(revmap[hwirq] == NO_IRQ)) 994 if (unlikely(revmap[hwirq] == NO_IRQ))
1001 revmap[hwirq] = irq_find_mapping(host, hwirq); 995 revmap[hwirq] = irq_find_mapping(host, hwirq);
1002 996
1003 return revmap[hwirq]; 997 return revmap[hwirq];
1004 } 998 }
1005 999
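The linear revmap is the fast path normally used from a cascaded-controller flow handler, roughly along these lines; my_pic_host and my_pic_read_pending() are placeholders.

static irq_hw_number_t my_pic_read_pending(void);       /* placeholder: ask the hardware */

static void my_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
        irq_hw_number_t hw = my_pic_read_pending();
        unsigned int virq = irq_linear_revmap(my_pic_host, hw);

        if (virq != NO_IRQ)
                generic_handle_irq(virq);

        /* Ack/EOI on the parent controller would go here. */
}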
1006 unsigned int irq_alloc_virt(struct irq_host *host, 1000 unsigned int irq_alloc_virt(struct irq_host *host,
1007 unsigned int count, 1001 unsigned int count,
1008 unsigned int hint) 1002 unsigned int hint)
1009 { 1003 {
1010 unsigned long flags; 1004 unsigned long flags;
1011 unsigned int i, j, found = NO_IRQ; 1005 unsigned int i, j, found = NO_IRQ;
1012 1006
1013 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 1007 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1014 return NO_IRQ; 1008 return NO_IRQ;
1015 1009
1016 raw_spin_lock_irqsave(&irq_big_lock, flags); 1010 raw_spin_lock_irqsave(&irq_big_lock, flags);
1017 1011
1018 /* Use hint for 1 interrupt if any */ 1012 /* Use hint for 1 interrupt if any */
1019 if (count == 1 && hint >= NUM_ISA_INTERRUPTS && 1013 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1020 hint < irq_virq_count && irq_map[hint].host == NULL) { 1014 hint < irq_virq_count && irq_map[hint].host == NULL) {
1021 found = hint; 1015 found = hint;
1022 goto hint_found; 1016 goto hint_found;
1023 } 1017 }
1024 1018
1025 /* Look for count consecutive numbers in the allocatable 1019 /* Look for count consecutive numbers in the allocatable
1026 * (non-legacy) space 1020 * (non-legacy) space
1027 */ 1021 */
1028 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { 1022 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1029 if (irq_map[i].host != NULL) 1023 if (irq_map[i].host != NULL)
1030 j = 0; 1024 j = 0;
1031 else 1025 else
1032 j++; 1026 j++;
1033 1027
1034 if (j == count) { 1028 if (j == count) {
1035 found = i - count + 1; 1029 found = i - count + 1;
1036 break; 1030 break;
1037 } 1031 }
1038 } 1032 }
1039 if (found == NO_IRQ) { 1033 if (found == NO_IRQ) {
1040 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1034 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1041 return NO_IRQ; 1035 return NO_IRQ;
1042 } 1036 }
1043 hint_found: 1037 hint_found:
1044 for (i = found; i < (found + count); i++) { 1038 for (i = found; i < (found + count); i++) {
1045 irq_map[i].hwirq = host->inval_irq; 1039 irq_map[i].hwirq = host->inval_irq;
1046 smp_wmb(); 1040 smp_wmb();
1047 irq_map[i].host = host; 1041 irq_map[i].host = host;
1048 } 1042 }
1049 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1043 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1050 return found; 1044 return found;
1051 } 1045 }
1052 1046
1053 void irq_free_virt(unsigned int virq, unsigned int count) 1047 void irq_free_virt(unsigned int virq, unsigned int count)
1054 { 1048 {
1055 unsigned long flags; 1049 unsigned long flags;
1056 unsigned int i; 1050 unsigned int i;
1057 1051
1058 WARN_ON (virq < NUM_ISA_INTERRUPTS); 1052 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1059 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 1053 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1060 1054
1061 raw_spin_lock_irqsave(&irq_big_lock, flags); 1055 raw_spin_lock_irqsave(&irq_big_lock, flags);
1062 for (i = virq; i < (virq + count); i++) { 1056 for (i = virq; i < (virq + count); i++) {
1063 struct irq_host *host; 1057 struct irq_host *host;
1064 1058
1065 if (i < NUM_ISA_INTERRUPTS || 1059 if (i < NUM_ISA_INTERRUPTS ||
1066 (virq + count) > irq_virq_count) 1060 (virq + count) > irq_virq_count)
1067 continue; 1061 continue;
1068 1062
1069 host = irq_map[i].host; 1063 host = irq_map[i].host;
1070 irq_map[i].hwirq = host->inval_irq; 1064 irq_map[i].hwirq = host->inval_irq;
1071 smp_wmb(); 1065 smp_wmb();
1072 irq_map[i].host = NULL; 1066 irq_map[i].host = NULL;
1073 } 1067 }
1074 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1068 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1075 } 1069 }
1076 1070
1077 int arch_early_irq_init(void) 1071 int arch_early_irq_init(void)
1078 { 1072 {
1079 struct irq_desc *desc; 1073 struct irq_desc *desc;
1080 int i; 1074 int i;
1081 1075
1082 for (i = 0; i < NR_IRQS; i++) { 1076 for (i = 0; i < NR_IRQS; i++) {
1083 desc = irq_to_desc(i); 1077 desc = irq_to_desc(i);
1084 if (desc) 1078 if (desc)
1085 desc->status |= IRQ_NOREQUEST; 1079 desc->status |= IRQ_NOREQUEST;
1086 } 1080 }
1087 1081
1088 return 0; 1082 return 0;
1089 } 1083 }
1090 1084
1091 int arch_init_chip_data(struct irq_desc *desc, int node) 1085 int arch_init_chip_data(struct irq_desc *desc, int node)
1092 { 1086 {
1093 desc->status |= IRQ_NOREQUEST; 1087 desc->status |= IRQ_NOREQUEST;
1094 return 0; 1088 return 0;
1095 } 1089 }
1096 1090
1097 /* We need to create the radix trees late */ 1091 /* We need to create the radix trees late */
1098 static int irq_late_init(void) 1092 static int irq_late_init(void)
1099 { 1093 {
1100 struct irq_host *h; 1094 struct irq_host *h;
1101 unsigned int i; 1095 unsigned int i;
1102 1096
1103 /* 1097 /*
1104 * No mutual exclusion with respect to accessors of the tree is needed 1098 * No mutual exclusion with respect to accessors of the tree is needed
1105 * here as the synchronization is done via the state variable 1099 * here as the synchronization is done via the state variable
1106 * revmap_trees_allocated. 1100 * revmap_trees_allocated.
1107 */ 1101 */
1108 list_for_each_entry(h, &irq_hosts, link) { 1102 list_for_each_entry(h, &irq_hosts, link) {
1109 if (h->revmap_type == IRQ_HOST_MAP_TREE) 1103 if (h->revmap_type == IRQ_HOST_MAP_TREE)
1110 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); 1104 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1111 } 1105 }
1112 1106
1113 /* 1107 /*
1114 * Make sure the radix trees inits are visible before setting 1108 * Make sure the radix trees inits are visible before setting
1115 * the flag 1109 * the flag
1116 */ 1110 */
1117 smp_wmb(); 1111 smp_wmb();
1118 revmap_trees_allocated = 1; 1112 revmap_trees_allocated = 1;
1119 1113
1120 /* 1114 /*
1121 * Insert the reverse mapping for those interrupts already present 1115 * Insert the reverse mapping for those interrupts already present
1122 * in irq_map[]. 1116 * in irq_map[].
1123 */ 1117 */
1124 mutex_lock(&revmap_trees_mutex); 1118 mutex_lock(&revmap_trees_mutex);
1125 for (i = 0; i < irq_virq_count; i++) { 1119 for (i = 0; i < irq_virq_count; i++) {
1126 if (irq_map[i].host && 1120 if (irq_map[i].host &&
1127 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) 1121 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1128 radix_tree_insert(&irq_map[i].host->revmap_data.tree, 1122 radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1129 irq_map[i].hwirq, &irq_map[i]); 1123 irq_map[i].hwirq, &irq_map[i]);
1130 } 1124 }
1131 mutex_unlock(&revmap_trees_mutex); 1125 mutex_unlock(&revmap_trees_mutex);
1132 1126
1133 /* 1127 /*
1134 * Make sure the radix trees insertions are visible before setting 1128 * Make sure the radix trees insertions are visible before setting
1135 * the flag 1129 * the flag
1136 */ 1130 */
1137 smp_wmb(); 1131 smp_wmb();
1138 revmap_trees_allocated = 2; 1132 revmap_trees_allocated = 2;
1139 1133
1140 return 0; 1134 return 0;
1141 } 1135 }
1142 arch_initcall(irq_late_init); 1136 arch_initcall(irq_late_init);
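
The revmap_trees_allocated variable acts as a staged publication flag: 0 means no trees exist, 1 means the trees are initialized and inserts may proceed, 2 means the pre-existing mappings have been inserted and lookups may trust the tree. A sketch of the consuming side of that protocol (not the literal lookup helper from this file, just the pattern it relies on):

	static unsigned int revmap_tree_lookup_sketch(struct irq_host *host,
						      irq_hw_number_t hwirq)
	{
		struct irq_map_entry *ptr;

		smp_rmb();			/* pairs with the smp_wmb()s above */
		if (revmap_trees_allocated < 2)	/* insertions not yet published    */
			return irq_find_mapping(host, hwirq);

		ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
		return ptr ? ptr - irq_map : NO_IRQ;
	}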
1143 1137
1144 #ifdef CONFIG_VIRQ_DEBUG 1138 #ifdef CONFIG_VIRQ_DEBUG
1145 static int virq_debug_show(struct seq_file *m, void *private) 1139 static int virq_debug_show(struct seq_file *m, void *private)
1146 { 1140 {
1147 unsigned long flags; 1141 unsigned long flags;
1148 struct irq_desc *desc; 1142 struct irq_desc *desc;
1149 const char *p; 1143 const char *p;
1150 char none[] = "none"; 1144 char none[] = "none";
1151 int i; 1145 int i;
1152 1146
1153 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", 1147 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
1154 "chip name", "host name"); 1148 "chip name", "host name");
1155 1149
1156 for (i = 1; i < nr_irqs; i++) { 1150 for (i = 1; i < nr_irqs; i++) {
1157 desc = irq_to_desc(i); 1151 desc = irq_to_desc(i);
1158 if (!desc) 1152 if (!desc)
1159 continue; 1153 continue;
1160 1154
1161 raw_spin_lock_irqsave(&desc->lock, flags); 1155 raw_spin_lock_irqsave(&desc->lock, flags);
1162 1156
1163 if (desc->action && desc->action->handler) { 1157 if (desc->action && desc->action->handler) {
1164 seq_printf(m, "%5d ", i); 1158 seq_printf(m, "%5d ", i);
1165 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1159 seq_printf(m, "0x%05lx ", virq_to_hw(i));
1166 1160
1167 if (desc->chip && desc->chip->name) 1161 if (desc->chip && desc->chip->name)
1168 p = desc->chip->name; 1162 p = desc->chip->name;
1169 else 1163 else
1170 p = none; 1164 p = none;
1171 seq_printf(m, "%-15s ", p); 1165 seq_printf(m, "%-15s ", p);
1172 1166
1173 if (irq_map[i].host && irq_map[i].host->of_node) 1167 if (irq_map[i].host && irq_map[i].host->of_node)
1174 p = irq_map[i].host->of_node->full_name; 1168 p = irq_map[i].host->of_node->full_name;
1175 else 1169 else
1176 p = none; 1170 p = none;
1177 seq_printf(m, "%s\n", p); 1171 seq_printf(m, "%s\n", p);
1178 } 1172 }
1179 1173
1180 raw_spin_unlock_irqrestore(&desc->lock, flags); 1174 raw_spin_unlock_irqrestore(&desc->lock, flags);
1181 } 1175 }
1182 1176
1183 return 0; 1177 return 0;
1184 } 1178 }
1185 1179
1186 static int virq_debug_open(struct inode *inode, struct file *file) 1180 static int virq_debug_open(struct inode *inode, struct file *file)
1187 { 1181 {
1188 return single_open(file, virq_debug_show, inode->i_private); 1182 return single_open(file, virq_debug_show, inode->i_private);
1189 } 1183 }
1190 1184
1191 static const struct file_operations virq_debug_fops = { 1185 static const struct file_operations virq_debug_fops = {
1192 .open = virq_debug_open, 1186 .open = virq_debug_open,
1193 .read = seq_read, 1187 .read = seq_read,
1194 .llseek = seq_lseek, 1188 .llseek = seq_lseek,
1195 .release = single_release, 1189 .release = single_release,
1196 }; 1190 };
1197 1191
1198 static int __init irq_debugfs_init(void) 1192 static int __init irq_debugfs_init(void)
1199 { 1193 {
1200 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, 1194 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1201 NULL, &virq_debug_fops) == NULL) 1195 NULL, &virq_debug_fops) == NULL)
1202 return -ENOMEM; 1196 return -ENOMEM;
1203 1197
1204 return 0; 1198 return 0;
1205 } 1199 }
1206 __initcall(irq_debugfs_init); 1200 __initcall(irq_debugfs_init);
1207 #endif /* CONFIG_VIRQ_DEBUG */ 1201 #endif /* CONFIG_VIRQ_DEBUG */
1208 1202
1209 #ifdef CONFIG_PPC64 1203 #ifdef CONFIG_PPC64
1210 static int __init setup_noirqdistrib(char *str) 1204 static int __init setup_noirqdistrib(char *str)
1211 { 1205 {
1212 distribute_irqs = 0; 1206 distribute_irqs = 0;
1213 return 1; 1207 return 1;
1214 } 1208 }
1215 1209
1216 __setup("noirqdistrib", setup_noirqdistrib); 1210 __setup("noirqdistrib", setup_noirqdistrib);
1217 #endif /* CONFIG_PPC64 */ 1211 #endif /* CONFIG_PPC64 */
1218 1212
arch/powerpc/kernel/time.c
1 /* 1 /*
2 * Common time routines among all ppc machines. 2 * Common time routines among all ppc machines.
3 * 3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac. 5 * Paul Mackerras' version and mine for PReP and Pmac.
6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). 6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) 7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8 * 8 *
9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es) 9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10 * to make clock more stable (2.4.0-test5). The only thing 10 * to make clock more stable (2.4.0-test5). The only thing
11 * that this code assumes is that the timebases have been synchronized 11 * that this code assumes is that the timebases have been synchronized
12 * by firmware on SMP and are never stopped (never do sleep 12 * by firmware on SMP and are never stopped (never do sleep
13 * on SMP then, nap and doze are OK). 13 * on SMP then, nap and doze are OK).
14 * 14 *
15 * Speeded up do_gettimeofday by getting rid of references to 15 * Speeded up do_gettimeofday by getting rid of references to
16 * xtime (which required locks for consistency). (mikejc@us.ibm.com) 16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17 * 17 *
18 * TODO (not necessarily in this file): 18 * TODO (not necessarily in this file):
19 * - improve precision and reproducibility of timebase frequency 19 * - improve precision and reproducibility of timebase frequency
20 * measurement at boot time. (for iSeries, we calibrate the timebase 20 * measurement at boot time. (for iSeries, we calibrate the timebase
21 * against the Titan chip's clock.) 21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get 22 * - for astronomical applications: add a new function to get
23 * non ambiguous timestamps even around leap seconds. This needs 23 * non ambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name. 24 * a new timestamp format and a good name.
25 * 25 *
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
27 * "A Kernel Model for Precision Timekeeping" by Dave Mills 27 * "A Kernel Model for Precision Timekeeping" by Dave Mills
28 * 28 *
29 * This program is free software; you can redistribute it and/or 29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License 30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version 31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version. 32 * 2 of the License, or (at your option) any later version.
33 */ 33 */
34 34
35 #include <linux/errno.h> 35 #include <linux/errno.h>
36 #include <linux/module.h> 36 #include <linux/module.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/kernel.h> 38 #include <linux/kernel.h>
39 #include <linux/param.h> 39 #include <linux/param.h>
40 #include <linux/string.h> 40 #include <linux/string.h>
41 #include <linux/mm.h> 41 #include <linux/mm.h>
42 #include <linux/interrupt.h> 42 #include <linux/interrupt.h>
43 #include <linux/timex.h> 43 #include <linux/timex.h>
44 #include <linux/kernel_stat.h> 44 #include <linux/kernel_stat.h>
45 #include <linux/time.h> 45 #include <linux/time.h>
46 #include <linux/init.h> 46 #include <linux/init.h>
47 #include <linux/profile.h> 47 #include <linux/profile.h>
48 #include <linux/cpu.h> 48 #include <linux/cpu.h>
49 #include <linux/security.h> 49 #include <linux/security.h>
50 #include <linux/percpu.h> 50 #include <linux/percpu.h>
51 #include <linux/rtc.h> 51 #include <linux/rtc.h>
52 #include <linux/jiffies.h> 52 #include <linux/jiffies.h>
53 #include <linux/posix-timers.h> 53 #include <linux/posix-timers.h>
54 #include <linux/irq.h> 54 #include <linux/irq.h>
55 #include <linux/delay.h> 55 #include <linux/delay.h>
56 #include <linux/perf_event.h> 56 #include <linux/perf_event.h>
57 #include <asm/trace.h> 57 #include <asm/trace.h>
58 58
59 #include <asm/io.h> 59 #include <asm/io.h>
60 #include <asm/processor.h> 60 #include <asm/processor.h>
61 #include <asm/nvram.h> 61 #include <asm/nvram.h>
62 #include <asm/cache.h> 62 #include <asm/cache.h>
63 #include <asm/machdep.h> 63 #include <asm/machdep.h>
64 #include <asm/uaccess.h> 64 #include <asm/uaccess.h>
65 #include <asm/time.h> 65 #include <asm/time.h>
66 #include <asm/prom.h> 66 #include <asm/prom.h>
67 #include <asm/irq.h> 67 #include <asm/irq.h>
68 #include <asm/div64.h> 68 #include <asm/div64.h>
69 #include <asm/smp.h> 69 #include <asm/smp.h>
70 #include <asm/vdso_datapage.h> 70 #include <asm/vdso_datapage.h>
71 #include <asm/firmware.h> 71 #include <asm/firmware.h>
72 #include <asm/cputime.h> 72 #include <asm/cputime.h>
73 #ifdef CONFIG_PPC_ISERIES 73 #ifdef CONFIG_PPC_ISERIES
74 #include <asm/iseries/it_lp_queue.h> 74 #include <asm/iseries/it_lp_queue.h>
75 #include <asm/iseries/hv_call_xm.h> 75 #include <asm/iseries/hv_call_xm.h>
76 #endif 76 #endif
77 77
78 /* powerpc clocksource/clockevent code */ 78 /* powerpc clocksource/clockevent code */
79 79
80 #include <linux/clockchips.h> 80 #include <linux/clockchips.h>
81 #include <linux/clocksource.h> 81 #include <linux/clocksource.h>
82 82
83 static cycle_t rtc_read(struct clocksource *); 83 static cycle_t rtc_read(struct clocksource *);
84 static struct clocksource clocksource_rtc = { 84 static struct clocksource clocksource_rtc = {
85 .name = "rtc", 85 .name = "rtc",
86 .rating = 400, 86 .rating = 400,
87 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
88 .mask = CLOCKSOURCE_MASK(64), 88 .mask = CLOCKSOURCE_MASK(64),
89 .shift = 22, 89 .shift = 22,
90 .mult = 0, /* To be filled in */ 90 .mult = 0, /* To be filled in */
91 .read = rtc_read, 91 .read = rtc_read,
92 }; 92 };
93 93
94 static cycle_t timebase_read(struct clocksource *); 94 static cycle_t timebase_read(struct clocksource *);
95 static struct clocksource clocksource_timebase = { 95 static struct clocksource clocksource_timebase = {
96 .name = "timebase", 96 .name = "timebase",
97 .rating = 400, 97 .rating = 400,
98 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 98 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
99 .mask = CLOCKSOURCE_MASK(64), 99 .mask = CLOCKSOURCE_MASK(64),
100 .shift = 22, 100 .shift = 22,
101 .mult = 0, /* To be filled in */ 101 .mult = 0, /* To be filled in */
102 .read = timebase_read, 102 .read = timebase_read,
103 }; 103 };
104 104
105 #define DECREMENTER_MAX 0x7fffffff 105 #define DECREMENTER_MAX 0x7fffffff
106 106
107 static int decrementer_set_next_event(unsigned long evt, 107 static int decrementer_set_next_event(unsigned long evt,
108 struct clock_event_device *dev); 108 struct clock_event_device *dev);
109 static void decrementer_set_mode(enum clock_event_mode mode, 109 static void decrementer_set_mode(enum clock_event_mode mode,
110 struct clock_event_device *dev); 110 struct clock_event_device *dev);
111 111
112 static struct clock_event_device decrementer_clockevent = { 112 static struct clock_event_device decrementer_clockevent = {
113 .name = "decrementer", 113 .name = "decrementer",
114 .rating = 200, 114 .rating = 200,
115 .shift = 0, /* To be filled in */ 115 .shift = 0, /* To be filled in */
116 .mult = 0, /* To be filled in */ 116 .mult = 0, /* To be filled in */
117 .irq = 0, 117 .irq = 0,
118 .set_next_event = decrementer_set_next_event, 118 .set_next_event = decrementer_set_next_event,
119 .set_mode = decrementer_set_mode, 119 .set_mode = decrementer_set_mode,
120 .features = CLOCK_EVT_FEAT_ONESHOT, 120 .features = CLOCK_EVT_FEAT_ONESHOT,
121 }; 121 };
122 122
123 struct decrementer_clock { 123 struct decrementer_clock {
124 struct clock_event_device event; 124 struct clock_event_device event;
125 u64 next_tb; 125 u64 next_tb;
126 }; 126 };
127 127
128 static DEFINE_PER_CPU(struct decrementer_clock, decrementers); 128 static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
129 129
130 #ifdef CONFIG_PPC_ISERIES 130 #ifdef CONFIG_PPC_ISERIES
131 static unsigned long __initdata iSeries_recal_titan; 131 static unsigned long __initdata iSeries_recal_titan;
132 static signed long __initdata iSeries_recal_tb; 132 static signed long __initdata iSeries_recal_tb;
133 133
134 /* Forward declaration is only needed for iSeries compiles */ 134 /* Forward declaration is only needed for iSeries compiles */
135 static void __init clocksource_init(void); 135 static void __init clocksource_init(void);
136 #endif 136 #endif
137 137
138 #define XSEC_PER_SEC (1024*1024) 138 #define XSEC_PER_SEC (1024*1024)
139 139
140 #ifdef CONFIG_PPC64 140 #ifdef CONFIG_PPC64
141 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC) 141 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
142 #else 142 #else
143 /* compute ((xsec << 12) * max) >> 32 */ 143 /* compute ((xsec << 12) * max) >> 32 */
144 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max) 144 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
145 #endif 145 #endif
146 146
147 unsigned long tb_ticks_per_jiffy; 147 unsigned long tb_ticks_per_jiffy;
148 unsigned long tb_ticks_per_usec = 100; /* sane default */ 148 unsigned long tb_ticks_per_usec = 100; /* sane default */
149 EXPORT_SYMBOL(tb_ticks_per_usec); 149 EXPORT_SYMBOL(tb_ticks_per_usec);
150 unsigned long tb_ticks_per_sec; 150 unsigned long tb_ticks_per_sec;
151 EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ 151 EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
152 u64 tb_to_xs; 152 u64 tb_to_xs;
153 unsigned tb_to_us; 153 unsigned tb_to_us;
154 154
155 #define TICKLEN_SCALE NTP_SCALE_SHIFT 155 #define TICKLEN_SCALE NTP_SCALE_SHIFT
156 static u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ 156 static u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
157 static u64 ticklen_to_xs; /* 0.64 fraction */ 157 static u64 ticklen_to_xs; /* 0.64 fraction */
158 158
159 /* If last_tick_len corresponds to about 1/HZ seconds, then 159 /* If last_tick_len corresponds to about 1/HZ seconds, then
160 last_tick_len << TICKLEN_SHIFT will be about 2^63. */ 160 last_tick_len << TICKLEN_SHIFT will be about 2^63. */
161 #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) 161 #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
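
As a rough check on the comment above: one tick is about 1/HZ ≈ 2^-SHIFT_HZ seconds ≈ 2^(30 - SHIFT_HZ) ns (since 10^9 ns ≈ 2^30 ns), so last_tick_len, stored in units of ns / 2^TICKLEN_SCALE, comes to about 2^(30 - SHIFT_HZ + TICKLEN_SCALE). Shifting that left by 63 - 30 - TICKLEN_SCALE + SHIFT_HZ lands it at roughly 2^63, which is exactly what TICKLEN_SHIFT is built to achieve.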
162 162
163 DEFINE_SPINLOCK(rtc_lock); 163 DEFINE_SPINLOCK(rtc_lock);
164 EXPORT_SYMBOL_GPL(rtc_lock); 164 EXPORT_SYMBOL_GPL(rtc_lock);
165 165
166 static u64 tb_to_ns_scale __read_mostly; 166 static u64 tb_to_ns_scale __read_mostly;
167 static unsigned tb_to_ns_shift __read_mostly; 167 static unsigned tb_to_ns_shift __read_mostly;
168 static unsigned long boot_tb __read_mostly; 168 static unsigned long boot_tb __read_mostly;
169 169
170 extern struct timezone sys_tz; 170 extern struct timezone sys_tz;
171 static long timezone_offset; 171 static long timezone_offset;
172 172
173 unsigned long ppc_proc_freq; 173 unsigned long ppc_proc_freq;
174 EXPORT_SYMBOL(ppc_proc_freq); 174 EXPORT_SYMBOL(ppc_proc_freq);
175 unsigned long ppc_tb_freq; 175 unsigned long ppc_tb_freq;
176 176
177 static u64 tb_last_jiffy __cacheline_aligned_in_smp; 177 static u64 tb_last_jiffy __cacheline_aligned_in_smp;
178 static DEFINE_PER_CPU(u64, last_jiffy); 178 static DEFINE_PER_CPU(u64, last_jiffy);
179 179
180 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 180 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
181 /* 181 /*
182 * Factors for converting from cputime_t (timebase ticks) to 182 * Factors for converting from cputime_t (timebase ticks) to
183 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). 183 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
184 * These are all stored as 0.64 fixed-point binary fractions. 184 * These are all stored as 0.64 fixed-point binary fractions.
185 */ 185 */
186 u64 __cputime_jiffies_factor; 186 u64 __cputime_jiffies_factor;
187 EXPORT_SYMBOL(__cputime_jiffies_factor); 187 EXPORT_SYMBOL(__cputime_jiffies_factor);
188 u64 __cputime_msec_factor; 188 u64 __cputime_msec_factor;
189 EXPORT_SYMBOL(__cputime_msec_factor); 189 EXPORT_SYMBOL(__cputime_msec_factor);
190 u64 __cputime_sec_factor; 190 u64 __cputime_sec_factor;
191 EXPORT_SYMBOL(__cputime_sec_factor); 191 EXPORT_SYMBOL(__cputime_sec_factor);
192 u64 __cputime_clockt_factor; 192 u64 __cputime_clockt_factor;
193 EXPORT_SYMBOL(__cputime_clockt_factor); 193 EXPORT_SYMBOL(__cputime_clockt_factor);
194 DEFINE_PER_CPU(unsigned long, cputime_last_delta); 194 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
195 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); 195 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
196 196
197 cputime_t cputime_one_jiffy; 197 cputime_t cputime_one_jiffy;
198 198
199 static void calc_cputime_factors(void) 199 static void calc_cputime_factors(void)
200 { 200 {
201 struct div_result res; 201 struct div_result res;
202 202
203 div128_by_32(HZ, 0, tb_ticks_per_sec, &res); 203 div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
204 __cputime_jiffies_factor = res.result_low; 204 __cputime_jiffies_factor = res.result_low;
205 div128_by_32(1000, 0, tb_ticks_per_sec, &res); 205 div128_by_32(1000, 0, tb_ticks_per_sec, &res);
206 __cputime_msec_factor = res.result_low; 206 __cputime_msec_factor = res.result_low;
207 div128_by_32(1, 0, tb_ticks_per_sec, &res); 207 div128_by_32(1, 0, tb_ticks_per_sec, &res);
208 __cputime_sec_factor = res.result_low; 208 __cputime_sec_factor = res.result_low;
209 div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); 209 div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
210 __cputime_clockt_factor = res.result_low; 210 __cputime_clockt_factor = res.result_low;
211 } 211 }
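
Each factor computed above is effectively (rate * 2^64) / tb_ticks_per_sec, i.e. a 0.64 fixed-point fraction, and the consumers multiply by it and keep only the high 64 bits of the product. A small userspace model of that arithmetic (the values are made up; the kernel uses div128_by_32() and mulhdu() rather than __int128):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t tb_ticks_per_sec = 512000000ULL;	/* pretend 512 MHz timebase */
		uint64_t hz = 250;				/* pretend HZ = 250 */

		/* __cputime_jiffies_factor: 2^64 * HZ / tb_ticks_per_sec */
		uint64_t factor = (uint64_t)(((unsigned __int128)hz << 64) /
					     tb_ticks_per_sec);

		uint64_t ticks = 2048000000ULL;			/* 4 s worth of timebase ticks */
		uint64_t jiffies = (uint64_t)(((unsigned __int128)ticks * factor) >> 64);

		/* Expect about 1000; fixed-point truncation may knock off a tick */
		printf("%llu ticks ~ %llu jiffies\n",
		       (unsigned long long)ticks, (unsigned long long)jiffies);
		return 0;
	}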
212 212
213 /* 213 /*
214 * Read the PURR on systems that have it, otherwise the timebase. 214 * Read the PURR on systems that have it, otherwise the timebase.
215 */ 215 */
216 static u64 read_purr(void) 216 static u64 read_purr(void)
217 { 217 {
218 if (cpu_has_feature(CPU_FTR_PURR)) 218 if (cpu_has_feature(CPU_FTR_PURR))
219 return mfspr(SPRN_PURR); 219 return mfspr(SPRN_PURR);
220 return mftb(); 220 return mftb();
221 } 221 }
222 222
223 /* 223 /*
224 * Read the SPURR on systems that have it, otherwise the purr 224 * Read the SPURR on systems that have it, otherwise the purr
225 */ 225 */
226 static u64 read_spurr(u64 purr) 226 static u64 read_spurr(u64 purr)
227 { 227 {
228 /* 228 /*
229 * cpus without PURR won't have a SPURR 229 * cpus without PURR won't have a SPURR
230 * We already know the former when we use this, so tell gcc 230 * We already know the former when we use this, so tell gcc
231 */ 231 */
232 if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) 232 if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
233 return mfspr(SPRN_SPURR); 233 return mfspr(SPRN_SPURR);
234 return purr; 234 return purr;
235 } 235 }
236 236
237 /* 237 /*
238 * Account time for a transition between system, hard irq 238 * Account time for a transition between system, hard irq
239 * or soft irq state. 239 * or soft irq state.
240 */ 240 */
241 void account_system_vtime(struct task_struct *tsk) 241 void account_system_vtime(struct task_struct *tsk)
242 { 242 {
243 u64 now, nowscaled, delta, deltascaled, sys_time; 243 u64 now, nowscaled, delta, deltascaled, sys_time;
244 unsigned long flags; 244 unsigned long flags;
245 245
246 local_irq_save(flags); 246 local_irq_save(flags);
247 now = read_purr(); 247 now = read_purr();
248 nowscaled = read_spurr(now); 248 nowscaled = read_spurr(now);
249 delta = now - get_paca()->startpurr; 249 delta = now - get_paca()->startpurr;
250 deltascaled = nowscaled - get_paca()->startspurr; 250 deltascaled = nowscaled - get_paca()->startspurr;
251 get_paca()->startpurr = now; 251 get_paca()->startpurr = now;
252 get_paca()->startspurr = nowscaled; 252 get_paca()->startspurr = nowscaled;
253 if (!in_interrupt()) { 253 if (!in_interrupt()) {
254 /* deltascaled includes both user and system time. 254 /* deltascaled includes both user and system time.
255 * Hence scale it based on the purr ratio to estimate 255 * Hence scale it based on the purr ratio to estimate
256 * the system time */ 256 * the system time */
257 sys_time = get_paca()->system_time; 257 sys_time = get_paca()->system_time;
258 if (get_paca()->user_time) 258 if (get_paca()->user_time)
259 deltascaled = deltascaled * sys_time / 259 deltascaled = deltascaled * sys_time /
260 (sys_time + get_paca()->user_time); 260 (sys_time + get_paca()->user_time);
261 delta += sys_time; 261 delta += sys_time;
262 get_paca()->system_time = 0; 262 get_paca()->system_time = 0;
263 } 263 }
264 if (in_irq() || idle_task(smp_processor_id()) != tsk) 264 if (in_irq() || idle_task(smp_processor_id()) != tsk)
265 account_system_time(tsk, 0, delta, deltascaled); 265 account_system_time(tsk, 0, delta, deltascaled);
266 else 266 else
267 account_idle_time(delta); 267 account_idle_time(delta);
268 __get_cpu_var(cputime_last_delta) = delta; 268 __get_cpu_var(cputime_last_delta) = delta;
269 __get_cpu_var(cputime_scaled_last_delta) = deltascaled; 269 __get_cpu_var(cputime_scaled_last_delta) = deltascaled;
270 local_irq_restore(flags); 270 local_irq_restore(flags);
271 } 271 }
272 EXPORT_SYMBOL_GPL(account_system_vtime); 272 EXPORT_SYMBOL_GPL(account_system_vtime);
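
The scaling above apportions the SPURR-based ("scaled") delta in the same ratio as the raw system/user split. For illustration: if system_time is 300 ticks, user_time is 100 ticks and deltascaled is 360, the system share is charged as 360 * 300 / (300 + 100) = 270 scaled ticks, the remainder being covered when user time is accounted.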
273 273
274 /* 274 /*
275 * Transfer the user and system times accumulated in the paca 275 * Transfer the user and system times accumulated in the paca
276 * by the exception entry and exit code to the generic process 276 * by the exception entry and exit code to the generic process
277 * user and system time records. 277 * user and system time records.
278 * Must be called with interrupts disabled. 278 * Must be called with interrupts disabled.
279 */ 279 */
280 void account_process_tick(struct task_struct *tsk, int user_tick) 280 void account_process_tick(struct task_struct *tsk, int user_tick)
281 { 281 {
282 cputime_t utime, utimescaled; 282 cputime_t utime, utimescaled;
283 283
284 utime = get_paca()->user_time; 284 utime = get_paca()->user_time;
285 get_paca()->user_time = 0; 285 get_paca()->user_time = 0;
286 utimescaled = cputime_to_scaled(utime); 286 utimescaled = cputime_to_scaled(utime);
287 account_user_time(tsk, utime, utimescaled); 287 account_user_time(tsk, utime, utimescaled);
288 } 288 }
289 289
290 /* 290 /*
291 * Stuff for accounting stolen time. 291 * Stuff for accounting stolen time.
292 */ 292 */
293 struct cpu_purr_data { 293 struct cpu_purr_data {
294 int initialized; /* thread is running */ 294 int initialized; /* thread is running */
295 u64 tb; /* last TB value read */ 295 u64 tb; /* last TB value read */
296 u64 purr; /* last PURR value read */ 296 u64 purr; /* last PURR value read */
297 u64 spurr; /* last SPURR value read */ 297 u64 spurr; /* last SPURR value read */
298 }; 298 };
299 299
300 /* 300 /*
301 * Each entry in the cpu_purr_data array is manipulated only by its 301 * Each entry in the cpu_purr_data array is manipulated only by its
302 * "owner" cpu -- usually in the timer interrupt but also occasionally 302 * "owner" cpu -- usually in the timer interrupt but also occasionally
303 * in process context for cpu online. As long as cpus do not touch 303 * in process context for cpu online. As long as cpus do not touch
304 * each others' cpu_purr_data, disabling local interrupts is 304 * each others' cpu_purr_data, disabling local interrupts is
305 * sufficient to serialize accesses. 305 * sufficient to serialize accesses.
306 */ 306 */
307 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); 307 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
308 308
309 static void snapshot_tb_and_purr(void *data) 309 static void snapshot_tb_and_purr(void *data)
310 { 310 {
311 unsigned long flags; 311 unsigned long flags;
312 struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); 312 struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
313 313
314 local_irq_save(flags); 314 local_irq_save(flags);
315 p->tb = get_tb_or_rtc(); 315 p->tb = get_tb_or_rtc();
316 p->purr = mfspr(SPRN_PURR); 316 p->purr = mfspr(SPRN_PURR);
317 wmb(); 317 wmb();
318 p->initialized = 1; 318 p->initialized = 1;
319 local_irq_restore(flags); 319 local_irq_restore(flags);
320 } 320 }
321 321
322 /* 322 /*
323 * Called during boot when all cpus have come up. 323 * Called during boot when all cpus have come up.
324 */ 324 */
325 void snapshot_timebases(void) 325 void snapshot_timebases(void)
326 { 326 {
327 if (!cpu_has_feature(CPU_FTR_PURR)) 327 if (!cpu_has_feature(CPU_FTR_PURR))
328 return; 328 return;
329 on_each_cpu(snapshot_tb_and_purr, NULL, 1); 329 on_each_cpu(snapshot_tb_and_purr, NULL, 1);
330 } 330 }
331 331
332 /* 332 /*
333 * Must be called with interrupts disabled. 333 * Must be called with interrupts disabled.
334 */ 334 */
335 void calculate_steal_time(void) 335 void calculate_steal_time(void)
336 { 336 {
337 u64 tb, purr; 337 u64 tb, purr;
338 s64 stolen; 338 s64 stolen;
339 struct cpu_purr_data *pme; 339 struct cpu_purr_data *pme;
340 340
341 pme = &__get_cpu_var(cpu_purr_data); 341 pme = &__get_cpu_var(cpu_purr_data);
342 if (!pme->initialized) 342 if (!pme->initialized)
343 return; /* !CPU_FTR_PURR or early in early boot */ 343 return; /* !CPU_FTR_PURR or early in early boot */
344 tb = mftb(); 344 tb = mftb();
345 purr = mfspr(SPRN_PURR); 345 purr = mfspr(SPRN_PURR);
346 stolen = (tb - pme->tb) - (purr - pme->purr); 346 stolen = (tb - pme->tb) - (purr - pme->purr);
347 if (stolen > 0) { 347 if (stolen > 0) {
348 if (idle_task(smp_processor_id()) != current) 348 if (idle_task(smp_processor_id()) != current)
349 account_steal_time(stolen); 349 account_steal_time(stolen);
350 else 350 else
351 account_idle_time(stolen); 351 account_idle_time(stolen);
352 } 352 }
353 pme->tb = tb; 353 pme->tb = tb;
354 pme->purr = purr; 354 pme->purr = purr;
355 } 355 }
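
On a shared-processor partition the timebase keeps ticking even while the hypervisor runs another partition, whereas the PURR only advances for cycles actually dispatched to this thread, so the difference of the two deltas is the stolen time. For example, if 1,000,000 timebase ticks elapsed since the last snapshot but the PURR advanced by only 600,000, then stolen = 1,000,000 - 600,000 = 400,000 ticks are accounted as steal (or idle) time.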
356 356
357 #ifdef CONFIG_PPC_SPLPAR 357 #ifdef CONFIG_PPC_SPLPAR
358 /* 358 /*
359 * Must be called before the cpu is added to the online map when 359 * Must be called before the cpu is added to the online map when
360 * a cpu is being brought up at runtime. 360 * a cpu is being brought up at runtime.
361 */ 361 */
362 static void snapshot_purr(void) 362 static void snapshot_purr(void)
363 { 363 {
364 struct cpu_purr_data *pme; 364 struct cpu_purr_data *pme;
365 unsigned long flags; 365 unsigned long flags;
366 366
367 if (!cpu_has_feature(CPU_FTR_PURR)) 367 if (!cpu_has_feature(CPU_FTR_PURR))
368 return; 368 return;
369 local_irq_save(flags); 369 local_irq_save(flags);
370 pme = &__get_cpu_var(cpu_purr_data); 370 pme = &__get_cpu_var(cpu_purr_data);
371 pme->tb = mftb(); 371 pme->tb = mftb();
372 pme->purr = mfspr(SPRN_PURR); 372 pme->purr = mfspr(SPRN_PURR);
373 pme->initialized = 1; 373 pme->initialized = 1;
374 local_irq_restore(flags); 374 local_irq_restore(flags);
375 } 375 }
376 376
377 #endif /* CONFIG_PPC_SPLPAR */ 377 #endif /* CONFIG_PPC_SPLPAR */
378 378
379 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ 379 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
380 #define calc_cputime_factors() 380 #define calc_cputime_factors()
381 #define calculate_steal_time() do { } while (0) 381 #define calculate_steal_time() do { } while (0)
382 #endif 382 #endif
383 383
384 #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) 384 #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
385 #define snapshot_purr() do { } while (0) 385 #define snapshot_purr() do { } while (0)
386 #endif 386 #endif
387 387
388 /* 388 /*
389 * Called when a cpu comes up after the system has finished booting, 389 * Called when a cpu comes up after the system has finished booting,
390 * i.e. as a result of a hotplug cpu action. 390 * i.e. as a result of a hotplug cpu action.
391 */ 391 */
392 void snapshot_timebase(void) 392 void snapshot_timebase(void)
393 { 393 {
394 __get_cpu_var(last_jiffy) = get_tb_or_rtc(); 394 __get_cpu_var(last_jiffy) = get_tb_or_rtc();
395 snapshot_purr(); 395 snapshot_purr();
396 } 396 }
397 397
398 void __delay(unsigned long loops) 398 void __delay(unsigned long loops)
399 { 399 {
400 unsigned long start; 400 unsigned long start;
401 int diff; 401 int diff;
402 402
403 if (__USE_RTC()) { 403 if (__USE_RTC()) {
404 start = get_rtcl(); 404 start = get_rtcl();
405 do { 405 do {
406 /* the RTCL register wraps at 1000000000 */ 406 /* the RTCL register wraps at 1000000000 */
407 diff = get_rtcl() - start; 407 diff = get_rtcl() - start;
408 if (diff < 0) 408 if (diff < 0)
409 diff += 1000000000; 409 diff += 1000000000;
410 } while (diff < loops); 410 } while (diff < loops);
411 } else { 411 } else {
412 start = get_tbl(); 412 start = get_tbl();
413 while (get_tbl() - start < loops) 413 while (get_tbl() - start < loops)
414 HMT_low(); 414 HMT_low();
415 HMT_medium(); 415 HMT_medium();
416 } 416 }
417 } 417 }
418 EXPORT_SYMBOL(__delay); 418 EXPORT_SYMBOL(__delay);
419 419
420 void udelay(unsigned long usecs) 420 void udelay(unsigned long usecs)
421 { 421 {
422 __delay(tb_ticks_per_usec * usecs); 422 __delay(tb_ticks_per_usec * usecs);
423 } 423 }
424 EXPORT_SYMBOL(udelay); 424 EXPORT_SYMBOL(udelay);
425 425
426 static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, 426 static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
427 u64 new_tb_to_xs) 427 u64 new_tb_to_xs)
428 { 428 {
429 /* 429 /*
430 * tb_update_count is used to allow the userspace gettimeofday code 430 * tb_update_count is used to allow the userspace gettimeofday code
431 * to assure itself that it sees a consistent view of the tb_to_xs and 431 * to assure itself that it sees a consistent view of the tb_to_xs and
432 * stamp_xsec variables. It reads the tb_update_count, then reads 432 * stamp_xsec variables. It reads the tb_update_count, then reads
433 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If 433 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
434 * the two values of tb_update_count match and are even then the 434 * the two values of tb_update_count match and are even then the
435 * tb_to_xs and stamp_xsec values are consistent. If not, then it 435 * tb_to_xs and stamp_xsec values are consistent. If not, then it
436 * loops back and reads them again until this criterion is met. 436 * loops back and reads them again until this criterion is met.
437 * We expect the caller to have done the first increment of 437 * We expect the caller to have done the first increment of
438 * vdso_data->tb_update_count already. 438 * vdso_data->tb_update_count already.
439 */ 439 */
440 vdso_data->tb_orig_stamp = new_tb_stamp; 440 vdso_data->tb_orig_stamp = new_tb_stamp;
441 vdso_data->stamp_xsec = new_stamp_xsec; 441 vdso_data->stamp_xsec = new_stamp_xsec;
442 vdso_data->tb_to_xs = new_tb_to_xs; 442 vdso_data->tb_to_xs = new_tb_to_xs;
443 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 443 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
444 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 444 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
445 vdso_data->stamp_xtime = xtime; 445 vdso_data->stamp_xtime = xtime;
446 smp_wmb(); 446 smp_wmb();
447 ++(vdso_data->tb_update_count); 447 ++(vdso_data->tb_update_count);
448 } 448 }
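
A sketch of the read side that this counter protocol serves (the real consumer is the gettimeofday code in the VDSO, mostly in assembly; this is only a C rendering of the retry loop the comment describes):

	static u64 read_tb_to_xs_sketch(void)
	{
		unsigned long seq;
		u64 t2x;

		do {
			seq = vdso_data->tb_update_count;	/* snapshot the count   */
			smp_rmb();
			t2x = vdso_data->tb_to_xs;		/* plus stamp_xsec etc. */
			smp_rmb();
		} while ((seq & 1) || seq != vdso_data->tb_update_count);

		return t2x;
	}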
449 449
450 #ifdef CONFIG_SMP 450 #ifdef CONFIG_SMP
451 unsigned long profile_pc(struct pt_regs *regs) 451 unsigned long profile_pc(struct pt_regs *regs)
452 { 452 {
453 unsigned long pc = instruction_pointer(regs); 453 unsigned long pc = instruction_pointer(regs);
454 454
455 if (in_lock_functions(pc)) 455 if (in_lock_functions(pc))
456 return regs->link; 456 return regs->link;
457 457
458 return pc; 458 return pc;
459 } 459 }
460 EXPORT_SYMBOL(profile_pc); 460 EXPORT_SYMBOL(profile_pc);
461 #endif 461 #endif
462 462
463 #ifdef CONFIG_PPC_ISERIES 463 #ifdef CONFIG_PPC_ISERIES
464 464
465 /* 465 /*
466 * This function recalibrates the timebase based on the 49-bit time-of-day 466 * This function recalibrates the timebase based on the 49-bit time-of-day
467 * value in the Titan chip. The Titan is much more accurate than the value 467 * value in the Titan chip. The Titan is much more accurate than the value
468 * returned by the service processor for the timebase frequency. 468 * returned by the service processor for the timebase frequency.
469 */ 469 */
470 470
471 static int __init iSeries_tb_recal(void) 471 static int __init iSeries_tb_recal(void)
472 { 472 {
473 struct div_result divres; 473 struct div_result divres;
474 unsigned long titan, tb; 474 unsigned long titan, tb;
475 475
476 /* Make sure we only run on iSeries */ 476 /* Make sure we only run on iSeries */
477 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 477 if (!firmware_has_feature(FW_FEATURE_ISERIES))
478 return -ENODEV; 478 return -ENODEV;
479 479
480 tb = get_tb(); 480 tb = get_tb();
481 titan = HvCallXm_loadTod(); 481 titan = HvCallXm_loadTod();
482 if ( iSeries_recal_titan ) { 482 if ( iSeries_recal_titan ) {
483 unsigned long tb_ticks = tb - iSeries_recal_tb; 483 unsigned long tb_ticks = tb - iSeries_recal_tb;
484 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; 484 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
485 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; 485 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
486 unsigned long new_tb_ticks_per_jiffy = 486 unsigned long new_tb_ticks_per_jiffy =
487 DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ); 487 DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
488 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; 488 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
489 char sign = '+'; 489 char sign = '+';
490 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ 490 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
491 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; 491 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
492 492
493 if ( tick_diff < 0 ) { 493 if ( tick_diff < 0 ) {
494 tick_diff = -tick_diff; 494 tick_diff = -tick_diff;
495 sign = '-'; 495 sign = '-';
496 } 496 }
497 if ( tick_diff ) { 497 if ( tick_diff ) {
498 if ( tick_diff < tb_ticks_per_jiffy/25 ) { 498 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
499 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", 499 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
500 new_tb_ticks_per_jiffy, sign, tick_diff ); 500 new_tb_ticks_per_jiffy, sign, tick_diff );
501 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; 501 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
502 tb_ticks_per_sec = new_tb_ticks_per_sec; 502 tb_ticks_per_sec = new_tb_ticks_per_sec;
503 calc_cputime_factors(); 503 calc_cputime_factors();
504 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); 504 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
505 tb_to_xs = divres.result_low; 505 tb_to_xs = divres.result_low;
506 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 506 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
507 vdso_data->tb_to_xs = tb_to_xs; 507 vdso_data->tb_to_xs = tb_to_xs;
508 setup_cputime_one_jiffy(); 508 setup_cputime_one_jiffy();
509 } 509 }
510 else { 510 else {
511 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" 511 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
512 " new tb_ticks_per_jiffy = %lu\n" 512 " new tb_ticks_per_jiffy = %lu\n"
513 " old tb_ticks_per_jiffy = %lu\n", 513 " old tb_ticks_per_jiffy = %lu\n",
514 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); 514 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
515 } 515 }
516 } 516 }
517 } 517 }
518 iSeries_recal_titan = titan; 518 iSeries_recal_titan = titan;
519 iSeries_recal_tb = tb; 519 iSeries_recal_tb = tb;
520 520
521 /* Called here as now we know accurate values for the timebase */ 521 /* Called here as now we know accurate values for the timebase */
522 clocksource_init(); 522 clocksource_init();
523 return 0; 523 return 0;
524 } 524 }
525 late_initcall(iSeries_tb_recal); 525 late_initcall(iSeries_tb_recal);
526 526
527 /* Called from platform early init */ 527 /* Called from platform early init */
528 void __init iSeries_time_init_early(void) 528 void __init iSeries_time_init_early(void)
529 { 529 {
530 iSeries_recal_tb = get_tb(); 530 iSeries_recal_tb = get_tb();
531 iSeries_recal_titan = HvCallXm_loadTod(); 531 iSeries_recal_titan = HvCallXm_loadTod();
532 } 532 }
533 #endif /* CONFIG_PPC_ISERIES */ 533 #endif /* CONFIG_PPC_ISERIES */
534 534
535 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) 535 #ifdef CONFIG_PERF_EVENTS
536 DEFINE_PER_CPU(u8, perf_event_pending);
537 536
538 void set_perf_event_pending(void) 537 /*
538 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
539 */
540 #ifdef CONFIG_PPC64
541 static inline unsigned long test_perf_event_pending(void)
539 { 542 {
540 get_cpu_var(perf_event_pending) = 1; 543 unsigned long x;
541 set_dec(1); 544
542 put_cpu_var(perf_event_pending); 545 asm volatile("lbz %0,%1(13)"
546 : "=r" (x)
547 : "i" (offsetof(struct paca_struct, perf_event_pending)));
548 return x;
543 } 549 }
544 550
551 static inline void set_perf_event_pending_flag(void)
552 {
553 asm volatile("stb %0,%1(13)" : :
554 "r" (1),
555 "i" (offsetof(struct paca_struct, perf_event_pending)));
556 }
557
558 static inline void clear_perf_event_pending(void)
559 {
560 asm volatile("stb %0,%1(13)" : :
561 "r" (0),
562 "i" (offsetof(struct paca_struct, perf_event_pending)));
563 }
564
565 #else /* 32-bit */
566
567 DEFINE_PER_CPU(u8, perf_event_pending);
568
569 #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
545 #define test_perf_event_pending() __get_cpu_var(perf_event_pending) 570 #define test_perf_event_pending() __get_cpu_var(perf_event_pending)
546 #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 571 #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
547 572
548 #else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 573 #endif /* 32 vs 64 bit */
549 574
575 void set_perf_event_pending(void)
576 {
577 preempt_disable();
578 set_perf_event_pending_flag();
579 set_dec(1);
580 preempt_enable();
581 }
582
583 #else /* CONFIG_PERF_EVENTS */
584
550 #define test_perf_event_pending() 0 585 #define test_perf_event_pending() 0
551 #define clear_perf_event_pending() 586 #define clear_perf_event_pending()
552 587
553 #endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 588 #endif /* CONFIG_PERF_EVENTS */
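
The 64-bit accessors above hand-code what is conceptually a byte flag in the PACA: r13 always points at the current CPU's paca_struct, even in contexts where the generic per-cpu machinery is not safe to use, so the flag can be set and tested with a single stb/lbz. Roughly, and ignoring why it is done in inline asm, they amount to:

	/* Illustrative C equivalents of the PPC64 accessors above */
	get_paca()->perf_event_pending = 1;		/* set_perf_event_pending_flag() */
	if (get_paca()->perf_event_pending)		/* test_perf_event_pending()     */
		get_paca()->perf_event_pending = 0;	/* clear_perf_event_pending()    */

set_perf_event_pending() then arms the decrementer with a count of 1, so a decrementer exception (and with it timer_interrupt()) is taken almost immediately and the pending perf work is drained from there. As the timer_interrupt() hunk below shows, the check now happens after irq_enter() and on both 32- and 64-bit, rather than only in the 32-bit block at the top of the handler.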
554 589
555 /* 590 /*
556 * For iSeries shared processors, we have to let the hypervisor 591 * For iSeries shared processors, we have to let the hypervisor
557 * set the hardware decrementer. We set a virtual decrementer 592 * set the hardware decrementer. We set a virtual decrementer
558 * in the lppaca and call the hypervisor if the virtual 593 * in the lppaca and call the hypervisor if the virtual
559 * decrementer is less than the current value in the hardware 594 * decrementer is less than the current value in the hardware
560 * decrementer. (almost always the new decrementer value will 595 * decrementer. (almost always the new decrementer value will
561 * be greater than the current hardware decrementer so the hypervisor 596 * be greater than the current hardware decrementer so the hypervisor
562 * call will not be needed) 597 * call will not be needed)
563 */ 598 */
564 599
565 /* 600 /*
566 * timer_interrupt - gets called when the decrementer overflows, 601 * timer_interrupt - gets called when the decrementer overflows,
567 * with interrupts disabled. 602 * with interrupts disabled.
568 */ 603 */
569 void timer_interrupt(struct pt_regs * regs) 604 void timer_interrupt(struct pt_regs * regs)
570 { 605 {
571 struct pt_regs *old_regs; 606 struct pt_regs *old_regs;
572 struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); 607 struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
573 struct clock_event_device *evt = &decrementer->event; 608 struct clock_event_device *evt = &decrementer->event;
574 u64 now; 609 u64 now;
575 610
576 trace_timer_interrupt_entry(regs); 611 trace_timer_interrupt_entry(regs);
577 612
578 __get_cpu_var(irq_stat).timer_irqs++; 613 __get_cpu_var(irq_stat).timer_irqs++;
579 614
580 /* Ensure a positive value is written to the decrementer, or else 615 /* Ensure a positive value is written to the decrementer, or else
581 * some CPUs will continue to take decrementer exceptions */ 616 * some CPUs will continue to take decrementer exceptions */
582 set_dec(DECREMENTER_MAX); 617 set_dec(DECREMENTER_MAX);
583 618
584 #ifdef CONFIG_PPC32 619 #ifdef CONFIG_PPC32
585 if (test_perf_event_pending()) {
586 clear_perf_event_pending();
587 perf_event_do_pending();
588 }
589 if (atomic_read(&ppc_n_lost_interrupts) != 0) 620 if (atomic_read(&ppc_n_lost_interrupts) != 0)
590 do_IRQ(regs); 621 do_IRQ(regs);
591 #endif 622 #endif
592 623
593 now = get_tb_or_rtc(); 624 now = get_tb_or_rtc();
594 if (now < decrementer->next_tb) { 625 if (now < decrementer->next_tb) {
595 /* not time for this event yet */ 626 /* not time for this event yet */
596 now = decrementer->next_tb - now; 627 now = decrementer->next_tb - now;
597 if (now <= DECREMENTER_MAX) 628 if (now <= DECREMENTER_MAX)
598 set_dec((int)now); 629 set_dec((int)now);
599 trace_timer_interrupt_exit(regs); 630 trace_timer_interrupt_exit(regs);
600 return; 631 return;
601 } 632 }
602 old_regs = set_irq_regs(regs); 633 old_regs = set_irq_regs(regs);
603 irq_enter(); 634 irq_enter();
604 635
605 calculate_steal_time(); 636 calculate_steal_time();
637
638 if (test_perf_event_pending()) {
639 clear_perf_event_pending();
640 perf_event_do_pending();
641 }
606 642
607 #ifdef CONFIG_PPC_ISERIES 643 #ifdef CONFIG_PPC_ISERIES
608 if (firmware_has_feature(FW_FEATURE_ISERIES)) 644 if (firmware_has_feature(FW_FEATURE_ISERIES))
609 get_lppaca()->int_dword.fields.decr_int = 0; 645 get_lppaca()->int_dword.fields.decr_int = 0;
610 #endif 646 #endif
611 647
612 if (evt->event_handler) 648 if (evt->event_handler)
613 evt->event_handler(evt); 649 evt->event_handler(evt);
614 650
615 #ifdef CONFIG_PPC_ISERIES 651 #ifdef CONFIG_PPC_ISERIES
616 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) 652 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
617 process_hvlpevents(); 653 process_hvlpevents();
618 #endif 654 #endif
619 655
620 #ifdef CONFIG_PPC64 656 #ifdef CONFIG_PPC64
621 /* collect purr register values often, for accurate calculations */ 657 /* collect purr register values often, for accurate calculations */
622 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 658 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
623 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 659 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
624 cu->current_tb = mfspr(SPRN_PURR); 660 cu->current_tb = mfspr(SPRN_PURR);
625 } 661 }
626 #endif 662 #endif
627 663
628 irq_exit(); 664 irq_exit();
629 set_irq_regs(old_regs); 665 set_irq_regs(old_regs);
630 666
631 trace_timer_interrupt_exit(regs); 667 trace_timer_interrupt_exit(regs);
632 } 668 }
633 669
634 void wakeup_decrementer(void) 670 void wakeup_decrementer(void)
635 { 671 {
636 unsigned long ticks; 672 unsigned long ticks;
637 673
638 /* 674 /*
639 * The timebase gets saved on sleep and restored on wakeup, 675 * The timebase gets saved on sleep and restored on wakeup,
640 * so all we need to do is to reset the decrementer. 676 * so all we need to do is to reset the decrementer.
641 */ 677 */
642 ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); 678 ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
643 if (ticks < tb_ticks_per_jiffy) 679 if (ticks < tb_ticks_per_jiffy)
644 ticks = tb_ticks_per_jiffy - ticks; 680 ticks = tb_ticks_per_jiffy - ticks;
645 else 681 else
646 ticks = 1; 682 ticks = 1;
647 set_dec(ticks); 683 set_dec(ticks);
648 } 684 }
649 685
650 #ifdef CONFIG_SUSPEND 686 #ifdef CONFIG_SUSPEND
651 void generic_suspend_disable_irqs(void) 687 void generic_suspend_disable_irqs(void)
652 { 688 {
653 preempt_disable(); 689 preempt_disable();
654 690
655 /* Disable the decrementer, so that it doesn't interfere 691 /* Disable the decrementer, so that it doesn't interfere
656 * with suspending. 692 * with suspending.
657 */ 693 */
658 694
659 set_dec(0x7fffffff); 695 set_dec(0x7fffffff);
660 local_irq_disable(); 696 local_irq_disable();
661 set_dec(0x7fffffff); 697 set_dec(0x7fffffff);
662 } 698 }
663 699
664 void generic_suspend_enable_irqs(void) 700 void generic_suspend_enable_irqs(void)
665 { 701 {
666 wakeup_decrementer(); 702 wakeup_decrementer();
667 703
668 local_irq_enable(); 704 local_irq_enable();
669 preempt_enable(); 705 preempt_enable();
670 } 706 }
671 707
672 /* Overrides the weak version in kernel/power/main.c */ 708 /* Overrides the weak version in kernel/power/main.c */
673 void arch_suspend_disable_irqs(void) 709 void arch_suspend_disable_irqs(void)
674 { 710 {
675 if (ppc_md.suspend_disable_irqs) 711 if (ppc_md.suspend_disable_irqs)
676 ppc_md.suspend_disable_irqs(); 712 ppc_md.suspend_disable_irqs();
677 generic_suspend_disable_irqs(); 713 generic_suspend_disable_irqs();
678 } 714 }
679 715
680 /* Overrides the weak version in kernel/power/main.c */ 716 /* Overrides the weak version in kernel/power/main.c */
681 void arch_suspend_enable_irqs(void) 717 void arch_suspend_enable_irqs(void)
682 { 718 {
683 generic_suspend_enable_irqs(); 719 generic_suspend_enable_irqs();
684 if (ppc_md.suspend_enable_irqs) 720 if (ppc_md.suspend_enable_irqs)
685 ppc_md.suspend_enable_irqs(); 721 ppc_md.suspend_enable_irqs();
686 } 722 }
687 #endif 723 #endif
688 724
689 #ifdef CONFIG_SMP 725 #ifdef CONFIG_SMP
690 void __init smp_space_timers(unsigned int max_cpus) 726 void __init smp_space_timers(unsigned int max_cpus)
691 { 727 {
692 int i; 728 int i;
693 u64 previous_tb = per_cpu(last_jiffy, boot_cpuid); 729 u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
694 730
695 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ 731 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
696 previous_tb -= tb_ticks_per_jiffy; 732 previous_tb -= tb_ticks_per_jiffy;
697 733
698 for_each_possible_cpu(i) { 734 for_each_possible_cpu(i) {
699 if (i == boot_cpuid) 735 if (i == boot_cpuid)
700 continue; 736 continue;
701 per_cpu(last_jiffy, i) = previous_tb; 737 per_cpu(last_jiffy, i) = previous_tb;
702 } 738 }
703 } 739 }
704 #endif 740 #endif
705 741
706 /* 742 /*
707 * Scheduler clock - returns current time in nanosec units. 743 * Scheduler clock - returns current time in nanosec units.
708 * 744 *
709 * Note: mulhdu(a, b) (multiply high double unsigned) returns 745 * Note: mulhdu(a, b) (multiply high double unsigned) returns
710 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b 746 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
711 * are 64-bit unsigned numbers. 747 * are 64-bit unsigned numbers.
712 */ 748 */
713 unsigned long long sched_clock(void) 749 unsigned long long sched_clock(void)
714 { 750 {
715 if (__USE_RTC()) 751 if (__USE_RTC())
716 return get_rtc(); 752 return get_rtc();
717 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; 753 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
718 } 754 }
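
A userspace model of the conversion above: mulhdu(a, b) is the high 64 bits of the 128-bit product, emulated here with unsigned __int128, and the scale/shift pair is derived in one plausible way (scale = 10^9 * 2^64 / tb_freq, shifted right until it fits in 64 bits) consistent with the formula in the comment. The frequency is made up for illustration.

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t mulhdu_model(uint64_t a, uint64_t b)
	{
		return (uint64_t)(((unsigned __int128)a * b) >> 64);	/* high half */
	}

	int main(void)
	{
		uint64_t tb_freq = 512000000ULL;	/* pretend 512 MHz timebase */
		unsigned __int128 scale128 =
			((unsigned __int128)1000000000ULL << 64) / tb_freq;
		unsigned shift = 0;

		while (scale128 >> 64) {		/* squeeze scale into 64 bits */
			scale128 >>= 1;
			shift++;
		}

		uint64_t scale = (uint64_t)scale128;
		uint64_t delta = 5120000000ULL;		/* 10 s worth of ticks */
		uint64_t ns = mulhdu_model(delta, scale) << shift;

		printf("delta=%llu ticks -> %llu ns (expect 10000000000)\n",
		       (unsigned long long)delta, (unsigned long long)ns);
		return 0;
	}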
719 755
720 static int __init get_freq(char *name, int cells, unsigned long *val) 756 static int __init get_freq(char *name, int cells, unsigned long *val)
721 { 757 {
722 struct device_node *cpu; 758 struct device_node *cpu;
723 const unsigned int *fp; 759 const unsigned int *fp;
724 int found = 0; 760 int found = 0;
725 761
726 /* The cpu node should have timebase and clock frequency properties */ 762 /* The cpu node should have timebase and clock frequency properties */
727 cpu = of_find_node_by_type(NULL, "cpu"); 763 cpu = of_find_node_by_type(NULL, "cpu");
728 764
729 if (cpu) { 765 if (cpu) {
730 fp = of_get_property(cpu, name, NULL); 766 fp = of_get_property(cpu, name, NULL);
731 if (fp) { 767 if (fp) {
732 found = 1; 768 found = 1;
733 *val = of_read_ulong(fp, cells); 769 *val = of_read_ulong(fp, cells);
734 } 770 }
735 771
736 of_node_put(cpu); 772 of_node_put(cpu);
737 } 773 }
738 774
739 return found; 775 return found;
740 } 776 }
741 777
742 /* should become __cpuinit when secondary_cpu_time_init also is */ 778 /* should become __cpuinit when secondary_cpu_time_init also is */
743 void start_cpu_decrementer(void) 779 void start_cpu_decrementer(void)
744 { 780 {
745 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 781 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
746 /* Clear any pending timer interrupts */ 782 /* Clear any pending timer interrupts */
747 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); 783 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
748 784
749 /* Enable decrementer interrupt */ 785 /* Enable decrementer interrupt */
750 mtspr(SPRN_TCR, TCR_DIE); 786 mtspr(SPRN_TCR, TCR_DIE);
751 #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ 787 #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
752 } 788 }
753 789
754 void __init generic_calibrate_decr(void) 790 void __init generic_calibrate_decr(void)
755 { 791 {
756 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ 792 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
757 793
758 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && 794 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
759 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { 795 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
760 796
761 printk(KERN_ERR "WARNING: Estimating decrementer frequency " 797 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
762 "(not found)\n"); 798 "(not found)\n");
763 } 799 }
764 800
765 ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */ 801 ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
766 802
767 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && 803 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
768 !get_freq("clock-frequency", 1, &ppc_proc_freq)) { 804 !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
769 805
770 printk(KERN_ERR "WARNING: Estimating processor frequency " 806 printk(KERN_ERR "WARNING: Estimating processor frequency "
771 "(not found)\n"); 807 "(not found)\n");
772 } 808 }
773 } 809 }
774 810
775 int update_persistent_clock(struct timespec now) 811 int update_persistent_clock(struct timespec now)
776 { 812 {
777 struct rtc_time tm; 813 struct rtc_time tm;
778 814
779 if (!ppc_md.set_rtc_time) 815 if (!ppc_md.set_rtc_time)
780 return 0; 816 return 0;
781 817
782 to_tm(now.tv_sec + 1 + timezone_offset, &tm); 818 to_tm(now.tv_sec + 1 + timezone_offset, &tm);
783 tm.tm_year -= 1900; 819 tm.tm_year -= 1900;
784 tm.tm_mon -= 1; 820 tm.tm_mon -= 1;
785 821
786 return ppc_md.set_rtc_time(&tm); 822 return ppc_md.set_rtc_time(&tm);
787 } 823 }
788 824
789 static void __read_persistent_clock(struct timespec *ts) 825 static void __read_persistent_clock(struct timespec *ts)
790 { 826 {
791 struct rtc_time tm; 827 struct rtc_time tm;
792 static int first = 1; 828 static int first = 1;
793 829
794 ts->tv_nsec = 0; 830 ts->tv_nsec = 0;
795 /* XXX this is a little fragile but will work okay in the short term */ 831 /* XXX this is a little fragile but will work okay in the short term */
796 if (first) { 832 if (first) {
797 first = 0; 833 first = 0;
798 if (ppc_md.time_init) 834 if (ppc_md.time_init)
799 timezone_offset = ppc_md.time_init(); 835 timezone_offset = ppc_md.time_init();
800 836
801 /* get_boot_time() isn't guaranteed to be safe to call late */ 837 /* get_boot_time() isn't guaranteed to be safe to call late */
802 if (ppc_md.get_boot_time) { 838 if (ppc_md.get_boot_time) {
803 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; 839 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
804 return; 840 return;
805 } 841 }
806 } 842 }
807 if (!ppc_md.get_rtc_time) { 843 if (!ppc_md.get_rtc_time) {
808 ts->tv_sec = 0; 844 ts->tv_sec = 0;
809 return; 845 return;
810 } 846 }
811 ppc_md.get_rtc_time(&tm); 847 ppc_md.get_rtc_time(&tm);
812 848
813 ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, 849 ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
814 tm.tm_hour, tm.tm_min, tm.tm_sec); 850 tm.tm_hour, tm.tm_min, tm.tm_sec);
815 } 851 }
816 852
817 void read_persistent_clock(struct timespec *ts) 853 void read_persistent_clock(struct timespec *ts)
818 { 854 {
819 __read_persistent_clock(ts); 855 __read_persistent_clock(ts);
820 856
821 /* Sanitize it in case the real time clock is set before the epoch */ 857 /* Sanitize it in case the real time clock is set before the epoch */
822 if (ts->tv_sec < 0) { 858 if (ts->tv_sec < 0) {
823 ts->tv_sec = 0; 859 ts->tv_sec = 0;
824 ts->tv_nsec = 0; 860 ts->tv_nsec = 0;
825 } 861 }
826 862
827 } 863 }
828 864
829 /* clocksource code */ 865 /* clocksource code */
830 static cycle_t rtc_read(struct clocksource *cs) 866 static cycle_t rtc_read(struct clocksource *cs)
831 { 867 {
832 return (cycle_t)get_rtc(); 868 return (cycle_t)get_rtc();
833 } 869 }
834 870
835 static cycle_t timebase_read(struct clocksource *cs) 871 static cycle_t timebase_read(struct clocksource *cs)
836 { 872 {
837 return (cycle_t)get_tb(); 873 return (cycle_t)get_tb();
838 } 874 }
839 875
840 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, 876 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
841 u32 mult) 877 u32 mult)
842 { 878 {
843 u64 t2x, stamp_xsec; 879 u64 t2x, stamp_xsec;
844 880
845 if (clock != &clocksource_timebase) 881 if (clock != &clocksource_timebase)
846 return; 882 return;
847 883
848 /* Make userspace gettimeofday spin until we're done. */ 884 /* Make userspace gettimeofday spin until we're done. */
849 ++vdso_data->tb_update_count; 885 ++vdso_data->tb_update_count;
850 smp_mb(); 886 smp_mb();
851 887
852 /* XXX this assumes clock->shift == 22 */ 888 /* XXX this assumes clock->shift == 22 */
853 /* 4611686018 ~= 2^(20+64-22) / 1e9 */ 889 /* 4611686018 ~= 2^(20+64-22) / 1e9 */
854 t2x = (u64) mult * 4611686018ULL; 890 t2x = (u64) mult * 4611686018ULL;
855 stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; 891 stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
856 do_div(stamp_xsec, 1000000000); 892 do_div(stamp_xsec, 1000000000);
857 stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; 893 stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
858 update_gtod(clock->cycle_last, stamp_xsec, t2x); 894 update_gtod(clock->cycle_last, stamp_xsec, t2x);
859 } 895 }
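A quick stand-alone check of the constant used above (nothing here is kernel API): with clock->shift == 22 and one xsec defined as 2^-20 s, the tb-to-xsec factor works out to mult * 2^(20 + 64 - 22) / 1e9, and 2^62 / 1e9 is indeed 4611686018.

#include <stdio.h>

int main(void)
{
	/* 2^(20 + 64 - 22) / 1e9, the factor multiplied by 'mult' above */
	unsigned long long factor = (1ULL << 62) / 1000000000ULL;

	printf("2^62 / 1e9 = %llu\n", factor);	/* prints 4611686018 */
	return 0;
}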
860 896
861 void update_vsyscall_tz(void) 897 void update_vsyscall_tz(void)
862 { 898 {
863 /* Make userspace gettimeofday spin until we're done. */ 899 /* Make userspace gettimeofday spin until we're done. */
864 ++vdso_data->tb_update_count; 900 ++vdso_data->tb_update_count;
865 smp_mb(); 901 smp_mb();
866 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 902 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
867 vdso_data->tz_dsttime = sys_tz.tz_dsttime; 903 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
868 smp_mb(); 904 smp_mb();
869 ++vdso_data->tb_update_count; 905 ++vdso_data->tb_update_count;
870 } 906 }
871 907
872 static void __init clocksource_init(void) 908 static void __init clocksource_init(void)
873 { 909 {
874 struct clocksource *clock; 910 struct clocksource *clock;
875 911
876 if (__USE_RTC()) 912 if (__USE_RTC())
877 clock = &clocksource_rtc; 913 clock = &clocksource_rtc;
878 else 914 else
879 clock = &clocksource_timebase; 915 clock = &clocksource_timebase;
880 916
881 clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); 917 clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
882 918
883 if (clocksource_register(clock)) { 919 if (clocksource_register(clock)) {
884 printk(KERN_ERR "clocksource: %s is already registered\n", 920 printk(KERN_ERR "clocksource: %s is already registered\n",
885 clock->name); 921 clock->name);
886 return; 922 return;
887 } 923 }
888 924
889 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n", 925 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
890 clock->name, clock->mult, clock->shift); 926 clock->name, clock->mult, clock->shift);
891 } 927 }
892 928
893 static int decrementer_set_next_event(unsigned long evt, 929 static int decrementer_set_next_event(unsigned long evt,
894 struct clock_event_device *dev) 930 struct clock_event_device *dev)
895 { 931 {
896 __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt; 932 __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
897 set_dec(evt); 933 set_dec(evt);
898 return 0; 934 return 0;
899 } 935 }
900 936
901 static void decrementer_set_mode(enum clock_event_mode mode, 937 static void decrementer_set_mode(enum clock_event_mode mode,
902 struct clock_event_device *dev) 938 struct clock_event_device *dev)
903 { 939 {
904 if (mode != CLOCK_EVT_MODE_ONESHOT) 940 if (mode != CLOCK_EVT_MODE_ONESHOT)
905 decrementer_set_next_event(DECREMENTER_MAX, dev); 941 decrementer_set_next_event(DECREMENTER_MAX, dev);
906 } 942 }
907 943
908 static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec, 944 static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
909 int shift) 945 int shift)
910 { 946 {
911 uint64_t tmp = ((uint64_t)ticks) << shift; 947 uint64_t tmp = ((uint64_t)ticks) << shift;
912 948
913 do_div(tmp, nsec); 949 do_div(tmp, nsec);
914 return tmp; 950 return tmp;
915 } 951 }
916 952
917 static void __init setup_clockevent_multiplier(unsigned long hz) 953 static void __init setup_clockevent_multiplier(unsigned long hz)
918 { 954 {
919 u64 mult, shift = 32; 955 u64 mult, shift = 32;
920 956
921 while (1) { 957 while (1) {
922 mult = div_sc64(hz, NSEC_PER_SEC, shift); 958 mult = div_sc64(hz, NSEC_PER_SEC, shift);
923 if (mult && (mult >> 32UL) == 0UL) 959 if (mult && (mult >> 32UL) == 0UL)
924 break; 960 break;
925 961
926 shift--; 962 shift--;
927 } 963 }
928 964
929 decrementer_clockevent.shift = shift; 965 decrementer_clockevent.shift = shift;
930 decrementer_clockevent.mult = mult; 966 decrementer_clockevent.mult = mult;
931 } 967 }
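The loop above searches for the largest shift (starting at 32) for which mult = hz * 2^shift / NSEC_PER_SEC still fits in 32 bits; the clockevents core then converts a nanosecond delta to decrementer ticks as (delta_ns * mult) >> shift. A user-space sketch of the same search, assuming a hypothetical 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t hz = 512000000;	/* hypothetical 512 MHz timebase */
	uint64_t mult, shift;

	/* Largest shift whose mult still fits in 32 bits */
	for (shift = 32; ; shift--) {
		mult = (hz << shift) / NSEC_PER_SEC;
		if (mult && (mult >> 32) == 0)
			break;
	}

	/* ns -> ticks, as done when programming the next event */
	uint64_t delta_ns = 10000000;	/* 10 ms */
	uint64_t ticks = (delta_ns * mult) >> shift;

	printf("shift=%llu mult=%llu 10ms=%llu ticks\n",
	       (unsigned long long)shift, (unsigned long long)mult,
	       (unsigned long long)ticks);	/* ~5.12 million ticks */
	return 0;
}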
932 968
933 static void register_decrementer_clockevent(int cpu) 969 static void register_decrementer_clockevent(int cpu)
934 { 970 {
935 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; 971 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
936 972
937 *dec = decrementer_clockevent; 973 *dec = decrementer_clockevent;
938 dec->cpumask = cpumask_of(cpu); 974 dec->cpumask = cpumask_of(cpu);
939 975
940 printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", 976 printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
941 dec->name, dec->mult, dec->shift, cpu); 977 dec->name, dec->mult, dec->shift, cpu);
942 978
943 clockevents_register_device(dec); 979 clockevents_register_device(dec);
944 } 980 }
945 981
946 static void __init init_decrementer_clockevent(void) 982 static void __init init_decrementer_clockevent(void)
947 { 983 {
948 int cpu = smp_processor_id(); 984 int cpu = smp_processor_id();
949 985
950 setup_clockevent_multiplier(ppc_tb_freq); 986 setup_clockevent_multiplier(ppc_tb_freq);
951 decrementer_clockevent.max_delta_ns = 987 decrementer_clockevent.max_delta_ns =
952 clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); 988 clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
953 decrementer_clockevent.min_delta_ns = 989 decrementer_clockevent.min_delta_ns =
954 clockevent_delta2ns(2, &decrementer_clockevent); 990 clockevent_delta2ns(2, &decrementer_clockevent);
955 991
956 register_decrementer_clockevent(cpu); 992 register_decrementer_clockevent(cpu);
957 } 993 }
958 994
959 void secondary_cpu_time_init(void) 995 void secondary_cpu_time_init(void)
960 { 996 {
961 /* Start the decrementer on CPUs that have manual control 997 /* Start the decrementer on CPUs that have manual control
962 * such as BookE 998 * such as BookE
963 */ 999 */
964 start_cpu_decrementer(); 1000 start_cpu_decrementer();
965 1001
966 /* FIXME: Should make unrelated change to move snapshot_timebase 1002 /* FIXME: Should make unrelated change to move snapshot_timebase
967 * call here! */ 1003 * call here! */
968 register_decrementer_clockevent(smp_processor_id()); 1004 register_decrementer_clockevent(smp_processor_id());
969 } 1005 }
970 1006
971 /* This function is only called on the boot processor */ 1007 /* This function is only called on the boot processor */
972 void __init time_init(void) 1008 void __init time_init(void)
973 { 1009 {
974 unsigned long flags; 1010 unsigned long flags;
975 struct div_result res; 1011 struct div_result res;
976 u64 scale, x; 1012 u64 scale, x;
977 unsigned shift; 1013 unsigned shift;
978 1014
979 if (__USE_RTC()) { 1015 if (__USE_RTC()) {
980 /* 601 processor: dec counts down by 128 every 128ns */ 1016 /* 601 processor: dec counts down by 128 every 128ns */
981 ppc_tb_freq = 1000000000; 1017 ppc_tb_freq = 1000000000;
982 tb_last_jiffy = get_rtcl(); 1018 tb_last_jiffy = get_rtcl();
983 } else { 1019 } else {
984 /* Normal PowerPC with timebase register */ 1020 /* Normal PowerPC with timebase register */
985 ppc_md.calibrate_decr(); 1021 ppc_md.calibrate_decr();
986 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", 1022 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
987 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); 1023 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
988 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", 1024 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
989 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 1025 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
990 tb_last_jiffy = get_tb(); 1026 tb_last_jiffy = get_tb();
991 } 1027 }
992 1028
993 tb_ticks_per_jiffy = ppc_tb_freq / HZ; 1029 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
994 tb_ticks_per_sec = ppc_tb_freq; 1030 tb_ticks_per_sec = ppc_tb_freq;
995 tb_ticks_per_usec = ppc_tb_freq / 1000000; 1031 tb_ticks_per_usec = ppc_tb_freq / 1000000;
996 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); 1032 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
997 calc_cputime_factors(); 1033 calc_cputime_factors();
998 setup_cputime_one_jiffy(); 1034 setup_cputime_one_jiffy();
999 1035
1000 /* 1036 /*
1001 * Calculate the length of each tick in ns. It will not be 1037 * Calculate the length of each tick in ns. It will not be
1002 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. 1038 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
1003 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, 1039 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
1004 * rounded up. 1040 * rounded up.
1005 */ 1041 */
1006 x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; 1042 x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
1007 do_div(x, ppc_tb_freq); 1043 do_div(x, ppc_tb_freq);
1008 tick_nsec = x; 1044 tick_nsec = x;
1009 last_tick_len = x << TICKLEN_SCALE; 1045 last_tick_len = x << TICKLEN_SCALE;
1010 1046
1011 /* 1047 /*
1012 * Compute ticklen_to_xs, which is a factor which gets multiplied 1048 * Compute ticklen_to_xs, which is a factor which gets multiplied
1013 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. 1049 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
1014 * It is computed as: 1050 * It is computed as:
1015 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) 1051 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
1016 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT 1052 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
1017 * which turns out to be N = 51 - SHIFT_HZ. 1053 * which turns out to be N = 51 - SHIFT_HZ.
1018 * This gives the result as a 0.64 fixed-point fraction. 1054 * This gives the result as a 0.64 fixed-point fraction.
1019 * That value is reduced by an offset amounting to 1 xsec per 1055 * That value is reduced by an offset amounting to 1 xsec per
1020 * 2^31 timebase ticks to avoid problems with time going backwards 1056 * 2^31 timebase ticks to avoid problems with time going backwards
1021 * by 1 xsec when we do timer_recalc_offset due to losing the 1057 * by 1 xsec when we do timer_recalc_offset due to losing the
1022 * fractional xsec. That offset is equal to ppc_tb_freq/2^51 1058 * fractional xsec. That offset is equal to ppc_tb_freq/2^51
1023 * since there are 2^20 xsec in a second. 1059 * since there are 2^20 xsec in a second.
1024 */ 1060 */
1025 div128_by_32((1ULL << 51) - ppc_tb_freq, 0, 1061 div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
1026 tb_ticks_per_jiffy << SHIFT_HZ, &res); 1062 tb_ticks_per_jiffy << SHIFT_HZ, &res);
1027 div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); 1063 div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
1028 ticklen_to_xs = res.result_low; 1064 ticklen_to_xs = res.result_low;
1029 1065
1030 /* Compute tb_to_xs from tick_nsec */ 1066 /* Compute tb_to_xs from tick_nsec */
1031 tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); 1067 tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
1032 1068
1033 /* 1069 /*
1034 * Compute scale factor for sched_clock. 1070 * Compute scale factor for sched_clock.
1035 * The calibrate_decr() function has set tb_ticks_per_sec, 1071 * The calibrate_decr() function has set tb_ticks_per_sec,
1036 * which is the timebase frequency. 1072 * which is the timebase frequency.
1037 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret 1073 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1038 * the 128-bit result as a 64.64 fixed-point number. 1074 * the 128-bit result as a 64.64 fixed-point number.
1039 * We then shift that number right until it is less than 1.0, 1075 * We then shift that number right until it is less than 1.0,
1040 * giving us the scale factor and shift count to use in 1076 * giving us the scale factor and shift count to use in
1041 * sched_clock(). 1077 * sched_clock().
1042 */ 1078 */
1043 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); 1079 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1044 scale = res.result_low; 1080 scale = res.result_low;
1045 for (shift = 0; res.result_high != 0; ++shift) { 1081 for (shift = 0; res.result_high != 0; ++shift) {
1046 scale = (scale >> 1) | (res.result_high << 63); 1082 scale = (scale >> 1) | (res.result_high << 63);
1047 res.result_high >>= 1; 1083 res.result_high >>= 1;
1048 } 1084 }
1049 tb_to_ns_scale = scale; 1085 tb_to_ns_scale = scale;
1050 tb_to_ns_shift = shift; 1086 tb_to_ns_shift = shift;
1051 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ 1087 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1052 boot_tb = get_tb_or_rtc(); 1088 boot_tb = get_tb_or_rtc();
1053 1089
1054 write_seqlock_irqsave(&xtime_lock, flags); 1090 write_seqlock_irqsave(&xtime_lock, flags);
1055 1091
1056 /* If platform provided a timezone (pmac), we correct the time */ 1092 /* If platform provided a timezone (pmac), we correct the time */
1057 if (timezone_offset) { 1093 if (timezone_offset) {
1058 sys_tz.tz_minuteswest = -timezone_offset / 60; 1094 sys_tz.tz_minuteswest = -timezone_offset / 60;
1059 sys_tz.tz_dsttime = 0; 1095 sys_tz.tz_dsttime = 0;
1060 } 1096 }
1061 1097
1062 vdso_data->tb_orig_stamp = tb_last_jiffy; 1098 vdso_data->tb_orig_stamp = tb_last_jiffy;
1063 vdso_data->tb_update_count = 0; 1099 vdso_data->tb_update_count = 0;
1064 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 1100 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1065 vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; 1101 vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
1066 vdso_data->tb_to_xs = tb_to_xs; 1102 vdso_data->tb_to_xs = tb_to_xs;
1067 1103
1068 write_sequnlock_irqrestore(&xtime_lock, flags); 1104 write_sequnlock_irqrestore(&xtime_lock, flags);
1069 1105
1070 /* Start the decrementer on CPUs that have manual control 1106 /* Start the decrementer on CPUs that have manual control
1071 * such as BookE 1107 * such as BookE
1072 */ 1108 */
1073 start_cpu_decrementer(); 1109 start_cpu_decrementer();
1074 1110
1075 /* Register the clocksource, if we're not running on iSeries */ 1111 /* Register the clocksource, if we're not running on iSeries */
1076 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 1112 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1077 clocksource_init(); 1113 clocksource_init();
1078 1114
1079 init_decrementer_clockevent(); 1115 init_decrementer_clockevent();
1080 } 1116 }
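The last block of time_init() builds the sched_clock() conversion: scale and shift such that ns = ((tb * scale) >> 64) << shift, i.e. scale * 2^(shift - 64) ~= 1e9 / tb_ticks_per_sec. A user-space sketch of the same computation, substituting unsigned __int128 (gcc/clang on 64-bit) for div128_by_32() and assuming a hypothetical 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* hypothetical 512 MHz */

	/* 1e9 * 2^64 / freq as a 64.64 fixed-point value... */
	unsigned __int128 ratio =
		((unsigned __int128)1000000000 << 64) / tb_ticks_per_sec;
	unsigned shift = 0;

	/* ...shifted right until it is below 1.0 */
	while (ratio >> 64) {
		ratio >>= 1;
		shift++;
	}
	uint64_t scale = (uint64_t)ratio;

	/* sched_clock()-style conversion: two seconds of timebase ticks */
	uint64_t tb = 2 * tb_ticks_per_sec;
	uint64_t ns = (uint64_t)(((unsigned __int128)tb * scale) >> 64) << shift;

	printf("scale=%llu shift=%u ns=%llu\n",	/* ns prints 2000000000 */
	       (unsigned long long)scale, shift, (unsigned long long)ns);
	return 0;
}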
1081 1117
1082 1118
1083 #define FEBRUARY 2 1119 #define FEBRUARY 2
1084 #define STARTOFTIME 1970 1120 #define STARTOFTIME 1970
1085 #define SECDAY 86400L 1121 #define SECDAY 86400L
1086 #define SECYR (SECDAY * 365) 1122 #define SECYR (SECDAY * 365)
1087 #define leapyear(year) ((year) % 4 == 0 && \ 1123 #define leapyear(year) ((year) % 4 == 0 && \
1088 ((year) % 100 != 0 || (year) % 400 == 0)) 1124 ((year) % 100 != 0 || (year) % 400 == 0))
1089 #define days_in_year(a) (leapyear(a) ? 366 : 365) 1125 #define days_in_year(a) (leapyear(a) ? 366 : 365)
1090 #define days_in_month(a) (month_days[(a) - 1]) 1126 #define days_in_month(a) (month_days[(a) - 1])
1091 1127
1092 static int month_days[12] = { 1128 static int month_days[12] = {
1093 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 1129 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1094 }; 1130 };
1095 1131
1096 /* 1132 /*
1097 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) 1133 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1098 */ 1134 */
1099 void GregorianDay(struct rtc_time * tm) 1135 void GregorianDay(struct rtc_time * tm)
1100 { 1136 {
1101 int leapsToDate; 1137 int leapsToDate;
1102 int lastYear; 1138 int lastYear;
1103 int day; 1139 int day;
1104 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 1140 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
1105 1141
1106 lastYear = tm->tm_year - 1; 1142 lastYear = tm->tm_year - 1;
1107 1143
1108 /* 1144 /*
1109 * Number of leap corrections to apply up to end of last year 1145 * Number of leap corrections to apply up to end of last year
1110 */ 1146 */
1111 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; 1147 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1112 1148
1113 /* 1149 /*
1114 * This year is a leap year if it is divisible by 4 except when it is 1150 * This year is a leap year if it is divisible by 4 except when it is
1115 * divisible by 100 unless it is divisible by 400 1151 * divisible by 100 unless it is divisible by 400
1116 * 1152 *
1117 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was 1153 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
1118 */ 1154 */
1119 day = tm->tm_mon > 2 && leapyear(tm->tm_year); 1155 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1120 1156
1121 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 1157 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1122 tm->tm_mday; 1158 tm->tm_mday;
1123 1159
1124 tm->tm_wday = day % 7; 1160 tm->tm_wday = day % 7;
1125 } 1161 }
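A stand-alone check of the day-of-week arithmetic above for a known date, 1 January 2000 (a Saturday; the % 7 result uses 0 for Sunday):

#include <stdio.h>

int main(void)
{
	int year = 2000, mon = 1, mday = 1;
	int month_offset[] = { 0, 31, 59, 90, 120, 151, 181,
			       212, 243, 273, 304, 334 };
	int last_year = year - 1;
	int leaps = last_year / 4 - last_year / 100 + last_year / 400;
	int leap = (year % 4 == 0) && (year % 100 != 0 || year % 400 == 0);
	int day = (mon > 2 && leap);	/* extra day once past February */

	day += last_year * 365 + leaps + month_offset[mon - 1] + mday;
	printf("wday = %d\n", day % 7);	/* prints 6, i.e. Saturday */
	return 0;
}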
1126 1162
1127 void to_tm(int tim, struct rtc_time * tm) 1163 void to_tm(int tim, struct rtc_time * tm)
1128 { 1164 {
1129 register int i; 1165 register int i;
1130 register long hms, day; 1166 register long hms, day;
1131 1167
1132 day = tim / SECDAY; 1168 day = tim / SECDAY;
1133 hms = tim % SECDAY; 1169 hms = tim % SECDAY;
1134 1170
1135 /* Hours, minutes, seconds are easy */ 1171 /* Hours, minutes, seconds are easy */
1136 tm->tm_hour = hms / 3600; 1172 tm->tm_hour = hms / 3600;
1137 tm->tm_min = (hms % 3600) / 60; 1173 tm->tm_min = (hms % 3600) / 60;
1138 tm->tm_sec = (hms % 3600) % 60; 1174 tm->tm_sec = (hms % 3600) % 60;
1139 1175
1140 /* Number of years in days */ 1176 /* Number of years in days */
1141 for (i = STARTOFTIME; day >= days_in_year(i); i++) 1177 for (i = STARTOFTIME; day >= days_in_year(i); i++)
1142 day -= days_in_year(i); 1178 day -= days_in_year(i);
1143 tm->tm_year = i; 1179 tm->tm_year = i;
1144 1180
1145 /* Number of months in days left */ 1181 /* Number of months in days left */
1146 if (leapyear(tm->tm_year)) 1182 if (leapyear(tm->tm_year))
1147 days_in_month(FEBRUARY) = 29; 1183 days_in_month(FEBRUARY) = 29;
1148 for (i = 1; day >= days_in_month(i); i++) 1184 for (i = 1; day >= days_in_month(i); i++)
1149 day -= days_in_month(i); 1185 day -= days_in_month(i);
1150 days_in_month(FEBRUARY) = 28; 1186 days_in_month(FEBRUARY) = 28;
1151 tm->tm_mon = i; 1187 tm->tm_mon = i;
1152 1188
1153 /* Days are what is left over (+1) from all that. */ 1189 /* Days are what is left over (+1) from all that. */
1154 tm->tm_mday = day + 1; 1190 tm->tm_mday = day + 1;
1155 1191
1156 /* 1192 /*
1157 * Determine the day of week 1193 * Determine the day of week
1158 */ 1194 */
1159 GregorianDay(tm); 1195 GregorianDay(tm);
1160 } 1196 }
1161 1197
1162 /* Auxiliary function to compute scaling factors */ 1198 /* Auxiliary function to compute scaling factors */
1163 /* Actually the choice of a timebase running at 1/4 of the bus 1199 /* Actually the choice of a timebase running at 1/4 of the bus
1164 * frequency giving resolution of a few tens of nanoseconds is quite nice. 1200 * frequency giving resolution of a few tens of nanoseconds is quite nice.
1165 * It makes this computation very precise (27-28 bits typically) which 1201 * It makes this computation very precise (27-28 bits typically) which
1166 * is optimistic considering the stability of most processor clock 1202 * is optimistic considering the stability of most processor clock
1167 * oscillators and the precision with which the timebase frequency 1203 * oscillators and the precision with which the timebase frequency
1168 * is measured, but does no harm. 1204 * is measured, but does no harm.
1169 */ 1205 */
1170 unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) 1206 unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
1171 { 1207 {
1172 unsigned mlt=0, tmp, err; 1208 unsigned mlt=0, tmp, err;
1173 /* No concern for performance, it's done once: use a stupid 1209 /* No concern for performance, it's done once: use a stupid
1174 * but safe and compact method to find the multiplier. 1210 * but safe and compact method to find the multiplier.
1175 */ 1211 */
1176 1212
1177 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { 1213 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
1178 if (mulhwu(inscale, mlt|tmp) < outscale) 1214 if (mulhwu(inscale, mlt|tmp) < outscale)
1179 mlt |= tmp; 1215 mlt |= tmp;
1180 } 1216 }
1181 1217
1182 /* We might still be off by 1 for the best approximation. 1218 /* We might still be off by 1 for the best approximation.
1183 * A side effect of this is that if outscale is too large 1219 * A side effect of this is that if outscale is too large
1184 * the returned value will be zero. 1220 * the returned value will be zero.
1185 * Many corner cases have been checked and seem to work, 1221 * Many corner cases have been checked and seem to work,
1186 * some might have been forgotten in the test however. 1222 * some might have been forgotten in the test however.
1187 */ 1223 */
1188 1224
1189 err = inscale * (mlt+1); 1225 err = inscale * (mlt+1);
1190 if (err <= inscale/2) 1226 if (err <= inscale/2)
1191 mlt++; 1227 mlt++;
1192 return mlt; 1228 return mlt;
1193 } 1229 }
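The bit-by-bit search above converges on a 0.32 fixed-point multiplier, mlt ~= outscale / inscale * 2^32, so that mulhwu(x, mlt) (the high word of a 32x32 multiply) approximates x * outscale / inscale; time_init() uses it as tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000). A user-space sketch with a hypothetical 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

/* High 32 bits of a 32x32 -> 64 multiply, like the mulhwu instruction */
static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);
}

int main(void)
{
	uint32_t inscale = 512000000;	/* hypothetical timebase frequency */
	uint32_t outscale = 1000000;	/* microseconds per second */

	/* direct computation of the multiplier the search converges to */
	uint32_t mlt = (uint32_t)(((uint64_t)outscale << 32) / inscale);

	uint32_t ticks = 5120;		/* 10 us worth of timebase ticks */
	printf("mlt=%u  %u ticks -> %u us\n", mlt, ticks, mulhwu(ticks, mlt));
	return 0;
}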
1194 1230
1195 /* 1231 /*
1196 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 1232 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1197 * result. 1233 * result.
1198 */ 1234 */
1199 void div128_by_32(u64 dividend_high, u64 dividend_low, 1235 void div128_by_32(u64 dividend_high, u64 dividend_low,
1200 unsigned divisor, struct div_result *dr) 1236 unsigned divisor, struct div_result *dr)
1201 { 1237 {
1202 unsigned long a, b, c, d; 1238 unsigned long a, b, c, d;
1203 unsigned long w, x, y, z; 1239 unsigned long w, x, y, z;
1204 u64 ra, rb, rc; 1240 u64 ra, rb, rc;
1205 1241
1206 a = dividend_high >> 32; 1242 a = dividend_high >> 32;
1207 b = dividend_high & 0xffffffff; 1243 b = dividend_high & 0xffffffff;
1208 c = dividend_low >> 32; 1244 c = dividend_low >> 32;
1209 d = dividend_low & 0xffffffff; 1245 d = dividend_low & 0xffffffff;
1210 1246
1211 w = a / divisor; 1247 w = a / divisor;
1212 ra = ((u64)(a - (w * divisor)) << 32) + b; 1248 ra = ((u64)(a - (w * divisor)) << 32) + b;
1213 1249
1214 rb = ((u64) do_div(ra, divisor) << 32) + c; 1250 rb = ((u64) do_div(ra, divisor) << 32) + c;
1215 x = ra; 1251 x = ra;
1216 1252
1217 rc = ((u64) do_div(rb, divisor) << 32) + d; 1253 rc = ((u64) do_div(rb, divisor) << 32) + d;
1218 y = rb; 1254 y = rb;
1219 1255
1220 do_div(rc, divisor); 1256 do_div(rc, divisor);
1221 z = rc; 1257 z = rc;
1222 1258
1223 dr->result_high = ((u64)w << 32) + x; 1259 dr->result_high = ((u64)w << 32) + x;
1224 dr->result_low = ((u64)y << 32) + z; 1260 dr->result_low = ((u64)y << 32) + z;
1225 1261
1226 } 1262 }
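The function above is schoolbook long division with 32-bit limbs (a, b, c, d are the four words of the dividend). A user-space cross-check of the same 128-by-32 division done directly with unsigned __int128 (gcc/clang on 64-bit); the sample operands are arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dividend_high = 3;
	uint64_t dividend_low  = 0x8000000000000000ULL;
	uint32_t divisor = 1000000000;

	unsigned __int128 n = ((unsigned __int128)dividend_high << 64)
			      | dividend_low;
	unsigned __int128 q = n / divisor;

	/* should match dr->result_high / dr->result_low from div128_by_32() */
	printf("result_high=%#llx result_low=%#llx\n",
	       (unsigned long long)(q >> 64), (unsigned long long)q);
	return 0;
}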
1227 1263
1228 /* We don't need to calibrate delay, we use the CPU timebase for that */ 1264 /* We don't need to calibrate delay, we use the CPU timebase for that */
1229 void calibrate_delay(void) 1265 void calibrate_delay(void)
1230 { 1266 {
1231 /* Some generic code (such as spinlock debug) uses loops_per_jiffy 1267 /* Some generic code (such as spinlock debug) uses loops_per_jiffy
1232 * as the number of __delay(1) calls in a jiffy, so make it so 1268 * as the number of __delay(1) calls in a jiffy, so make it so
1233 */ 1269 */
1234 loops_per_jiffy = tb_ticks_per_jiffy; 1270 loops_per_jiffy = tb_ticks_per_jiffy;
1235 } 1271 }
1236 1272
1237 static int __init rtc_init(void) 1273 static int __init rtc_init(void)
1238 { 1274 {
1239 struct platform_device *pdev; 1275 struct platform_device *pdev;
1240 1276
1241 if (!ppc_md.get_rtc_time) 1277 if (!ppc_md.get_rtc_time)
1242 return -ENODEV; 1278 return -ENODEV;
1243 1279
1244 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); 1280 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
1245 if (IS_ERR(pdev)) 1281 if (IS_ERR(pdev))
1246 return PTR_ERR(pdev); 1282 return PTR_ERR(pdev);
1247 1283