Commit 62cc67b9df273be18fcb09a071592dedf751c90a

Authored by Benjamin Herrenschmidt
1 parent e872e41b79

powerpc/pmac/smp: Properly NAP offlined CPU on G5

The current code soft-disables, and then goes to NAP mode which
turns interrupts on. That means that if an interrupt occurs, we
will hit the masked interrupt code path which isn't what we want,
as it will return with EE off, which will either get us out of
NAP mode, or fail to enter it (according to spec).

Instead, let's just rely on the fact that it is safe to take
decrementer interrupts on an offline CPU and leave interrupts
enabled. We can also get rid of the special case in asm for
power4_cpu_offline_powersave() and just use power4_idle().

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 4 changed files with 15 additions and 28 deletions Inline Diff

arch/powerpc/include/asm/machdep.h
1 #ifndef _ASM_POWERPC_MACHDEP_H 1 #ifndef _ASM_POWERPC_MACHDEP_H
2 #define _ASM_POWERPC_MACHDEP_H 2 #define _ASM_POWERPC_MACHDEP_H
3 #ifdef __KERNEL__ 3 #ifdef __KERNEL__
4 4
5 /* 5 /*
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/seq_file.h> 12 #include <linux/seq_file.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/dma-mapping.h> 14 #include <linux/dma-mapping.h>
15 15
16 #include <asm/setup.h> 16 #include <asm/setup.h>
17 17
18 /* We export this macro for external modules like Alsa to know if 18 /* We export this macro for external modules like Alsa to know if
19 * ppc_md.feature_call is implemented or not 19 * ppc_md.feature_call is implemented or not
20 */ 20 */
21 #define CONFIG_PPC_HAS_FEATURE_CALLS 21 #define CONFIG_PPC_HAS_FEATURE_CALLS
22 22
23 struct pt_regs; 23 struct pt_regs;
24 struct pci_bus; 24 struct pci_bus;
25 struct device_node; 25 struct device_node;
26 struct iommu_table; 26 struct iommu_table;
27 struct rtc_time; 27 struct rtc_time;
28 struct file; 28 struct file;
29 struct pci_controller; 29 struct pci_controller;
30 struct kimage; 30 struct kimage;
31 31
32 #ifdef CONFIG_SMP 32 #ifdef CONFIG_SMP
33 struct smp_ops_t { 33 struct smp_ops_t {
34 void (*message_pass)(int target, int msg); 34 void (*message_pass)(int target, int msg);
35 int (*probe)(void); 35 int (*probe)(void);
36 void (*kick_cpu)(int nr); 36 void (*kick_cpu)(int nr);
37 void (*setup_cpu)(int nr); 37 void (*setup_cpu)(int nr);
38 void (*take_timebase)(void); 38 void (*take_timebase)(void);
39 void (*give_timebase)(void); 39 void (*give_timebase)(void);
40 int (*cpu_disable)(void); 40 int (*cpu_disable)(void);
41 void (*cpu_die)(unsigned int nr); 41 void (*cpu_die)(unsigned int nr);
42 int (*cpu_bootable)(unsigned int nr); 42 int (*cpu_bootable)(unsigned int nr);
43 }; 43 };
44 #endif 44 #endif
45 45
46 struct machdep_calls { 46 struct machdep_calls {
47 char *name; 47 char *name;
48 #ifdef CONFIG_PPC64 48 #ifdef CONFIG_PPC64
49 void (*hpte_invalidate)(unsigned long slot, 49 void (*hpte_invalidate)(unsigned long slot,
50 unsigned long va, 50 unsigned long va,
51 int psize, int ssize, 51 int psize, int ssize,
52 int local); 52 int local);
53 long (*hpte_updatepp)(unsigned long slot, 53 long (*hpte_updatepp)(unsigned long slot,
54 unsigned long newpp, 54 unsigned long newpp,
55 unsigned long va, 55 unsigned long va,
56 int psize, int ssize, 56 int psize, int ssize,
57 int local); 57 int local);
58 void (*hpte_updateboltedpp)(unsigned long newpp, 58 void (*hpte_updateboltedpp)(unsigned long newpp,
59 unsigned long ea, 59 unsigned long ea,
60 int psize, int ssize); 60 int psize, int ssize);
61 long (*hpte_insert)(unsigned long hpte_group, 61 long (*hpte_insert)(unsigned long hpte_group,
62 unsigned long va, 62 unsigned long va,
63 unsigned long prpn, 63 unsigned long prpn,
64 unsigned long rflags, 64 unsigned long rflags,
65 unsigned long vflags, 65 unsigned long vflags,
66 int psize, int ssize); 66 int psize, int ssize);
67 long (*hpte_remove)(unsigned long hpte_group); 67 long (*hpte_remove)(unsigned long hpte_group);
68 void (*hpte_removebolted)(unsigned long ea, 68 void (*hpte_removebolted)(unsigned long ea,
69 int psize, int ssize); 69 int psize, int ssize);
70 void (*flush_hash_range)(unsigned long number, int local); 70 void (*flush_hash_range)(unsigned long number, int local);
71 71
72 /* special for kexec, to be called in real mode, linear mapping is 72 /* special for kexec, to be called in real mode, linear mapping is
73 * destroyed as well */ 73 * destroyed as well */
74 void (*hpte_clear_all)(void); 74 void (*hpte_clear_all)(void);
75 75
76 int (*tce_build)(struct iommu_table *tbl, 76 int (*tce_build)(struct iommu_table *tbl,
77 long index, 77 long index,
78 long npages, 78 long npages,
79 unsigned long uaddr, 79 unsigned long uaddr,
80 enum dma_data_direction direction, 80 enum dma_data_direction direction,
81 struct dma_attrs *attrs); 81 struct dma_attrs *attrs);
82 void (*tce_free)(struct iommu_table *tbl, 82 void (*tce_free)(struct iommu_table *tbl,
83 long index, 83 long index,
84 long npages); 84 long npages);
85 unsigned long (*tce_get)(struct iommu_table *tbl, 85 unsigned long (*tce_get)(struct iommu_table *tbl,
86 long index); 86 long index);
87 void (*tce_flush)(struct iommu_table *tbl); 87 void (*tce_flush)(struct iommu_table *tbl);
88 88
89 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 89 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
90 unsigned long flags, void *caller); 90 unsigned long flags, void *caller);
91 void (*iounmap)(volatile void __iomem *token); 91 void (*iounmap)(volatile void __iomem *token);
92 92
93 #ifdef CONFIG_PM 93 #ifdef CONFIG_PM
94 void (*iommu_save)(void); 94 void (*iommu_save)(void);
95 void (*iommu_restore)(void); 95 void (*iommu_restore)(void);
96 #endif 96 #endif
97 #endif /* CONFIG_PPC64 */ 97 #endif /* CONFIG_PPC64 */
98 98
99 void (*pci_dma_dev_setup)(struct pci_dev *dev); 99 void (*pci_dma_dev_setup)(struct pci_dev *dev);
100 void (*pci_dma_bus_setup)(struct pci_bus *bus); 100 void (*pci_dma_bus_setup)(struct pci_bus *bus);
101 101
102 /* Platform set_dma_mask override */ 102 /* Platform set_dma_mask override */
103 int (*dma_set_mask)(struct device *dev, u64 dma_mask); 103 int (*dma_set_mask)(struct device *dev, u64 dma_mask);
104 104
105 int (*probe)(void); 105 int (*probe)(void);
106 void (*setup_arch)(void); /* Optional, may be NULL */ 106 void (*setup_arch)(void); /* Optional, may be NULL */
107 void (*init_early)(void); 107 void (*init_early)(void);
108 /* Optional, may be NULL. */ 108 /* Optional, may be NULL. */
109 void (*show_cpuinfo)(struct seq_file *m); 109 void (*show_cpuinfo)(struct seq_file *m);
110 void (*show_percpuinfo)(struct seq_file *m, int i); 110 void (*show_percpuinfo)(struct seq_file *m, int i);
111 111
112 void (*init_IRQ)(void); 112 void (*init_IRQ)(void);
113 113
114 /* Return an irq, or NO_IRQ to indicate there are none pending. 114 /* Return an irq, or NO_IRQ to indicate there are none pending.
115 * If for some reason there is no irq, but the interrupt 115 * If for some reason there is no irq, but the interrupt
116 * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */ 116 * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
117 unsigned int (*get_irq)(void); 117 unsigned int (*get_irq)(void);
118 118
119 /* PCI stuff */ 119 /* PCI stuff */
120 /* Called after scanning the bus, before allocating resources */ 120 /* Called after scanning the bus, before allocating resources */
121 void (*pcibios_fixup)(void); 121 void (*pcibios_fixup)(void);
122 int (*pci_probe_mode)(struct pci_bus *); 122 int (*pci_probe_mode)(struct pci_bus *);
123 void (*pci_irq_fixup)(struct pci_dev *dev); 123 void (*pci_irq_fixup)(struct pci_dev *dev);
124 124
125 /* To setup PHBs when using automatic OF platform driver for PCI */ 125 /* To setup PHBs when using automatic OF platform driver for PCI */
126 int (*pci_setup_phb)(struct pci_controller *host); 126 int (*pci_setup_phb)(struct pci_controller *host);
127 127
128 #ifdef CONFIG_PCI_MSI 128 #ifdef CONFIG_PCI_MSI
129 int (*msi_check_device)(struct pci_dev* dev, 129 int (*msi_check_device)(struct pci_dev* dev,
130 int nvec, int type); 130 int nvec, int type);
131 int (*setup_msi_irqs)(struct pci_dev *dev, 131 int (*setup_msi_irqs)(struct pci_dev *dev,
132 int nvec, int type); 132 int nvec, int type);
133 void (*teardown_msi_irqs)(struct pci_dev *dev); 133 void (*teardown_msi_irqs)(struct pci_dev *dev);
134 #endif 134 #endif
135 135
136 void (*restart)(char *cmd); 136 void (*restart)(char *cmd);
137 void (*power_off)(void); 137 void (*power_off)(void);
138 void (*halt)(void); 138 void (*halt)(void);
139 void (*panic)(char *str); 139 void (*panic)(char *str);
140 void (*cpu_die)(void); 140 void (*cpu_die)(void);
141 141
142 long (*time_init)(void); /* Optional, may be NULL */ 142 long (*time_init)(void); /* Optional, may be NULL */
143 143
144 int (*set_rtc_time)(struct rtc_time *); 144 int (*set_rtc_time)(struct rtc_time *);
145 void (*get_rtc_time)(struct rtc_time *); 145 void (*get_rtc_time)(struct rtc_time *);
146 unsigned long (*get_boot_time)(void); 146 unsigned long (*get_boot_time)(void);
147 unsigned char (*rtc_read_val)(int addr); 147 unsigned char (*rtc_read_val)(int addr);
148 void (*rtc_write_val)(int addr, unsigned char val); 148 void (*rtc_write_val)(int addr, unsigned char val);
149 149
150 void (*calibrate_decr)(void); 150 void (*calibrate_decr)(void);
151 151
152 void (*progress)(char *, unsigned short); 152 void (*progress)(char *, unsigned short);
153 153
154 /* Interface for platform error logging */ 154 /* Interface for platform error logging */
155 void (*log_error)(char *buf, unsigned int err_type, int fatal); 155 void (*log_error)(char *buf, unsigned int err_type, int fatal);
156 156
157 unsigned char (*nvram_read_val)(int addr); 157 unsigned char (*nvram_read_val)(int addr);
158 void (*nvram_write_val)(int addr, unsigned char val); 158 void (*nvram_write_val)(int addr, unsigned char val);
159 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index); 159 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
160 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index); 160 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
161 ssize_t (*nvram_size)(void); 161 ssize_t (*nvram_size)(void);
162 void (*nvram_sync)(void); 162 void (*nvram_sync)(void);
163 163
164 /* Exception handlers */ 164 /* Exception handlers */
165 int (*system_reset_exception)(struct pt_regs *regs); 165 int (*system_reset_exception)(struct pt_regs *regs);
166 int (*machine_check_exception)(struct pt_regs *regs); 166 int (*machine_check_exception)(struct pt_regs *regs);
167 167
168 /* Motherboard/chipset features. This is a kind of general purpose 168 /* Motherboard/chipset features. This is a kind of general purpose
169 * hook used to control some machine specific features (like reset 169 * hook used to control some machine specific features (like reset
170 * lines, chip power control, etc...). 170 * lines, chip power control, etc...).
171 */ 171 */
172 long (*feature_call)(unsigned int feature, ...); 172 long (*feature_call)(unsigned int feature, ...);
173 173
174 /* Get legacy PCI/IDE interrupt mapping */ 174 /* Get legacy PCI/IDE interrupt mapping */
175 int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel); 175 int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
176 176
177 /* Get access protection for /dev/mem */ 177 /* Get access protection for /dev/mem */
178 pgprot_t (*phys_mem_access_prot)(struct file *file, 178 pgprot_t (*phys_mem_access_prot)(struct file *file,
179 unsigned long pfn, 179 unsigned long pfn,
180 unsigned long size, 180 unsigned long size,
181 pgprot_t vma_prot); 181 pgprot_t vma_prot);
182 182
183 /* Idle loop for this platform, leave empty for default idle loop */ 183 /* Idle loop for this platform, leave empty for default idle loop */
184 void (*idle_loop)(void); 184 void (*idle_loop)(void);
185 185
186 /* 186 /*
187 * Function for waiting for work with reduced power in idle loop; 187 * Function for waiting for work with reduced power in idle loop;
188 * called with interrupts disabled. 188 * called with interrupts disabled.
189 */ 189 */
190 void (*power_save)(void); 190 void (*power_save)(void);
191 191
192 /* Function to enable performance monitor counters for this 192 /* Function to enable performance monitor counters for this
193 platform, called once per cpu. */ 193 platform, called once per cpu. */
194 void (*enable_pmcs)(void); 194 void (*enable_pmcs)(void);
195 195
 196 /* Set DABR for this platform, leave empty for default implementation */ 196 /* Set DABR for this platform, leave empty for default implementation */
197 int (*set_dabr)(unsigned long dabr); 197 int (*set_dabr)(unsigned long dabr);
198 198
199 #ifdef CONFIG_PPC32 /* XXX for now */ 199 #ifdef CONFIG_PPC32 /* XXX for now */
200 /* A general init function, called by ppc_init in init/main.c. 200 /* A general init function, called by ppc_init in init/main.c.
201 May be NULL. */ 201 May be NULL. */
202 void (*init)(void); 202 void (*init)(void);
203 203
204 void (*kgdb_map_scc)(void); 204 void (*kgdb_map_scc)(void);
205 205
206 /* 206 /*
207 * optional PCI "hooks" 207 * optional PCI "hooks"
208 */ 208 */
 209 /* Called at the very end of pcibios_init() */ 209 /* Called at the very end of pcibios_init() */
210 void (*pcibios_after_init)(void); 210 void (*pcibios_after_init)(void);
211 211
212 #endif /* CONFIG_PPC32 */ 212 #endif /* CONFIG_PPC32 */
213 213
214 /* Called in indirect_* to avoid touching devices */ 214 /* Called in indirect_* to avoid touching devices */
215 int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char); 215 int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
216 216
217 /* Called after PPC generic resource fixup to perform 217 /* Called after PPC generic resource fixup to perform
218 machine specific fixups */ 218 machine specific fixups */
219 void (*pcibios_fixup_resources)(struct pci_dev *); 219 void (*pcibios_fixup_resources)(struct pci_dev *);
220 220
221 /* Called for each PCI bus in the system when it's probed */ 221 /* Called for each PCI bus in the system when it's probed */
222 void (*pcibios_fixup_bus)(struct pci_bus *); 222 void (*pcibios_fixup_bus)(struct pci_bus *);
223 223
224 /* Called when pci_enable_device() is called. Returns 0 to 224 /* Called when pci_enable_device() is called. Returns 0 to
225 * allow assignment/enabling of the device. */ 225 * allow assignment/enabling of the device. */
226 int (*pcibios_enable_device_hook)(struct pci_dev *); 226 int (*pcibios_enable_device_hook)(struct pci_dev *);
227 227
228 /* Called to shutdown machine specific hardware not already controlled 228 /* Called to shutdown machine specific hardware not already controlled
229 * by other drivers. 229 * by other drivers.
230 */ 230 */
231 void (*machine_shutdown)(void); 231 void (*machine_shutdown)(void);
232 232
233 #ifdef CONFIG_KEXEC 233 #ifdef CONFIG_KEXEC
234 void (*kexec_cpu_down)(int crash_shutdown, int secondary); 234 void (*kexec_cpu_down)(int crash_shutdown, int secondary);
235 235
 236 /* Called to do whatever setup is needed on image and the 236 /* Called to do whatever setup is needed on image and the
237 * reboot code buffer. Returns 0 on success. 237 * reboot code buffer. Returns 0 on success.
238 * Provide your own (maybe dummy) implementation if your platform 238 * Provide your own (maybe dummy) implementation if your platform
239 * claims to support kexec. 239 * claims to support kexec.
240 */ 240 */
241 int (*machine_kexec_prepare)(struct kimage *image); 241 int (*machine_kexec_prepare)(struct kimage *image);
242 242
243 /* Called to perform the _real_ kexec. 243 /* Called to perform the _real_ kexec.
244 * Do NOT allocate memory or fail here. We are past the point of 244 * Do NOT allocate memory or fail here. We are past the point of
245 * no return. 245 * no return.
246 */ 246 */
247 void (*machine_kexec)(struct kimage *image); 247 void (*machine_kexec)(struct kimage *image);
248 #endif /* CONFIG_KEXEC */ 248 #endif /* CONFIG_KEXEC */
249 249
250 #ifdef CONFIG_SUSPEND 250 #ifdef CONFIG_SUSPEND
251 /* These are called to disable and enable, respectively, IRQs when 251 /* These are called to disable and enable, respectively, IRQs when
252 * entering a suspend state. If NULL, then the generic versions 252 * entering a suspend state. If NULL, then the generic versions
253 * will be called. The generic versions disable/enable the 253 * will be called. The generic versions disable/enable the
254 * decrementer along with interrupts. 254 * decrementer along with interrupts.
255 */ 255 */
256 void (*suspend_disable_irqs)(void); 256 void (*suspend_disable_irqs)(void);
257 void (*suspend_enable_irqs)(void); 257 void (*suspend_enable_irqs)(void);
258 #endif 258 #endif
259 int (*suspend_disable_cpu)(void); 259 int (*suspend_disable_cpu)(void);
260 260
261 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 261 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
262 ssize_t (*cpu_probe)(const char *, size_t); 262 ssize_t (*cpu_probe)(const char *, size_t);
263 ssize_t (*cpu_release)(const char *, size_t); 263 ssize_t (*cpu_release)(const char *, size_t);
264 #endif 264 #endif
265 }; 265 };
266 266
267 extern void e500_idle(void); 267 extern void e500_idle(void);
268 extern void power4_idle(void); 268 extern void power4_idle(void);
269 extern void power4_cpu_offline_powersave(void);
270 extern void ppc6xx_idle(void); 269 extern void ppc6xx_idle(void);
271 extern void book3e_idle(void); 270 extern void book3e_idle(void);
272 271
273 /* 272 /*
274 * ppc_md contains a copy of the machine description structure for the 273 * ppc_md contains a copy of the machine description structure for the
275 * current platform. machine_id contains the initial address where the 274 * current platform. machine_id contains the initial address where the
276 * description was found during boot. 275 * description was found during boot.
277 */ 276 */
278 extern struct machdep_calls ppc_md; 277 extern struct machdep_calls ppc_md;
279 extern struct machdep_calls *machine_id; 278 extern struct machdep_calls *machine_id;
280 279
281 #define __machine_desc __attribute__ ((__section__ (".machine.desc"))) 280 #define __machine_desc __attribute__ ((__section__ (".machine.desc")))
282 281
283 #define define_machine(name) \ 282 #define define_machine(name) \
284 extern struct machdep_calls mach_##name; \ 283 extern struct machdep_calls mach_##name; \
285 EXPORT_SYMBOL(mach_##name); \ 284 EXPORT_SYMBOL(mach_##name); \
286 struct machdep_calls mach_##name __machine_desc = 285 struct machdep_calls mach_##name __machine_desc =
287 286
288 #define machine_is(name) \ 287 #define machine_is(name) \
289 ({ \ 288 ({ \
290 extern struct machdep_calls mach_##name \ 289 extern struct machdep_calls mach_##name \
291 __attribute__((weak)); \ 290 __attribute__((weak)); \
292 machine_id == &mach_##name; \ 291 machine_id == &mach_##name; \
293 }) 292 })
294 293
295 extern void probe_machine(void); 294 extern void probe_machine(void);
296 295
297 extern char cmd_line[COMMAND_LINE_SIZE]; 296 extern char cmd_line[COMMAND_LINE_SIZE];
298 297
299 #ifdef CONFIG_PPC_PMAC 298 #ifdef CONFIG_PPC_PMAC
300 /* 299 /*
301 * Power macintoshes have either a CUDA, PMU or SMU controlling 300 * Power macintoshes have either a CUDA, PMU or SMU controlling
302 * system reset, power, NVRAM, RTC. 301 * system reset, power, NVRAM, RTC.
303 */ 302 */
304 typedef enum sys_ctrler_kind { 303 typedef enum sys_ctrler_kind {
305 SYS_CTRLER_UNKNOWN = 0, 304 SYS_CTRLER_UNKNOWN = 0,
306 SYS_CTRLER_CUDA = 1, 305 SYS_CTRLER_CUDA = 1,
307 SYS_CTRLER_PMU = 2, 306 SYS_CTRLER_PMU = 2,
308 SYS_CTRLER_SMU = 3, 307 SYS_CTRLER_SMU = 3,
309 } sys_ctrler_t; 308 } sys_ctrler_t;
310 extern sys_ctrler_t sys_ctrler; 309 extern sys_ctrler_t sys_ctrler;
311 310
312 #endif /* CONFIG_PPC_PMAC */ 311 #endif /* CONFIG_PPC_PMAC */
313 312
314 #ifdef CONFIG_SMP 313 #ifdef CONFIG_SMP
315 /* Poor default implementations */ 314 /* Poor default implementations */
316 extern void __devinit smp_generic_give_timebase(void); 315 extern void __devinit smp_generic_give_timebase(void);
317 extern void __devinit smp_generic_take_timebase(void); 316 extern void __devinit smp_generic_take_timebase(void);
318 #endif /* CONFIG_SMP */ 317 #endif /* CONFIG_SMP */
319 318
320 319
321 /* Functions to produce codes on the leds. 320 /* Functions to produce codes on the leds.
322 * The SRC code should be unique for the message category and should 321 * The SRC code should be unique for the message category and should
323 * be limited to the lower 24 bits (the upper 8 are set by these funcs), 322 * be limited to the lower 24 bits (the upper 8 are set by these funcs),
324 * and (for boot & dump) should be sorted numerically in the order 323 * and (for boot & dump) should be sorted numerically in the order
325 * the events occur. 324 * the events occur.
326 */ 325 */
327 /* Print a boot progress message. */ 326 /* Print a boot progress message. */
328 void ppc64_boot_msg(unsigned int src, const char *msg); 327 void ppc64_boot_msg(unsigned int src, const char *msg);
329 328
330 static inline void log_error(char *buf, unsigned int err_type, int fatal) 329 static inline void log_error(char *buf, unsigned int err_type, int fatal)
331 { 330 {
332 if (ppc_md.log_error) 331 if (ppc_md.log_error)
333 ppc_md.log_error(buf, err_type, fatal); 332 ppc_md.log_error(buf, err_type, fatal);
334 } 333 }
335 334
336 #define __define_machine_initcall(mach,level,fn,id) \ 335 #define __define_machine_initcall(mach,level,fn,id) \
337 static int __init __machine_initcall_##mach##_##fn(void) { \ 336 static int __init __machine_initcall_##mach##_##fn(void) { \
338 if (machine_is(mach)) return fn(); \ 337 if (machine_is(mach)) return fn(); \
339 return 0; \ 338 return 0; \
340 } \ 339 } \
341 __define_initcall(level,__machine_initcall_##mach##_##fn,id); 340 __define_initcall(level,__machine_initcall_##mach##_##fn,id);
342 341
343 #define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1) 342 #define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1)
344 #define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s) 343 #define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s)
345 #define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2) 344 #define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2)
346 #define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s) 345 #define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s)
347 #define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3) 346 #define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3)
348 #define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s) 347 #define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s)
349 #define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4) 348 #define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4)
350 #define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s) 349 #define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s)
351 #define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5) 350 #define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5)
352 #define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s) 351 #define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s)
353 #define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs) 352 #define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs)
354 #define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6) 353 #define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6)
355 #define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s) 354 #define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s)
356 #define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7) 355 #define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7)
357 #define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s) 356 #define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s)
358 357
359 #endif /* __KERNEL__ */ 358 #endif /* __KERNEL__ */
360 #endif /* _ASM_POWERPC_MACHDEP_H */ 359 #endif /* _ASM_POWERPC_MACHDEP_H */
361 360
arch/powerpc/kernel/head_64.S
1 /* 1 /*
2 * PowerPC version 2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * 4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras. 7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support 8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras. 9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras. 10 * Copyright (C) 1996 Paul Mackerras.
11 * 11 *
12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and 12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com 13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
14 * 14 *
15 * This file contains the entry point for the 64-bit kernel along 15 * This file contains the entry point for the 64-bit kernel along
16 * with some early initialization code common to all 64-bit powerpc 16 * with some early initialization code common to all 64-bit powerpc
17 * variants. 17 * variants.
18 * 18 *
19 * This program is free software; you can redistribute it and/or 19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License 20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version 21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version. 22 * 2 of the License, or (at your option) any later version.
23 */ 23 */
24 24
25 #include <linux/threads.h> 25 #include <linux/threads.h>
26 #include <asm/reg.h> 26 #include <asm/reg.h>
27 #include <asm/page.h> 27 #include <asm/page.h>
28 #include <asm/mmu.h> 28 #include <asm/mmu.h>
29 #include <asm/ppc_asm.h> 29 #include <asm/ppc_asm.h>
30 #include <asm/asm-offsets.h> 30 #include <asm/asm-offsets.h>
31 #include <asm/bug.h> 31 #include <asm/bug.h>
32 #include <asm/cputable.h> 32 #include <asm/cputable.h>
33 #include <asm/setup.h> 33 #include <asm/setup.h>
34 #include <asm/hvcall.h> 34 #include <asm/hvcall.h>
35 #include <asm/iseries/lpar_map.h> 35 #include <asm/iseries/lpar_map.h>
36 #include <asm/thread_info.h> 36 #include <asm/thread_info.h>
37 #include <asm/firmware.h> 37 #include <asm/firmware.h>
38 #include <asm/page_64.h> 38 #include <asm/page_64.h>
39 #include <asm/irqflags.h> 39 #include <asm/irqflags.h>
40 #include <asm/kvm_book3s_asm.h> 40 #include <asm/kvm_book3s_asm.h>
41 #include <asm/ptrace.h> 41 #include <asm/ptrace.h>
42 42
 43 /* The physical memory is laid out such that the secondary processor 43 /* The physical memory is laid out such that the secondary processor
44 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 44 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
45 * using the layout described in exceptions-64s.S 45 * using the layout described in exceptions-64s.S
46 */ 46 */
47 47
48 /* 48 /*
49 * Entering into this code we make the following assumptions: 49 * Entering into this code we make the following assumptions:
50 * 50 *
51 * For pSeries or server processors: 51 * For pSeries or server processors:
52 * 1. The MMU is off & open firmware is running in real mode. 52 * 1. The MMU is off & open firmware is running in real mode.
53 * 2. The kernel is entered at __start 53 * 2. The kernel is entered at __start
54 * 54 *
55 * For iSeries: 55 * For iSeries:
56 * 1. The MMU is on (as it always is for iSeries) 56 * 1. The MMU is on (as it always is for iSeries)
57 * 2. The kernel is entered at system_reset_iSeries 57 * 2. The kernel is entered at system_reset_iSeries
58 * 58 *
59 * For Book3E processors: 59 * For Book3E processors:
60 * 1. The MMU is on running in AS0 in a state defined in ePAPR 60 * 1. The MMU is on running in AS0 in a state defined in ePAPR
61 * 2. The kernel is entered at __start 61 * 2. The kernel is entered at __start
62 */ 62 */
63 63
64 .text 64 .text
65 .globl _stext 65 .globl _stext
66 _stext: 66 _stext:
67 _GLOBAL(__start) 67 _GLOBAL(__start)
68 /* NOP this out unconditionally */ 68 /* NOP this out unconditionally */
69 BEGIN_FTR_SECTION 69 BEGIN_FTR_SECTION
70 b .__start_initialization_multiplatform 70 b .__start_initialization_multiplatform
71 END_FTR_SECTION(0, 1) 71 END_FTR_SECTION(0, 1)
72 72
73 /* Catch branch to 0 in real mode */ 73 /* Catch branch to 0 in real mode */
74 trap 74 trap
75 75
76 /* Secondary processors spin on this value until it becomes nonzero. 76 /* Secondary processors spin on this value until it becomes nonzero.
77 * When it does it contains the real address of the descriptor 77 * When it does it contains the real address of the descriptor
78 * of the function that the cpu should jump to to continue 78 * of the function that the cpu should jump to to continue
79 * initialization. 79 * initialization.
80 */ 80 */
81 .globl __secondary_hold_spinloop 81 .globl __secondary_hold_spinloop
82 __secondary_hold_spinloop: 82 __secondary_hold_spinloop:
83 .llong 0x0 83 .llong 0x0
84 84
85 /* Secondary processors write this value with their cpu # */ 85 /* Secondary processors write this value with their cpu # */
86 /* after they enter the spin loop immediately below. */ 86 /* after they enter the spin loop immediately below. */
87 .globl __secondary_hold_acknowledge 87 .globl __secondary_hold_acknowledge
88 __secondary_hold_acknowledge: 88 __secondary_hold_acknowledge:
89 .llong 0x0 89 .llong 0x0
90 90
91 #ifdef CONFIG_PPC_ISERIES 91 #ifdef CONFIG_PPC_ISERIES
92 /* 92 /*
93 * At offset 0x20, there is a pointer to iSeries LPAR data. 93 * At offset 0x20, there is a pointer to iSeries LPAR data.
94 * This is required by the hypervisor 94 * This is required by the hypervisor
95 */ 95 */
96 . = 0x20 96 . = 0x20
97 .llong hvReleaseData-KERNELBASE 97 .llong hvReleaseData-KERNELBASE
98 #endif /* CONFIG_PPC_ISERIES */ 98 #endif /* CONFIG_PPC_ISERIES */
99 99
100 #ifdef CONFIG_RELOCATABLE 100 #ifdef CONFIG_RELOCATABLE
101 /* This flag is set to 1 by a loader if the kernel should run 101 /* This flag is set to 1 by a loader if the kernel should run
102 * at the loaded address instead of the linked address. This 102 * at the loaded address instead of the linked address. This
 103 * is used by kexec-tools to keep the kdump kernel in the 103 * is used by kexec-tools to keep the kdump kernel in the
104 * crash_kernel region. The loader is responsible for 104 * crash_kernel region. The loader is responsible for
105 * observing the alignment requirement. 105 * observing the alignment requirement.
106 */ 106 */
107 /* Do not move this variable as kexec-tools knows about it. */ 107 /* Do not move this variable as kexec-tools knows about it. */
108 . = 0x5c 108 . = 0x5c
109 .globl __run_at_load 109 .globl __run_at_load
110 __run_at_load: 110 __run_at_load:
111 .long 0x72756e30 /* "run0" -- relocate to 0 by default */ 111 .long 0x72756e30 /* "run0" -- relocate to 0 by default */
112 #endif 112 #endif
113 113
	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number (passed in by the caller in r3) */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge-_stext(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop-_stext(0)
	cmpdi	0,r4,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
	ld	r4,0(r4)		/* deref function descriptor */
	mtctr	r4
	mr	r3,r24			/* r3 = physical cpu id for the target */
	li	r4,0			/* r4 = 0: no initial TLB entry preset
					 * (see generic_secondary_smp_init) */
	bctr
#else
	BUG_OPCODE
#endif
154 154
/* This value is used to mark exception frames on the stack.
 * The 64-bit constant is the ASCII string "regshere".
 */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#endif
169 169
/*
 * Secondary thread entry point.
 * At entry, r3 = this thread's physical cpu id; it is stashed in r24
 * before falling into the common secondary init path below.
 */
_GLOBAL(generic_secondary_thread_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	.relative_toc

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	bl	.book3e_secondary_thread_init
#endif
	b	generic_secondary_common_init
185 185
/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3
	mr	r25,r4

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	.relative_toc

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	.book3e_secondary_core_init
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		 */
	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	/* No paca matched our physical id: give this cpu to a kexec'd
	 * kernel which may know what to do with it.
	 */
	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG */
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */

#ifndef CONFIG_SMP
	b	3b			/* Never go on non-SMP		 */
#else
	cmpwi	0,r23,0
	beq	3b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0
	beq	4f
	ld	r23,0(r23)		/* deref function descriptor */
	mtctr	r23
	bctrl

4:	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif
267 267
/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr				/* translation already off: done */
	mflr	r4
	andc	r3,r3,r0		/* clear IR and DR in new MSR	 */
	mtspr	SPRN_SRR0,r4		/* rfid returns to caller (LR)	 */
	mtspr	SPRN_SRR1,r3		/* ... with the IR/DR-less MSR	 */
	sync
	rfid
	b	.	/* prevent speculative execution */
#endif
285 285
286 286
/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	.relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	.__boot_from_prom	/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

#ifdef CONFIG_PPC_BOOK3E
	bl	.start_initialization_book3e
	b	.__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16		/* PVR version in low halfword */
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	.__mmu_off
	b	.__after_prom_start
#endif /* CONFIG_PPC_BOOK3E */
/*
 * Open Firmware boot path: call prom_init with the OF client-interface
 * parameters still live in r3..r7.  Never returns.
 */
_INIT_STATIC(__boot_from_prom)
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	.relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	.prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
382 382
/*
 * Common path after firmware/prom interaction: relocate (if configured)
 * and copy the kernel image down to PHYSICAL_START, then continue in
 * start_here_multiplatform.  r26 = current runtime base address.
 */
_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	bne	1f
	add	r25,r25,r26	/* flagged: relocate for current address */
1:	mr	r3,r25
	bl	.relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
#endif
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r6,r6)		/* on booke, we already run at PAGE_OFFSET */
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1
	bne	3f

	li	r5,__end_interrupts - _stext	/* just copy interrupts */
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
	addi	r8,r8,(4f - _stext)@l	/* that we just made */
	mtctr	r8
	bctr

/* Size of the image to copy, referenced from the copied code below */
p_end:	.llong	_end - _stext

4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
5:	bl	.copy_and_flush		/* copy the rest */

9:	b	.start_here_multiplatform
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8			/* undo the entry bias on exit	*/
	addi	r6,r6,8
	blr

.align 8
copy_to_here:
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes,
	 * so the reset vector can branch to entry N at offset 8*N.
	 * Each entry loads the cpu number into r24 and joins the
	 * common path at label 1 (pmac_secondary_start follows).
	 */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
507 507
/*
 * PowerMac (970-based) secondary cpu bringup.  At entry r24 holds this
 * cpu's number (set by __secondary_start_pmac_0 above).
 */
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia			/* invalidate the SLB */

	/* get TOC pointer (real address) */
	bl	.relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */
546 553
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	       = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r14,r3,r28
	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r14,PACAKSAVE(r13)

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/*
	 * setup the new stack pointer, but *don't* use this until
	 * translation is on.
	 */
	mr	r1, r14

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* iSeries: enter C with EE on and hard irqs marked enabled */
	ori	r4,r4,MSR_EE
	li	r8,1
	stb	r8,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	stb	r7,PACASOFTIRQEN(r13)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */
607 614
/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.			/* start_secondary does not return */
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer	*/
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif
631 638
/*
 * Switch the CPU into 64-bit mode.
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	li	r12,(MSR_SF | MSR_ISF)@highest
	sldi	r12,r12,48		/* shift SF/ISF into the top word */
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr
648 655
/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4		/* get current address into LR */
0:	mflr	r9
	ld	r2,(p_toc - 0b)(r9)	/* link-time offset of the TOC... */
	add	r2,r2,r9		/* ...rebased to our runtime addr */
	mtlr	r0
	blr

/* Offset from label 0 above to __toc_start + 0x8000 */
p_toc:	.llong	__toc_start + 0x8000 - 0b
664 671
665 /* 672 /*
666 * This is where the main kernel code starts. 673 * This is where the main kernel code starts.
667 */ 674 */
668 _INIT_STATIC(start_here_multiplatform) 675 _INIT_STATIC(start_here_multiplatform)
669 /* set up the TOC (real address) */ 676 /* set up the TOC (real address) */
670 bl .relative_toc 677 bl .relative_toc
671 678
672 /* Clear out the BSS. It may have been done in prom_init, 679 /* Clear out the BSS. It may have been done in prom_init,
673 * already but that's irrelevant since prom_init will soon 680 * already but that's irrelevant since prom_init will soon
674 * be detached from the kernel completely. Besides, we need 681 * be detached from the kernel completely. Besides, we need
675 * to clear it now for kexec-style entry. 682 * to clear it now for kexec-style entry.
676 */ 683 */
677 LOAD_REG_ADDR(r11,__bss_stop) 684 LOAD_REG_ADDR(r11,__bss_stop)
678 LOAD_REG_ADDR(r8,__bss_start) 685 LOAD_REG_ADDR(r8,__bss_start)
679 sub r11,r11,r8 /* bss size */ 686 sub r11,r11,r8 /* bss size */
680 addi r11,r11,7 /* round up to an even double word */ 687 addi r11,r11,7 /* round up to an even double word */
681 srdi. r11,r11,3 /* shift right by 3 */ 688 srdi. r11,r11,3 /* shift right by 3 */
682 beq 4f 689 beq 4f
683 addi r8,r8,-8 690 addi r8,r8,-8
684 li r0,0 691 li r0,0
685 mtctr r11 /* zero this many doublewords */ 692 mtctr r11 /* zero this many doublewords */
686 3: stdu r0,8(r8) 693 3: stdu r0,8(r8)
687 bdnz 3b 694 bdnz 3b
688 4: 695 4:
689 696
690 #ifndef CONFIG_PPC_BOOK3E 697 #ifndef CONFIG_PPC_BOOK3E
691 mfmsr r6 698 mfmsr r6
692 ori r6,r6,MSR_RI 699 ori r6,r6,MSR_RI
693 mtmsrd r6 /* RI on */ 700 mtmsrd r6 /* RI on */
694 #endif 701 #endif
695 702
696 #ifdef CONFIG_RELOCATABLE 703 #ifdef CONFIG_RELOCATABLE
697 /* Save the physical address we're running at in kernstart_addr */ 704 /* Save the physical address we're running at in kernstart_addr */
698 LOAD_REG_ADDR(r4, kernstart_addr) 705 LOAD_REG_ADDR(r4, kernstart_addr)
699 clrldi r0,r25,2 706 clrldi r0,r25,2
700 std r0,0(r4) 707 std r0,0(r4)
701 #endif 708 #endif
702 709
703 /* The following gets the stack set up with the regs */ 710 /* The following gets the stack set up with the regs */
704 /* pointing to the real addr of the kernel stack. This is */ 711 /* pointing to the real addr of the kernel stack. This is */
705 /* all done to support the C function call below which sets */ 712 /* all done to support the C function call below which sets */
706 /* up the htab. This is done because we have relocated the */ 713 /* up the htab. This is done because we have relocated the */
707 /* kernel but are still running in real mode. */ 714 /* kernel but are still running in real mode. */
708 715
709 LOAD_REG_ADDR(r3,init_thread_union) 716 LOAD_REG_ADDR(r3,init_thread_union)
710 717
711 /* set up a stack pointer */ 718 /* set up a stack pointer */
712 addi r1,r3,THREAD_SIZE 719 addi r1,r3,THREAD_SIZE
713 li r0,0 720 li r0,0
714 stdu r0,-STACK_FRAME_OVERHEAD(r1) 721 stdu r0,-STACK_FRAME_OVERHEAD(r1)
715 722
716 /* Do very early kernel initializations, including initial hash table, 723 /* Do very early kernel initializations, including initial hash table,
717 * stab and slb setup before we turn on relocation. */ 724 * stab and slb setup before we turn on relocation. */
718 725
719 /* Restore parameters passed from prom_init/kexec */ 726 /* Restore parameters passed from prom_init/kexec */
720 mr r3,r31 727 mr r3,r31
721 bl .early_setup /* also sets r13 and SPRG_PACA */ 728 bl .early_setup /* also sets r13 and SPRG_PACA */
722 729
723 LOAD_REG_ADDR(r3, .start_here_common) 730 LOAD_REG_ADDR(r3, .start_here_common)
724 ld r4,PACAKMSR(r13) 731 ld r4,PACAKMSR(r13)
725 mtspr SPRN_SRR0,r3 732 mtspr SPRN_SRR0,r3
726 mtspr SPRN_SRR1,r4 733 mtspr SPRN_SRR1,r4
727 RFI 734 RFI
728 b . /* prevent speculative execution */ 735 b . /* prevent speculative execution */
729 736
730 /* This is where all platforms converge execution */ 737 /* This is where all platforms converge execution */
731 _INIT_GLOBAL(start_here_common) 738 _INIT_GLOBAL(start_here_common)
732 /* relocation is on at this point */ 739 /* relocation is on at this point */
733 std r1,PACAKSAVE(r13) 740 std r1,PACAKSAVE(r13)
734 741
735 /* Load the TOC (virtual address) */ 742 /* Load the TOC (virtual address) */
736 ld r2,PACATOC(r13) 743 ld r2,PACATOC(r13)
737 744
738 bl .setup_system 745 bl .setup_system
739 746
740 /* Load up the kernel context */ 747 /* Load up the kernel context */
741 5: 748 5:
742 li r5,0 749 li r5,0
743 stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ 750 stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
744 #ifdef CONFIG_PPC_ISERIES 751 #ifdef CONFIG_PPC_ISERIES
745 BEGIN_FW_FTR_SECTION 752 BEGIN_FW_FTR_SECTION
746 mfmsr r5 753 mfmsr r5
747 ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/ 754 ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/
748 mtmsrd r5 755 mtmsrd r5
749 li r5,1 756 li r5,1
750 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 757 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
751 #endif 758 #endif
752 stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */ 759 stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */
753 760
754 bl .start_kernel 761 bl .start_kernel
755 762
756 /* Not reached */ 763 /* Not reached */
757 BUG_OPCODE 764 BUG_OPCODE
758 765
759 /* 766 /*
760 * We put a few things here that have to be page-aligned. 767 * We put a few things here that have to be page-aligned.
761 * This stuff goes at the beginning of the bss, which is page-aligned. 768 * This stuff goes at the beginning of the bss, which is page-aligned.
762 */ 769 */
763 .section ".bss" 770 .section ".bss"
764 771
765 .align PAGE_SHIFT 772 .align PAGE_SHIFT
766 773
767 .globl empty_zero_page 774 .globl empty_zero_page
768 empty_zero_page: 775 empty_zero_page:
769 .space PAGE_SIZE 776 .space PAGE_SIZE
770 777
771 .globl swapper_pg_dir 778 .globl swapper_pg_dir
772 swapper_pg_dir: 779 swapper_pg_dir:
773 .space PGD_TABLE_SIZE 780 .space PGD_TABLE_SIZE
774 781
arch/powerpc/kernel/idle_power4.S
1 /* 1 /*
2 * This file contains the power_save function for 970-family CPUs. 2 * This file contains the power_save function for 970-family CPUs.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/threads.h> 10 #include <linux/threads.h>
11 #include <asm/processor.h> 11 #include <asm/processor.h>
12 #include <asm/page.h> 12 #include <asm/page.h>
13 #include <asm/cputable.h> 13 #include <asm/cputable.h>
14 #include <asm/thread_info.h> 14 #include <asm/thread_info.h>
15 #include <asm/ppc_asm.h> 15 #include <asm/ppc_asm.h>
16 #include <asm/asm-offsets.h> 16 #include <asm/asm-offsets.h>
17 17
18 #undef DEBUG 18 #undef DEBUG
19 19
20 .text 20 .text
21 21
22 _GLOBAL(power4_idle) 22 _GLOBAL(power4_idle)
23 BEGIN_FTR_SECTION 23 BEGIN_FTR_SECTION
24 blr 24 blr
25 END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) 25 END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
26 /* Now check if user or arch enabled NAP mode */ 26 /* Now check if user or arch enabled NAP mode */
27 LOAD_REG_ADDRBASE(r3,powersave_nap) 27 LOAD_REG_ADDRBASE(r3,powersave_nap)
28 lwz r4,ADDROFF(powersave_nap)(r3) 28 lwz r4,ADDROFF(powersave_nap)(r3)
29 cmpwi 0,r4,0 29 cmpwi 0,r4,0
30 beqlr 30 beqlr
31 31
32 /* Go to NAP now */ 32 /* Go to NAP now */
33 mfmsr r7 33 mfmsr r7
34 rldicl r0,r7,48,1 34 rldicl r0,r7,48,1
35 rotldi r0,r0,16 35 rotldi r0,r0,16
36 mtmsrd r0,1 /* hard-disable interrupts */ 36 mtmsrd r0,1 /* hard-disable interrupts */
37 li r0,1 37 li r0,1
38 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ 38 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
39 stb r0,PACAHARDIRQEN(r13) 39 stb r0,PACAHARDIRQEN(r13)
40 BEGIN_FTR_SECTION 40 BEGIN_FTR_SECTION
41 DSSALL 41 DSSALL
42 sync 42 sync
43 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 43 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
44 clrrdi r9,r1,THREAD_SHIFT /* current thread_info */ 44 clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
45 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ 45 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
46 ori r8,r8,_TLF_NAPPING /* so when we take an exception */ 46 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
47 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ 47 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
48 ori r7,r7,MSR_EE 48 ori r7,r7,MSR_EE
49 oris r7,r7,MSR_POW@h 49 oris r7,r7,MSR_POW@h
50 1: sync 50 1: sync
51 isync 51 isync
52 mtmsrd r7 52 mtmsrd r7
53 isync 53 isync
54 b 1b 54 b 1b
55
56 _GLOBAL(power4_cpu_offline_powersave)
57 /* Go to NAP now */
58 mfmsr r7
59 rldicl r0,r7,48,1
60 rotldi r0,r0,16
61 mtmsrd r0,1 /* hard-disable interrupts */
62 li r0,1
63 li r6,0
64 stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */
65 stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */
66 BEGIN_FTR_SECTION
67 DSSALL
68 sync
69 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
70 ori r7,r7,MSR_EE
71 oris r7,r7,MSR_POW@h
72 sync
73 isync
74 mtmsrd r7
75 isync
76 blr
77 55
arch/powerpc/platforms/powermac/smp.c
1 /* 1 /*
2 * SMP support for power macintosh. 2 * SMP support for power macintosh.
3 * 3 *
4 * We support both the old "powersurge" SMP architecture 4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines. 5 * and the current Core99 (G4 PowerMac) machines.
6 * 6 *
7 * Note that we don't support the very first rev. of 7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky 8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except 9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some 10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH. 11 * code to detect this card though and disable SMP. --BenH.
12 * 12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net) 13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>. 14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 * 15 *
16 * Support for DayStar quad CPU cards 16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000 17 * Copyright (C) XLR8, Inc. 1994-2000
18 * 18 *
19 * This program is free software; you can redistribute it and/or 19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License 20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version 21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version. 22 * 2 of the License, or (at your option) any later version.
23 */ 23 */
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/sched.h> 25 #include <linux/sched.h>
26 #include <linux/smp.h> 26 #include <linux/smp.h>
27 #include <linux/interrupt.h> 27 #include <linux/interrupt.h>
28 #include <linux/kernel_stat.h> 28 #include <linux/kernel_stat.h>
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/spinlock.h> 31 #include <linux/spinlock.h>
32 #include <linux/errno.h> 32 #include <linux/errno.h>
33 #include <linux/hardirq.h> 33 #include <linux/hardirq.h>
34 #include <linux/cpu.h> 34 #include <linux/cpu.h>
35 #include <linux/compiler.h> 35 #include <linux/compiler.h>
36 36
37 #include <asm/ptrace.h> 37 #include <asm/ptrace.h>
38 #include <asm/atomic.h> 38 #include <asm/atomic.h>
39 #include <asm/code-patching.h> 39 #include <asm/code-patching.h>
40 #include <asm/irq.h> 40 #include <asm/irq.h>
41 #include <asm/page.h> 41 #include <asm/page.h>
42 #include <asm/pgtable.h> 42 #include <asm/pgtable.h>
43 #include <asm/sections.h> 43 #include <asm/sections.h>
44 #include <asm/io.h> 44 #include <asm/io.h>
45 #include <asm/prom.h> 45 #include <asm/prom.h>
46 #include <asm/smp.h> 46 #include <asm/smp.h>
47 #include <asm/machdep.h> 47 #include <asm/machdep.h>
48 #include <asm/pmac_feature.h> 48 #include <asm/pmac_feature.h>
49 #include <asm/time.h> 49 #include <asm/time.h>
50 #include <asm/mpic.h> 50 #include <asm/mpic.h>
51 #include <asm/cacheflush.h> 51 #include <asm/cacheflush.h>
52 #include <asm/keylargo.h> 52 #include <asm/keylargo.h>
53 #include <asm/pmac_low_i2c.h> 53 #include <asm/pmac_low_i2c.h>
54 #include <asm/pmac_pfunc.h> 54 #include <asm/pmac_pfunc.h>
55 55
56 #include "pmac.h" 56 #include "pmac.h"
57 57
58 #undef DEBUG 58 #undef DEBUG
59 59
60 #ifdef DEBUG 60 #ifdef DEBUG
61 #define DBG(fmt...) udbg_printf(fmt) 61 #define DBG(fmt...) udbg_printf(fmt)
62 #else 62 #else
63 #define DBG(fmt...) 63 #define DBG(fmt...)
64 #endif 64 #endif
65 65
66 extern void __secondary_start_pmac_0(void); 66 extern void __secondary_start_pmac_0(void);
67 extern int pmac_pfunc_base_install(void); 67 extern int pmac_pfunc_base_install(void);
68 68
69 static void (*pmac_tb_freeze)(int freeze); 69 static void (*pmac_tb_freeze)(int freeze);
70 static u64 timebase; 70 static u64 timebase;
71 static int tb_req; 71 static int tb_req;
72 72
73 #ifdef CONFIG_PPC32 73 #ifdef CONFIG_PPC32
74 74
75 /* 75 /*
76 * Powersurge (old powermac SMP) support. 76 * Powersurge (old powermac SMP) support.
77 */ 77 */
78 78
79 /* Addresses for powersurge registers */ 79 /* Addresses for powersurge registers */
80 #define HAMMERHEAD_BASE 0xf8000000 80 #define HAMMERHEAD_BASE 0xf8000000
81 #define HHEAD_CONFIG 0x90 81 #define HHEAD_CONFIG 0x90
82 #define HHEAD_SEC_INTR 0xc0 82 #define HHEAD_SEC_INTR 0xc0
83 83
84 /* register for interrupting the primary processor on the powersurge */ 84 /* register for interrupting the primary processor on the powersurge */
85 /* N.B. this is actually the ethernet ROM! */ 85 /* N.B. this is actually the ethernet ROM! */
86 #define PSURGE_PRI_INTR 0xf3019000 86 #define PSURGE_PRI_INTR 0xf3019000
87 87
88 /* register for storing the start address for the secondary processor */ 88 /* register for storing the start address for the secondary processor */
89 /* N.B. this is the PCI config space address register for the 1st bridge */ 89 /* N.B. this is the PCI config space address register for the 1st bridge */
90 #define PSURGE_START 0xf2800000 90 #define PSURGE_START 0xf2800000
91 91
92 /* Daystar/XLR8 4-CPU card */ 92 /* Daystar/XLR8 4-CPU card */
93 #define PSURGE_QUAD_REG_ADDR 0xf8800000 93 #define PSURGE_QUAD_REG_ADDR 0xf8800000
94 94
95 #define PSURGE_QUAD_IRQ_SET 0 95 #define PSURGE_QUAD_IRQ_SET 0
96 #define PSURGE_QUAD_IRQ_CLR 1 96 #define PSURGE_QUAD_IRQ_CLR 1
97 #define PSURGE_QUAD_IRQ_PRIMARY 2 97 #define PSURGE_QUAD_IRQ_PRIMARY 2
98 #define PSURGE_QUAD_CKSTOP_CTL 3 98 #define PSURGE_QUAD_CKSTOP_CTL 3
99 #define PSURGE_QUAD_PRIMARY_ARB 4 99 #define PSURGE_QUAD_PRIMARY_ARB 4
100 #define PSURGE_QUAD_BOARD_ID 6 100 #define PSURGE_QUAD_BOARD_ID 6
101 #define PSURGE_QUAD_WHICH_CPU 7 101 #define PSURGE_QUAD_WHICH_CPU 7
102 #define PSURGE_QUAD_CKSTOP_RDBK 8 102 #define PSURGE_QUAD_CKSTOP_RDBK 8
103 #define PSURGE_QUAD_RESET_CTL 11 103 #define PSURGE_QUAD_RESET_CTL 11
104 104
105 #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v))) 105 #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
106 #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f) 106 #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
107 #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v))) 107 #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
108 #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v))) 108 #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
109 109
110 /* virtual addresses for the above */ 110 /* virtual addresses for the above */
111 static volatile u8 __iomem *hhead_base; 111 static volatile u8 __iomem *hhead_base;
112 static volatile u8 __iomem *quad_base; 112 static volatile u8 __iomem *quad_base;
113 static volatile u32 __iomem *psurge_pri_intr; 113 static volatile u32 __iomem *psurge_pri_intr;
114 static volatile u8 __iomem *psurge_sec_intr; 114 static volatile u8 __iomem *psurge_sec_intr;
115 static volatile u32 __iomem *psurge_start; 115 static volatile u32 __iomem *psurge_start;
116 116
117 /* values for psurge_type */ 117 /* values for psurge_type */
118 #define PSURGE_NONE -1 118 #define PSURGE_NONE -1
119 #define PSURGE_DUAL 0 119 #define PSURGE_DUAL 0
120 #define PSURGE_QUAD_OKEE 1 120 #define PSURGE_QUAD_OKEE 1
121 #define PSURGE_QUAD_COTTON 2 121 #define PSURGE_QUAD_COTTON 2
122 #define PSURGE_QUAD_ICEGRASS 3 122 #define PSURGE_QUAD_ICEGRASS 3
123 123
124 /* what sort of powersurge board we have */ 124 /* what sort of powersurge board we have */
125 static int psurge_type = PSURGE_NONE; 125 static int psurge_type = PSURGE_NONE;
126 126
127 /* 127 /*
128 * Set and clear IPIs for powersurge. 128 * Set and clear IPIs for powersurge.
129 */ 129 */
130 static inline void psurge_set_ipi(int cpu) 130 static inline void psurge_set_ipi(int cpu)
131 { 131 {
132 if (psurge_type == PSURGE_NONE) 132 if (psurge_type == PSURGE_NONE)
133 return; 133 return;
134 if (cpu == 0) 134 if (cpu == 0)
135 in_be32(psurge_pri_intr); 135 in_be32(psurge_pri_intr);
136 else if (psurge_type == PSURGE_DUAL) 136 else if (psurge_type == PSURGE_DUAL)
137 out_8(psurge_sec_intr, 0); 137 out_8(psurge_sec_intr, 0);
138 else 138 else
139 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu); 139 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
140 } 140 }
141 141
142 static inline void psurge_clr_ipi(int cpu) 142 static inline void psurge_clr_ipi(int cpu)
143 { 143 {
144 if (cpu > 0) { 144 if (cpu > 0) {
145 switch(psurge_type) { 145 switch(psurge_type) {
146 case PSURGE_DUAL: 146 case PSURGE_DUAL:
147 out_8(psurge_sec_intr, ~0); 147 out_8(psurge_sec_intr, ~0);
148 case PSURGE_NONE: 148 case PSURGE_NONE:
149 break; 149 break;
150 default: 150 default:
151 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu); 151 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
152 } 152 }
153 } 153 }
154 } 154 }
155 155
156 /* 156 /*
157 * On powersurge (old SMP powermac architecture) we don't have 157 * On powersurge (old SMP powermac architecture) we don't have
158 * separate IPIs for separate messages like openpic does. Instead 158 * separate IPIs for separate messages like openpic does. Instead
159 * we have a bitmap for each processor, where a 1 bit means that 159 * we have a bitmap for each processor, where a 1 bit means that
160 * the corresponding message is pending for that processor. 160 * the corresponding message is pending for that processor.
161 * Ideally each cpu's entry would be in a different cache line. 161 * Ideally each cpu's entry would be in a different cache line.
162 * -- paulus. 162 * -- paulus.
163 */ 163 */
164 static unsigned long psurge_smp_message[NR_CPUS]; 164 static unsigned long psurge_smp_message[NR_CPUS];
165 165
166 void psurge_smp_message_recv(void) 166 void psurge_smp_message_recv(void)
167 { 167 {
168 int cpu = smp_processor_id(); 168 int cpu = smp_processor_id();
169 int msg; 169 int msg;
170 170
171 /* clear interrupt */ 171 /* clear interrupt */
172 psurge_clr_ipi(cpu); 172 psurge_clr_ipi(cpu);
173 173
174 if (num_online_cpus() < 2) 174 if (num_online_cpus() < 2)
175 return; 175 return;
176 176
177 /* make sure there is a message there */ 177 /* make sure there is a message there */
178 for (msg = 0; msg < 4; msg++) 178 for (msg = 0; msg < 4; msg++)
179 if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) 179 if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
180 smp_message_recv(msg); 180 smp_message_recv(msg);
181 } 181 }
182 182
183 irqreturn_t psurge_primary_intr(int irq, void *d) 183 irqreturn_t psurge_primary_intr(int irq, void *d)
184 { 184 {
185 psurge_smp_message_recv(); 185 psurge_smp_message_recv();
186 return IRQ_HANDLED; 186 return IRQ_HANDLED;
187 } 187 }
188 188
189 static void smp_psurge_message_pass(int target, int msg) 189 static void smp_psurge_message_pass(int target, int msg)
190 { 190 {
191 int i; 191 int i;
192 192
193 if (num_online_cpus() < 2) 193 if (num_online_cpus() < 2)
194 return; 194 return;
195 195
196 for_each_online_cpu(i) { 196 for_each_online_cpu(i) {
197 if (target == MSG_ALL 197 if (target == MSG_ALL
198 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) 198 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
199 || target == i) { 199 || target == i) {
200 set_bit(msg, &psurge_smp_message[i]); 200 set_bit(msg, &psurge_smp_message[i]);
201 psurge_set_ipi(i); 201 psurge_set_ipi(i);
202 } 202 }
203 } 203 }
204 } 204 }
205 205
206 /* 206 /*
207 * Determine a quad card presence. We read the board ID register, we 207 * Determine a quad card presence. We read the board ID register, we
208 * force the data bus to change to something else, and we read it again. 208 * force the data bus to change to something else, and we read it again.
209 * If it's stable, then the register probably exists (ugh !) 209 * If it's stable, then the register probably exists (ugh !)
210 */ 210 */
211 static int __init psurge_quad_probe(void) 211 static int __init psurge_quad_probe(void)
212 { 212 {
213 int type; 213 int type;
214 unsigned int i; 214 unsigned int i;
215 215
216 type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID); 216 type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
217 if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS 217 if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
218 || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID)) 218 || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
219 return PSURGE_DUAL; 219 return PSURGE_DUAL;
220 220
221 /* looks OK, try a slightly more rigorous test */ 221 /* looks OK, try a slightly more rigorous test */
222 /* bogus is not necessarily cacheline-aligned, 222 /* bogus is not necessarily cacheline-aligned,
223 though I don't suppose that really matters. -- paulus */ 223 though I don't suppose that really matters. -- paulus */
224 for (i = 0; i < 100; i++) { 224 for (i = 0; i < 100; i++) {
225 volatile u32 bogus[8]; 225 volatile u32 bogus[8];
226 bogus[(0+i)%8] = 0x00000000; 226 bogus[(0+i)%8] = 0x00000000;
227 bogus[(1+i)%8] = 0x55555555; 227 bogus[(1+i)%8] = 0x55555555;
228 bogus[(2+i)%8] = 0xFFFFFFFF; 228 bogus[(2+i)%8] = 0xFFFFFFFF;
229 bogus[(3+i)%8] = 0xAAAAAAAA; 229 bogus[(3+i)%8] = 0xAAAAAAAA;
230 bogus[(4+i)%8] = 0x33333333; 230 bogus[(4+i)%8] = 0x33333333;
231 bogus[(5+i)%8] = 0xCCCCCCCC; 231 bogus[(5+i)%8] = 0xCCCCCCCC;
232 bogus[(6+i)%8] = 0xCCCCCCCC; 232 bogus[(6+i)%8] = 0xCCCCCCCC;
233 bogus[(7+i)%8] = 0x33333333; 233 bogus[(7+i)%8] = 0x33333333;
234 wmb(); 234 wmb();
235 asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory"); 235 asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
236 mb(); 236 mb();
237 if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID)) 237 if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
238 return PSURGE_DUAL; 238 return PSURGE_DUAL;
239 } 239 }
240 return type; 240 return type;
241 } 241 }
242 242
243 static void __init psurge_quad_init(void) 243 static void __init psurge_quad_init(void)
244 { 244 {
245 int procbits; 245 int procbits;
246 246
247 if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351); 247 if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
248 procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU); 248 procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
249 if (psurge_type == PSURGE_QUAD_ICEGRASS) 249 if (psurge_type == PSURGE_QUAD_ICEGRASS)
250 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits); 250 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
251 else 251 else
252 PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits); 252 PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
253 mdelay(33); 253 mdelay(33);
254 out_8(psurge_sec_intr, ~0); 254 out_8(psurge_sec_intr, ~0);
255 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits); 255 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
256 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits); 256 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
257 if (psurge_type != PSURGE_QUAD_ICEGRASS) 257 if (psurge_type != PSURGE_QUAD_ICEGRASS)
258 PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits); 258 PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
259 PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits); 259 PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
260 mdelay(33); 260 mdelay(33);
261 PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits); 261 PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
262 mdelay(33); 262 mdelay(33);
263 PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits); 263 PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
264 mdelay(33); 264 mdelay(33);
265 } 265 }
266 266
267 static int __init smp_psurge_probe(void) 267 static int __init smp_psurge_probe(void)
268 { 268 {
269 int i, ncpus; 269 int i, ncpus;
270 struct device_node *dn; 270 struct device_node *dn;
271 271
272 /* We don't do SMP on the PPC601 -- paulus */ 272 /* We don't do SMP on the PPC601 -- paulus */
273 if (PVR_VER(mfspr(SPRN_PVR)) == 1) 273 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
274 return 1; 274 return 1;
275 275
276 /* 276 /*
277 * The powersurge cpu board can be used in the generation 277 * The powersurge cpu board can be used in the generation
278 * of powermacs that have a socket for an upgradeable cpu card, 278 * of powermacs that have a socket for an upgradeable cpu card,
279 * including the 7500, 8500, 9500, 9600. 279 * including the 7500, 8500, 9500, 9600.
280 * The device tree doesn't tell you if you have 2 cpus because 280 * The device tree doesn't tell you if you have 2 cpus because
281 * OF doesn't know anything about the 2nd processor. 281 * OF doesn't know anything about the 2nd processor.
282 * Instead we look for magic bits in magic registers, 282 * Instead we look for magic bits in magic registers,
283 * in the hammerhead memory controller in the case of the 283 * in the hammerhead memory controller in the case of the
284 * dual-cpu powersurge board. -- paulus. 284 * dual-cpu powersurge board. -- paulus.
285 */ 285 */
286 dn = of_find_node_by_name(NULL, "hammerhead"); 286 dn = of_find_node_by_name(NULL, "hammerhead");
287 if (dn == NULL) 287 if (dn == NULL)
288 return 1; 288 return 1;
289 of_node_put(dn); 289 of_node_put(dn);
290 290
291 hhead_base = ioremap(HAMMERHEAD_BASE, 0x800); 291 hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
292 quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024); 292 quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
293 psurge_sec_intr = hhead_base + HHEAD_SEC_INTR; 293 psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
294 294
295 psurge_type = psurge_quad_probe(); 295 psurge_type = psurge_quad_probe();
296 if (psurge_type != PSURGE_DUAL) { 296 if (psurge_type != PSURGE_DUAL) {
297 psurge_quad_init(); 297 psurge_quad_init();
298 /* All released cards using this HW design have 4 CPUs */ 298 /* All released cards using this HW design have 4 CPUs */
299 ncpus = 4; 299 ncpus = 4;
300 /* Not sure how timebase sync works on those, let's use SW */ 300 /* Not sure how timebase sync works on those, let's use SW */
301 smp_ops->give_timebase = smp_generic_give_timebase; 301 smp_ops->give_timebase = smp_generic_give_timebase;
302 smp_ops->take_timebase = smp_generic_take_timebase; 302 smp_ops->take_timebase = smp_generic_take_timebase;
303 } else { 303 } else {
304 iounmap(quad_base); 304 iounmap(quad_base);
305 if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { 305 if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
306 /* not a dual-cpu card */ 306 /* not a dual-cpu card */
307 iounmap(hhead_base); 307 iounmap(hhead_base);
308 psurge_type = PSURGE_NONE; 308 psurge_type = PSURGE_NONE;
309 return 1; 309 return 1;
310 } 310 }
311 ncpus = 2; 311 ncpus = 2;
312 } 312 }
313 313
314 psurge_start = ioremap(PSURGE_START, 4); 314 psurge_start = ioremap(PSURGE_START, 4);
315 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); 315 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
316 316
317 /* This is necessary because OF doesn't know about the 317 /* This is necessary because OF doesn't know about the
318 * secondary cpu(s), and thus there aren't nodes in the 318 * secondary cpu(s), and thus there aren't nodes in the
319 * device tree for them, and smp_setup_cpu_maps hasn't 319 * device tree for them, and smp_setup_cpu_maps hasn't
320 * set their bits in cpu_present_mask. 320 * set their bits in cpu_present_mask.
321 */ 321 */
322 if (ncpus > NR_CPUS) 322 if (ncpus > NR_CPUS)
323 ncpus = NR_CPUS; 323 ncpus = NR_CPUS;
324 for (i = 1; i < ncpus ; ++i) 324 for (i = 1; i < ncpus ; ++i)
325 set_cpu_present(i, true); 325 set_cpu_present(i, true);
326 326
327 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); 327 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
328 328
329 return ncpus; 329 return ncpus;
330 } 330 }
331 331
332 static void __init smp_psurge_kick_cpu(int nr) 332 static void __init smp_psurge_kick_cpu(int nr)
333 { 333 {
334 unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; 334 unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
335 unsigned long a, flags; 335 unsigned long a, flags;
336 int i, j; 336 int i, j;
337 337
338 /* Defining this here is evil ... but I prefer hiding that 338 /* Defining this here is evil ... but I prefer hiding that
339 * crap to avoid giving people ideas that they can do the 339 * crap to avoid giving people ideas that they can do the
340 * same. 340 * same.
341 */ 341 */
342 extern volatile unsigned int cpu_callin_map[NR_CPUS]; 342 extern volatile unsigned int cpu_callin_map[NR_CPUS];
343 343
344 /* may need to flush here if secondary bats aren't setup */ 344 /* may need to flush here if secondary bats aren't setup */
345 for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32) 345 for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
346 asm volatile("dcbf 0,%0" : : "r" (a) : "memory"); 346 asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
347 asm volatile("sync"); 347 asm volatile("sync");
348 348
349 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); 349 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
350 350
351 /* This is going to freeze the timebase, we disable interrupts */ 351 /* This is going to freeze the timebase, we disable interrupts */
352 local_irq_save(flags); 352 local_irq_save(flags);
353 353
354 out_be32(psurge_start, start); 354 out_be32(psurge_start, start);
355 mb(); 355 mb();
356 356
357 psurge_set_ipi(nr); 357 psurge_set_ipi(nr);
358 358
359 /* 359 /*
360 * We can't use udelay here because the timebase is now frozen. 360 * We can't use udelay here because the timebase is now frozen.
361 */ 361 */
362 for (i = 0; i < 2000; ++i) 362 for (i = 0; i < 2000; ++i)
363 asm volatile("nop" : : : "memory"); 363 asm volatile("nop" : : : "memory");
364 psurge_clr_ipi(nr); 364 psurge_clr_ipi(nr);
365 365
366 /* 366 /*
367 * Also, because the timebase is frozen, we must not return to the 367 * Also, because the timebase is frozen, we must not return to the
368 * caller which will try to do udelay's etc... Instead, we wait -here- 368 * caller which will try to do udelay's etc... Instead, we wait -here-
369 * for the CPU to callin. 369 * for the CPU to callin.
370 */ 370 */
371 for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) { 371 for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
372 for (j = 1; j < 10000; j++) 372 for (j = 1; j < 10000; j++)
373 asm volatile("nop" : : : "memory"); 373 asm volatile("nop" : : : "memory");
374 asm volatile("sync" : : : "memory"); 374 asm volatile("sync" : : : "memory");
375 } 375 }
376 if (!cpu_callin_map[nr]) 376 if (!cpu_callin_map[nr])
377 goto stuck; 377 goto stuck;
378 378
379 /* And we do the TB sync here too for standard dual CPU cards */ 379 /* And we do the TB sync here too for standard dual CPU cards */
380 if (psurge_type == PSURGE_DUAL) { 380 if (psurge_type == PSURGE_DUAL) {
381 while(!tb_req) 381 while(!tb_req)
382 barrier(); 382 barrier();
383 tb_req = 0; 383 tb_req = 0;
384 mb(); 384 mb();
385 timebase = get_tb(); 385 timebase = get_tb();
386 mb(); 386 mb();
387 while (timebase) 387 while (timebase)
388 barrier(); 388 barrier();
389 mb(); 389 mb();
390 } 390 }
391 stuck: 391 stuck:
392 /* now interrupt the secondary, restarting both TBs */ 392 /* now interrupt the secondary, restarting both TBs */
393 if (psurge_type == PSURGE_DUAL) 393 if (psurge_type == PSURGE_DUAL)
394 psurge_set_ipi(1); 394 psurge_set_ipi(1);
395 395
396 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); 396 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
397 } 397 }
398 398
/* IRQ registration for the primary CPU's IPI on PowerSurge machines;
 * wired to hardware interrupt 30 in smp_psurge_setup_cpu(). */
static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = IRQF_DISABLED,
	.name = "primary IPI",
};
404 404
/* Per-CPU setup for PowerSurge machines; only the boot CPU (0) has any
 * work to do: re-arm the secondary entry point and hook the primary IPI. */
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
	if (cpu_nr != 0)
		return;

	/* reset the entry point so if we get another intr we won't
	 * try to startup again */
	out_be32(psurge_start, 0x100);
	/* Hook the primary IPI on hardware interrupt line 30 */
	if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
		printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
416 416
/* Secondary-CPU side of the PowerSurge-dual software timebase sync:
 * request a TB snapshot from the primary, copy it into our timebase,
 * then ack by clearing the shared 'timebase' variable. The mb()s order
 * the flag/value accesses between the two CPUs. */
void __init smp_psurge_take_timebase(void)
{
	/* Only PSURGE_DUAL boards do the TB handshake here */
	if (psurge_type != PSURGE_DUAL)
		return;

	tb_req = 1;		/* ask the primary to publish its TB */
	mb();
	while (!timebase)	/* wait for the snapshot */
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;		/* ack: primary may proceed */
	mb();
	set_dec(tb_ticks_per_jiffy/2);
}
432 432
/* Primary side of the timebase handover is a no-op here: on PowerSurge
 * the sync already happened inside smp_psurge_kick_cpu(). */
void __init smp_psurge_give_timebase(void)
{
	/* Nothing to do here */
}
437 437
/* PowerSurge-style Macs: SMP operations table (no hotplug support) */
struct smp_ops_t psurge_smp_ops = {
	.message_pass = smp_psurge_message_pass,
	.probe = smp_psurge_probe,
	.kick_cpu = smp_psurge_kick_cpu,
	.setup_cpu = smp_psurge_setup_cpu,
	.give_timebase = smp_psurge_give_timebase,
	.take_timebase = smp_psurge_take_timebase,
};
447 #endif /* CONFIG_PPC32 - actually powersurge support */ 447 #endif /* CONFIG_PPC32 - actually powersurge support */
448 448
449 /* 449 /*
450 * Core 99 and later support 450 * Core 99 and later support
451 */ 451 */
452 452
453 453
/* Primary side of Core99 timebase sync: once the secondary requests it
 * (tb_req), freeze the timebase through the platform hook, publish our
 * TB value, wait for the secondary's ack, then unfreeze so both TBs
 * resume in lockstep. Runs with interrupts off for the whole handshake. */
static void smp_core99_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Wait for the secondary to ask for the timebase */
	while(!tb_req)
		barrier();
	tb_req = 0;
	(*pmac_tb_freeze)(1);	/* freeze TB on both CPUs */
	mb();
	timebase = get_tb();	/* publish our value */
	mb();
	/* Secondary clears 'timebase' once it has copied it */
	while (timebase)
		barrier();
	mb();
	(*pmac_tb_freeze)(0);	/* unfreeze: TBs now run in step */
	mb();

	local_irq_restore(flags);
}
475 475
476 476
/* Secondary side of Core99 timebase sync: signal readiness via tb_req,
 * wait for the primary to publish its (frozen) TB value, install it,
 * and ack by clearing the shared variable. Interrupts off throughout. */
static void __devinit smp_core99_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;		/* tell the primary we're ready */
	mb();
	while (!timebase)	/* wait for the published TB value */
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;		/* ack: primary may unfreeze now */
	mb();

	local_irq_restore(flags);
}
494 494
495 #ifdef CONFIG_PPC64 495 #ifdef CONFIG_PPC64
496 /* 496 /*
497 * G5s enable/disable the timebase via an i2c-connected clock chip. 497 * G5s enable/disable the timebase via an i2c-connected clock chip.
498 */ 498 */
static struct pmac_i2c_bus *pmac_tb_clock_chip_host;	/* i2c bus hosting the TB clock chip */
static u8 pmac_tb_pulsar_addr;				/* i2c address of a probed Pulsar chip */
501 501
502 static void smp_core99_cypress_tb_freeze(int freeze) 502 static void smp_core99_cypress_tb_freeze(int freeze)
503 { 503 {
504 u8 data; 504 u8 data;
505 int rc; 505 int rc;
506 506
507 /* Strangely, the device-tree says address is 0xd2, but darwin 507 /* Strangely, the device-tree says address is 0xd2, but darwin
508 * accesses 0xd0 ... 508 * accesses 0xd0 ...
509 */ 509 */
510 pmac_i2c_setmode(pmac_tb_clock_chip_host, 510 pmac_i2c_setmode(pmac_tb_clock_chip_host,
511 pmac_i2c_mode_combined); 511 pmac_i2c_mode_combined);
512 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, 512 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
513 0xd0 | pmac_i2c_read, 513 0xd0 | pmac_i2c_read,
514 1, 0x81, &data, 1); 514 1, 0x81, &data, 1);
515 if (rc != 0) 515 if (rc != 0)
516 goto bail; 516 goto bail;
517 517
518 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c); 518 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
519 519
520 pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub); 520 pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
521 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, 521 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
522 0xd0 | pmac_i2c_write, 522 0xd0 | pmac_i2c_write,
523 1, 0x81, &data, 1); 523 1, 0x81, &data, 1);
524 524
525 bail: 525 bail:
526 if (rc != 0) { 526 if (rc != 0) {
527 printk("Cypress Timebase %s rc: %d\n", 527 printk("Cypress Timebase %s rc: %d\n",
528 freeze ? "freeze" : "unfreeze", rc); 528 freeze ? "freeze" : "unfreeze", rc);
529 panic("Timebase freeze failed !\n"); 529 panic("Timebase freeze failed !\n");
530 } 530 }
531 } 531 }
532 532
533 533
/* Freeze/unfreeze the timebase on G5s fitted with a Pulsar clock chip:
 * read-modify-write register 0x2e at the i2c address probed during
 * setup. Panics on i2c failure since the CPUs would otherwise run with
 * unsynchronized timebases. */
static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_read,
			   1, 0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	/* Preserve bits 0x88, write 0x11 to freeze or 0x22 to unfreeze */
	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_write,
			   1, 0x2e, &data, 1);
 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
560 560
561 static void __init smp_core99_setup_i2c_hwsync(int ncpus) 561 static void __init smp_core99_setup_i2c_hwsync(int ncpus)
562 { 562 {
563 struct device_node *cc = NULL; 563 struct device_node *cc = NULL;
564 struct device_node *p; 564 struct device_node *p;
565 const char *name = NULL; 565 const char *name = NULL;
566 const u32 *reg; 566 const u32 *reg;
567 int ok; 567 int ok;
568 568
569 /* Look for the clock chip */ 569 /* Look for the clock chip */
570 while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) { 570 while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
571 p = of_get_parent(cc); 571 p = of_get_parent(cc);
572 ok = p && of_device_is_compatible(p, "uni-n-i2c"); 572 ok = p && of_device_is_compatible(p, "uni-n-i2c");
573 of_node_put(p); 573 of_node_put(p);
574 if (!ok) 574 if (!ok)
575 continue; 575 continue;
576 576
577 pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc); 577 pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
578 if (pmac_tb_clock_chip_host == NULL) 578 if (pmac_tb_clock_chip_host == NULL)
579 continue; 579 continue;
580 reg = of_get_property(cc, "reg", NULL); 580 reg = of_get_property(cc, "reg", NULL);
581 if (reg == NULL) 581 if (reg == NULL)
582 continue; 582 continue;
583 switch (*reg) { 583 switch (*reg) {
584 case 0xd2: 584 case 0xd2:
585 if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) { 585 if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) {
586 pmac_tb_freeze = smp_core99_pulsar_tb_freeze; 586 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
587 pmac_tb_pulsar_addr = 0xd2; 587 pmac_tb_pulsar_addr = 0xd2;
588 name = "Pulsar"; 588 name = "Pulsar";
589 } else if (of_device_is_compatible(cc, "cy28508")) { 589 } else if (of_device_is_compatible(cc, "cy28508")) {
590 pmac_tb_freeze = smp_core99_cypress_tb_freeze; 590 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
591 name = "Cypress"; 591 name = "Cypress";
592 } 592 }
593 break; 593 break;
594 case 0xd4: 594 case 0xd4:
595 pmac_tb_freeze = smp_core99_pulsar_tb_freeze; 595 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
596 pmac_tb_pulsar_addr = 0xd4; 596 pmac_tb_pulsar_addr = 0xd4;
597 name = "Pulsar"; 597 name = "Pulsar";
598 break; 598 break;
599 } 599 }
600 if (pmac_tb_freeze != NULL) 600 if (pmac_tb_freeze != NULL)
601 break; 601 break;
602 } 602 }
603 if (pmac_tb_freeze != NULL) { 603 if (pmac_tb_freeze != NULL) {
604 /* Open i2c bus for synchronous access */ 604 /* Open i2c bus for synchronous access */
605 if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) { 605 if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
606 printk(KERN_ERR "Failed top open i2c bus for clock" 606 printk(KERN_ERR "Failed top open i2c bus for clock"
607 " sync, fallback to software sync !\n"); 607 " sync, fallback to software sync !\n");
608 goto no_i2c_sync; 608 goto no_i2c_sync;
609 } 609 }
610 printk(KERN_INFO "Processor timebase sync using %s i2c clock\n", 610 printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
611 name); 611 name);
612 return; 612 return;
613 } 613 }
614 no_i2c_sync: 614 no_i2c_sync:
615 pmac_tb_freeze = NULL; 615 pmac_tb_freeze = NULL;
616 pmac_tb_clock_chip_host = NULL; 616 pmac_tb_clock_chip_host = NULL;
617 } 617 }
618 618
619 619
620 620
/*
 * Newer G5s use a platform function ("cpu-timebase" on the /cpus node)
 * to freeze/unfreeze the timebase.
 */

static void smp_core99_pfunc_tb_freeze(int freeze)
{
	struct device_node *cpus;
	struct pmf_args args;

	cpus = of_find_node_by_path("/cpus");
	BUG_ON(cpus == NULL);
	args.count = 1;
	/* The platform function argument is "enabled", hence the negation */
	args.u[0].v = !freeze;
	pmf_call_function(cpus, "cpu-timebase", &args);
	of_node_put(cpus);
}
637 637
638 #else /* CONFIG_PPC64 */ 638 #else /* CONFIG_PPC64 */
639 639
640 /* 640 /*
641 * SMP G4 use a GPIO to enable/disable the timebase. 641 * SMP G4 use a GPIO to enable/disable the timebase.
642 */ 642 */
643 643
static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO (default KL_GPIO_TB_ENABLE, may be overridden by the "timebase-enable" property) */
645 645
646 static void smp_core99_gpio_tb_freeze(int freeze) 646 static void smp_core99_gpio_tb_freeze(int freeze)
647 { 647 {
648 if (freeze) 648 if (freeze)
649 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4); 649 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
650 else 650 else
651 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0); 651 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
652 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); 652 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
653 } 653 }
654 654
655 655
656 #endif /* !CONFIG_PPC64 */ 656 #endif /* !CONFIG_PPC64 */
657 657
658 /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */ 658 /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
/* volatile: written by CPU0 and read by the secondary without locking
 * during bring-up. Storage-class specifier first: "volatile static"
 * placement is obsolescent per C99 6.11.5. */
static volatile long int core99_l2_cache;
static volatile long int core99_l3_cache;
661 661
/* Record L2/L3 cache control registers on CPU0 and replay them onto a
 * secondary G4 CPU (32-bit only; compiles to a no-op on ppc64). */
static void __devinit core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		/* Boot CPU: snapshot current L2CR for later replay */
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		/* Disable L2 before programming the saved settings */
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0){
		/* Same dance for L3 where present */
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
#endif /* !CONFIG_PPC64 */
}
692 692
/* Pick the timebase-sync method for this machine (i2c clock chip,
 * platform function, GPIO, or generic software sync as last resort),
 * set up hard CPU ids on ppc32, and disable NAP where unsupported. */
static void __init smp_core99_setup(int ncpus)
{
#ifdef CONFIG_PPC64

	/* i2c based HW sync on some G5s */
	if (of_machine_is_compatible("PowerMac7,2") ||
	    of_machine_is_compatible("PowerMac7,3") ||
	    of_machine_is_compatible("RackMac3,1"))
		smp_core99_setup_i2c_hwsync(ncpus);

	/* pfunc based HW sync on recent G5s */
	if (pmac_tb_freeze == NULL) {
		struct device_node *cpus =
			of_find_node_by_path("/cpus");
		if (cpus &&
		    of_get_property(cpus, "platform-cpu-timebase", NULL)) {
			pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
			printk(KERN_INFO "Processor timebase sync using"
			       " platform function\n");
		}
	}

#else /* CONFIG_PPC64 */

	/* GPIO based HW sync on ppc32 Core99 */
	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
		struct device_node *cpu;
		const u32 *tbprop = NULL;

		core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
		cpu = of_find_node_by_type(NULL, "cpu");
		if (cpu != NULL) {
			/* Device tree may override the freeze GPIO number */
			tbprop = of_get_property(cpu, "timebase-enable", NULL);
			if (tbprop)
				core99_tb_gpio = *tbprop;
			of_node_put(cpu);
		}
		pmac_tb_freeze = smp_core99_gpio_tb_freeze;
		printk(KERN_INFO "Processor timebase sync using"
		       " GPIO 0x%02x\n", core99_tb_gpio);
	}

#endif /* CONFIG_PPC64 */

	/* No timebase sync, fallback to software */
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
		printk(KERN_INFO "Processor timebase sync using software\n");
	}

#ifndef CONFIG_PPC64
	{
		int i;

		/* XXX should get this from reg properties */
		for (i = 1; i < ncpus; ++i)
			set_hard_smp_processor_id(i, i);
	}
#endif

	/* 32 bits SMP can't NAP */
	if (!of_machine_is_compatible("MacRISC4"))
		powersave_nap = 0;
}
758 758
/* Probe for Core99 SMP: count "cpu" nodes in the device tree and, if
 * more than one, perform the early init needed before SMP bring-up.
 * Returns the number of CPUs found (1 means stay uniprocessor). */
static int __init smp_core99_probe(void)
{
	struct device_node *cpus;
	int ncpus = 0;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

	/* Count CPUs in the device-tree */
	for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
		++ncpus;

	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

	/* Nothing more to do if less than 2 of them */
	if (ncpus <= 1)
		return 1;

	/* We need to perform some early initialisations before we can start
	 * setting up SMP as we are running before initcalls
	 */
	pmac_pfunc_base_install();
	pmac_i2c_init();

	/* Setup various bits like timebase sync method, ability to nap, ... */
	smp_core99_setup(ncpus);

	/* Install IPIs */
	mpic_request_ipis();

	/* Collect l2cr and l3cr values from CPU 0 */
	core99_init_caches(0);

	return ncpus;
}
793 793
/* Start secondary CPU 'nr' on Core99: temporarily patch the system
 * reset vector at 0x100 with a branch into the secondary entry stub,
 * pulse the CPU's reset line, then restore the original vector.
 * Runs with interrupts off so nothing hits the patched vector locally. */
static void __devinit smp_core99_kick_cpu(int nr)
{
	unsigned int save_vector;
	unsigned long target, flags;
	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);

	/* Core99 supports at most 4 CPUs */
	if (nr < 0 || nr > 3)
		return;

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_kick_cpu", 0x346);

	local_irq_save(flags);

	/* Save reset vector */
	save_vector = *vector;

	/* Setup fake reset vector that does
	 *   b __secondary_start_pmac_0 + nr*8
	 */
	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
	patch_branch(vector, target, BRANCH_SET_LINK);

	/* Put some life in our friend */
	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

	/* FIXME: We wait a bit for the CPU to take the exception, I should
	 * instead wait for the entry code to set something for me. Well,
	 * ideally, all that crap will be done in prom.c and the CPU left
	 * in a RAM-based wait loop like CHRP.
	 */
	mdelay(1);

	/* Restore our exception vector */
	*vector = save_vector;
	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

	local_irq_restore(flags);
	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
}
834 834
/* Per-CPU setup on Core99: program caches on secondaries, set up the
 * MPIC for the calling CPU, and do the boot-CPU-only finalisation
 * (close the tb-sync i2c bus, power down an unused second G5 core). */
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_PPC64
		extern void g5_phy_disable_cpu1(void);

		/* Close i2c bus if it was used for tb sync */
		if (pmac_tb_clock_chip_host) {
			pmac_i2c_close(pmac_tb_clock_chip_host);
			pmac_tb_clock_chip_host = NULL;
		}

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (of_machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_PPC64 */

		if (ppc_md.progress)
			ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}
866 866
867 867
868 #ifdef CONFIG_HOTPLUG_CPU 868 #ifdef CONFIG_HOTPLUG_CPU
869 869
/* Take this CPU out of service: run the generic disable path first,
 * and on success raise the MPIC task priority so the CPU no longer
 * accepts external interrupts. Returns 0 or the generic error code. */
static int smp_core99_cpu_disable(void)
{
	int err = generic_cpu_disable();

	if (!err)
		mpic_cpu_set_priority(0xf);
	return err;
}
880 880
881 #ifdef CONFIG_PPC32 881 #ifdef CONFIG_PPC32
882 882
/* CPU offline path for 32-bit PowerMacs: mark ourselves CPU_DEAD for
 * generic_cpu_die() to observe, then drop into low_cpu_die(), which
 * does not return. */
static void pmac_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	printk(KERN_DEBUG "CPU%d offline\n", smp_processor_id());
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	mb();
	low_cpu_die();
}
893 893
894 #else /* CONFIG_PPC32 */ 894 #else /* CONFIG_PPC32 */
895 895
/* CPU offline path for 64-bit G5s: mark the CPU dead, then spin in NAP
 * mode with interrupts enabled until the CPU gets kicked back online. */
static void pmac_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();

	/*
	 * turn off as much as possible, we'll be
	 * kicked out as this will only be invoked
	 * on core99 platforms for now ...
	 */

	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();

	/*
	 * during the path that leads here preemption is disabled,
	 * reenable it now so that when coming up preempt count is
	 * zero correctly
	 */
	preempt_enable();

	/*
	 * Re-enable interrupts. The NAP code needs to enable them
	 * anyways, do it now so we deal with the case where one already
	 * happened while soft-disabled.
	 * We shouldn't get any external interrupts, only decrementer, and the
	 * decrementer handler is safe for use on offline CPUs
	 */
	local_irq_enable();

	while (1) {
		/* let's not take timer interrupts too often ... */
		set_dec(0x7fffffff);

		/* Enter NAP mode */
		power4_idle();
	}
}
933 935
934 #endif /* else CONFIG_PPC32 */ 936 #endif /* else CONFIG_PPC32 */
935 #endif /* CONFIG_HOTPLUG_CPU */ 937 #endif /* CONFIG_HOTPLUG_CPU */
936 938
/* Core99 Macs (dual G4s and G5s): SMP operations, including the CPU
 * hotplug hooks when CONFIG_HOTPLUG_CPU is enabled. */
struct smp_ops_t core99_smp_ops = {
	.message_pass = smp_mpic_message_pass,
	.probe = smp_core99_probe,
	.kick_cpu = smp_core99_kick_cpu,
	.setup_cpu = smp_core99_setup_cpu,
	.give_timebase = smp_core99_give_timebase,
	.take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
	.cpu_disable = smp_core99_cpu_disable,
	.cpu_die = generic_cpu_die,
#endif
};
950 952
/* Select the SMP ops for this PowerMac: Core99 if a uni-n/u3/u4 bridge
 * node exists, otherwise (ppc32 only) assume a PowerSurge board and
 * mark the extra CPUs possible by hand. Also installs the hotplug
 * cpu_die hook. */
void __init pmac_setup_smp(void)
{
	struct device_node *np;

	/* Check for Core99 */
	np = of_find_node_by_name(NULL, "uni-n");
	if (!np)
		np = of_find_node_by_name(NULL, "u3");
	if (!np)
		np = of_find_node_by_name(NULL, "u4");
	if (np) {
		of_node_put(np);
		smp_ops = &core99_smp_ops;
	}
#ifdef CONFIG_PPC32
	else {
		/* We have to set bits in cpu_possible_mask here since the
		 * secondary CPU(s) aren't in the device tree. Various
		 * things won't be initialized for CPUs not in the possible
		 * map, so we really need to fix it up here.
		 */
		int cpu;

		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
			set_cpu_possible(cpu, true);
		smp_ops = &psurge_smp_ops;
	}
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die = pmac_cpu_die;
#endif
}
984 986
985 987