Commit fb380aadfe34e8d3ce628cb3e386882351940874
Committed by: Martin Schwidefsky
1 parent: 957a37ad58
Exists in: master and 7 other branches
[S390] Move __cpu_logical_map to smp.c
Finally move it to the place where it belongs and get rid of it for !CONFIG_SMP.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Showing 5 changed files with 18 additions and 8 deletions
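For orientation before the diffs: the whole change amounts to replacing direct reads of the __cpu_logical_map[] array with a cpu_logical_map() accessor, so that !CONFIG_SMP builds need no array at all and can obtain the single CPU's address from the hardware via stap() (the wrapper for the STORE CPU ADDRESS instruction). The following is a minimal, compilable userspace model of that pattern; the array values and stap_stub() are made up for illustration, the accessor itself mirrors the one added in sigp.h below.

#include <stdio.h>

/*
 * Model of the pattern introduced by this commit: on SMP builds the
 * logical-to-physical CPU address mapping lives in an array; on
 * !CONFIG_SMP there is exactly one CPU, so the accessor can ask the
 * hardware for its address instead of keeping an array around.
 */
#define CONFIG_SMP 1
#define NR_CPUS 4

#ifdef CONFIG_SMP
static int __cpu_logical_map[NR_CPUS] = { 0x10, 0x11, 0x12, 0x13 };
#endif

/* stand-in for stap(); the real one executes STORE CPU ADDRESS */
static int stap_stub(void)
{
	return 0x10;
}

static inline int cpu_logical_map(int cpu)
{
#ifdef CONFIG_SMP
	return __cpu_logical_map[cpu];
#else
	(void)cpu;		/* only one CPU exists */
	return stap_stub();
#endif
}

int main(void)
{
	printf("logical cpu 2 -> cpu address %#x\n", cpu_logical_map(2));
	return 0;
}

Flipping CONFIG_SMP off makes the array (and its 8 bytes per possible CPU) disappear entirely, which is the point of the patch.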
arch/s390/include/asm/sigp.h
1 | /* | 1 | /* |
2 | * include/asm-s390/sigp.h | 2 | * include/asm-s390/sigp.h |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
8 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 8 | * Heiko Carstens (heiko.carstens@de.ibm.com) |
9 | * | 9 | * |
10 | * sigp.h by D.J. Barrow (c) IBM 1999 | 10 | * sigp.h by D.J. Barrow (c) IBM 1999 |
11 | * contains routines / structures for signalling other S/390 processors in an | 11 | * contains routines / structures for signalling other S/390 processors in an |
12 | * SMP configuration. | 12 | * SMP configuration. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #ifndef __SIGP__ | 15 | #ifndef __SIGP__ |
16 | #define __SIGP__ | 16 | #define __SIGP__ |
17 | 17 | ||
18 | #include <asm/ptrace.h> | 18 | #include <asm/system.h> |
19 | #include <asm/atomic.h> | ||
20 | 19 | ||
21 | /* get real cpu address from logical cpu number */ | 20 | /* get real cpu address from logical cpu number */ |
22 | extern int __cpu_logical_map[]; | 21 | extern int __cpu_logical_map[]; |
23 | 22 | ||
23 | static inline int cpu_logical_map(int cpu) | ||
24 | { | ||
25 | #ifdef CONFIG_SMP | ||
26 | return __cpu_logical_map[cpu]; | ||
27 | #else | ||
28 | return stap(); | ||
29 | #endif | ||
30 | } | ||
31 | |||
24 | typedef enum | 32 | typedef enum |
25 | { | 33 | { |
26 | sigp_unassigned=0x0, | 34 | sigp_unassigned=0x0, |
27 | sigp_sense, | 35 | sigp_sense, |
28 | sigp_external_call, | 36 | sigp_external_call, |
29 | sigp_emergency_signal, | 37 | sigp_emergency_signal, |
30 | sigp_start, | 38 | sigp_start, |
31 | sigp_stop, | 39 | sigp_stop, |
32 | sigp_restart, | 40 | sigp_restart, |
33 | sigp_unassigned1, | 41 | sigp_unassigned1, |
34 | sigp_unassigned2, | 42 | sigp_unassigned2, |
35 | sigp_stop_and_store_status, | 43 | sigp_stop_and_store_status, |
36 | sigp_unassigned3, | 44 | sigp_unassigned3, |
37 | sigp_initial_cpu_reset, | 45 | sigp_initial_cpu_reset, |
38 | sigp_cpu_reset, | 46 | sigp_cpu_reset, |
39 | sigp_set_prefix, | 47 | sigp_set_prefix, |
40 | sigp_store_status_at_address, | 48 | sigp_store_status_at_address, |
41 | sigp_store_extended_status_at_address | 49 | sigp_store_extended_status_at_address |
42 | } sigp_order_code; | 50 | } sigp_order_code; |
43 | 51 | ||
44 | typedef __u32 sigp_status_word; | 52 | typedef __u32 sigp_status_word; |
45 | 53 | ||
46 | typedef enum | 54 | typedef enum |
47 | { | 55 | { |
48 | sigp_order_code_accepted=0, | 56 | sigp_order_code_accepted=0, |
49 | sigp_status_stored, | 57 | sigp_status_stored, |
50 | sigp_busy, | 58 | sigp_busy, |
51 | sigp_not_operational | 59 | sigp_not_operational |
52 | } sigp_ccode; | 60 | } sigp_ccode; |
53 | 61 | ||
54 | 62 | ||
55 | /* | 63 | /* |
56 | * Definitions for the external call | 64 | * Definitions for the external call |
57 | */ | 65 | */ |
58 | 66 | ||
59 | /* 'Bit' signals, asynchronous */ | 67 | /* 'Bit' signals, asynchronous */ |
60 | typedef enum | 68 | typedef enum |
61 | { | 69 | { |
62 | ec_schedule=0, | 70 | ec_schedule=0, |
63 | ec_call_function, | 71 | ec_call_function, |
64 | ec_call_function_single, | 72 | ec_call_function_single, |
65 | ec_bit_last | 73 | ec_bit_last |
66 | } ec_bit_sig; | 74 | } ec_bit_sig; |
67 | 75 | ||
68 | /* | 76 | /* |
69 | * Signal processor | 77 | * Signal processor |
70 | */ | 78 | */ |
71 | static inline sigp_ccode | 79 | static inline sigp_ccode |
72 | signal_processor(__u16 cpu_addr, sigp_order_code order_code) | 80 | signal_processor(__u16 cpu_addr, sigp_order_code order_code) |
73 | { | 81 | { |
74 | register unsigned long reg1 asm ("1") = 0; | 82 | register unsigned long reg1 asm ("1") = 0; |
75 | sigp_ccode ccode; | 83 | sigp_ccode ccode; |
76 | 84 | ||
77 | asm volatile( | 85 | asm volatile( |
78 | " sigp %1,%2,0(%3)\n" | 86 | " sigp %1,%2,0(%3)\n" |
79 | " ipm %0\n" | 87 | " ipm %0\n" |
80 | " srl %0,28\n" | 88 | " srl %0,28\n" |
81 | : "=d" (ccode) | 89 | : "=d" (ccode) |
82 | : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), | 90 | : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), |
83 | "a" (order_code) : "cc" , "memory"); | 91 | "a" (order_code) : "cc" , "memory"); |
84 | return ccode; | 92 | return ccode; |
85 | } | 93 | } |
86 | 94 | ||
87 | /* | 95 | /* |
88 | * Signal processor with parameter | 96 | * Signal processor with parameter |
89 | */ | 97 | */ |
90 | static inline sigp_ccode | 98 | static inline sigp_ccode |
91 | signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) | 99 | signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) |
92 | { | 100 | { |
93 | register unsigned int reg1 asm ("1") = parameter; | 101 | register unsigned int reg1 asm ("1") = parameter; |
94 | sigp_ccode ccode; | 102 | sigp_ccode ccode; |
95 | 103 | ||
96 | asm volatile( | 104 | asm volatile( |
97 | " sigp %1,%2,0(%3)\n" | 105 | " sigp %1,%2,0(%3)\n" |
98 | " ipm %0\n" | 106 | " ipm %0\n" |
99 | " srl %0,28\n" | 107 | " srl %0,28\n" |
100 | : "=d" (ccode) | 108 | : "=d" (ccode) |
101 | : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), | 109 | : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), |
102 | "a" (order_code) : "cc" , "memory"); | 110 | "a" (order_code) : "cc" , "memory"); |
103 | return ccode; | 111 | return ccode; |
104 | } | 112 | } |
105 | 113 | ||
106 | /* | 114 | /* |
107 | * Signal processor with parameter and return status | 115 | * Signal processor with parameter and return status |
108 | */ | 116 | */ |
109 | static inline sigp_ccode | 117 | static inline sigp_ccode |
110 | signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, | 118 | signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, |
111 | sigp_order_code order_code) | 119 | sigp_order_code order_code) |
112 | { | 120 | { |
113 | register unsigned int reg1 asm ("1") = parameter; | 121 | register unsigned int reg1 asm ("1") = parameter; |
114 | sigp_ccode ccode; | 122 | sigp_ccode ccode; |
115 | 123 | ||
116 | asm volatile( | 124 | asm volatile( |
117 | " sigp %1,%2,0(%3)\n" | 125 | " sigp %1,%2,0(%3)\n" |
118 | " ipm %0\n" | 126 | " ipm %0\n" |
119 | " srl %0,28\n" | 127 | " srl %0,28\n" |
120 | : "=d" (ccode), "+d" (reg1) | 128 | : "=d" (ccode), "+d" (reg1) |
121 | : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) | 129 | : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) |
122 | : "cc" , "memory"); | 130 | : "cc" , "memory"); |
123 | *statusptr = reg1; | 131 | *statusptr = reg1; |
124 | return ccode; | 132 | return ccode; |
125 | } | 133 | } |
126 | 134 | ||
127 | #endif /* __SIGP__ */ | 135 | #endif /* __SIGP__ */ |
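With the accessor in place, sigp callers keep passing a logical CPU number and the translation to a physical CPU address happens inside the three wrappers shown above. A hypothetical call site, loosely modeled on the s390 SMP code rather than taken from this commit, would retry while the target processor reports busy:

#include <asm/sigp.h>

/*
 * Hypothetical helper (kernel context assumed): order the given
 * logical CPU to stop, retrying as long as SIGP reports the target
 * as busy. signal_processor() translates the logical number to a
 * physical CPU address via cpu_logical_map() internally.
 */
static void stop_cpu(int cpu)
{
	while (signal_processor(cpu, sigp_stop) == sigp_busy)
		;	/* busy-wait until the order is accepted */
}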
arch/s390/kernel/setup.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/setup.c | 2 | * arch/s390/kernel/setup.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | 6 | * Author(s): Hartmut Penner (hp@de.ibm.com), |
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
8 | * | 8 | * |
9 | * Derived from "arch/i386/kernel/setup.c" | 9 | * Derived from "arch/i386/kernel/setup.c" |
10 | * Copyright (C) 1995, Linus Torvalds | 10 | * Copyright (C) 1995, Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * This file handles the architecture-dependent parts of initialization | 14 | * This file handles the architecture-dependent parts of initialization |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define KMSG_COMPONENT "setup" | 17 | #define KMSG_COMPONENT "setup" |
18 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 18 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
19 | 19 | ||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/unistd.h> | 26 | #include <linux/unistd.h> |
27 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/user.h> | 29 | #include <linux/user.h> |
30 | #include <linux/tty.h> | 30 | #include <linux/tty.h> |
31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/initrd.h> | 34 | #include <linux/initrd.h> |
35 | #include <linux/bootmem.h> | 35 | #include <linux/bootmem.h> |
36 | #include <linux/root_dev.h> | 36 | #include <linux/root_dev.h> |
37 | #include <linux/console.h> | 37 | #include <linux/console.h> |
38 | #include <linux/kernel_stat.h> | 38 | #include <linux/kernel_stat.h> |
39 | #include <linux/device.h> | 39 | #include <linux/device.h> |
40 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
41 | #include <linux/pfn.h> | 41 | #include <linux/pfn.h> |
42 | #include <linux/ctype.h> | 42 | #include <linux/ctype.h> |
43 | #include <linux/reboot.h> | 43 | #include <linux/reboot.h> |
44 | #include <linux/topology.h> | 44 | #include <linux/topology.h> |
45 | #include <linux/ftrace.h> | 45 | #include <linux/ftrace.h> |
46 | 46 | ||
47 | #include <asm/ipl.h> | 47 | #include <asm/ipl.h> |
48 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
49 | #include <asm/system.h> | 49 | #include <asm/system.h> |
50 | #include <asm/smp.h> | 50 | #include <asm/smp.h> |
51 | #include <asm/mmu_context.h> | 51 | #include <asm/mmu_context.h> |
52 | #include <asm/cpcmd.h> | 52 | #include <asm/cpcmd.h> |
53 | #include <asm/lowcore.h> | 53 | #include <asm/lowcore.h> |
54 | #include <asm/irq.h> | 54 | #include <asm/irq.h> |
55 | #include <asm/page.h> | 55 | #include <asm/page.h> |
56 | #include <asm/ptrace.h> | 56 | #include <asm/ptrace.h> |
57 | #include <asm/sections.h> | 57 | #include <asm/sections.h> |
58 | #include <asm/ebcdic.h> | 58 | #include <asm/ebcdic.h> |
59 | #include <asm/compat.h> | 59 | #include <asm/compat.h> |
60 | #include <asm/kvm_virtio.h> | 60 | #include <asm/kvm_virtio.h> |
61 | 61 | ||
62 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | 62 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | |
63 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | 63 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); |
64 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 64 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | |
65 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 65 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | |
66 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 66 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * User copy operations. | 69 | * User copy operations. |
70 | */ | 70 | */ |
71 | struct uaccess_ops uaccess; | 71 | struct uaccess_ops uaccess; |
72 | EXPORT_SYMBOL(uaccess); | 72 | EXPORT_SYMBOL(uaccess); |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Machine setup.. | 75 | * Machine setup.. |
76 | */ | 76 | */ |
77 | unsigned int console_mode = 0; | 77 | unsigned int console_mode = 0; |
78 | EXPORT_SYMBOL(console_mode); | 78 | EXPORT_SYMBOL(console_mode); |
79 | 79 | ||
80 | unsigned int console_devno = -1; | 80 | unsigned int console_devno = -1; |
81 | EXPORT_SYMBOL(console_devno); | 81 | EXPORT_SYMBOL(console_devno); |
82 | 82 | ||
83 | unsigned int console_irq = -1; | 83 | unsigned int console_irq = -1; |
84 | EXPORT_SYMBOL(console_irq); | 84 | EXPORT_SYMBOL(console_irq); |
85 | 85 | ||
86 | unsigned long elf_hwcap = 0; | 86 | unsigned long elf_hwcap = 0; |
87 | char elf_platform[ELF_PLATFORM_SIZE]; | 87 | char elf_platform[ELF_PLATFORM_SIZE]; |
88 | 88 | ||
89 | struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; | 89 | struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; |
90 | int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ | ||
91 | 90 | ||
92 | int __initdata memory_end_set; | 91 | int __initdata memory_end_set; |
93 | unsigned long __initdata memory_end; | 92 | unsigned long __initdata memory_end; |
94 | 93 | ||
95 | /* An array with a pointer to the lowcore of every CPU. */ | 94 | /* An array with a pointer to the lowcore of every CPU. */ |
96 | struct _lowcore *lowcore_ptr[NR_CPUS]; | 95 | struct _lowcore *lowcore_ptr[NR_CPUS]; |
97 | EXPORT_SYMBOL(lowcore_ptr); | 96 | EXPORT_SYMBOL(lowcore_ptr); |
98 | 97 | ||
99 | /* | 98 | /* |
100 | * This is set up by the setup-routine at boot-time | 99 | * This is set up by the setup-routine at boot-time |
101 | * for S390 need to find out, what we have to setup | 100 | * for S390 need to find out, what we have to setup |
102 | * using address 0x10400 ... | 101 | * using address 0x10400 ... |
103 | */ | 102 | */ |
104 | 103 | ||
105 | #include <asm/setup.h> | 104 | #include <asm/setup.h> |
106 | 105 | ||
107 | static struct resource code_resource = { | 106 | static struct resource code_resource = { |
108 | .name = "Kernel code", | 107 | .name = "Kernel code", |
109 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | 108 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, |
110 | }; | 109 | }; |
111 | 110 | ||
112 | static struct resource data_resource = { | 111 | static struct resource data_resource = { |
113 | .name = "Kernel data", | 112 | .name = "Kernel data", |
114 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | 113 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, |
115 | }; | 114 | }; |
116 | 115 | ||
117 | /* | 116 | /* |
118 | * cpu_init() initializes state that is per-CPU. | 117 | * cpu_init() initializes state that is per-CPU. |
119 | */ | 118 | */ |
120 | void __cpuinit cpu_init(void) | 119 | void __cpuinit cpu_init(void) |
121 | { | 120 | { |
122 | /* | 121 | /* |
123 | * Store processor id in lowcore (used e.g. in timer_interrupt) | 122 | * Store processor id in lowcore (used e.g. in timer_interrupt) |
124 | */ | 123 | */ |
125 | get_cpu_id(&S390_lowcore.cpu_id); | 124 | get_cpu_id(&S390_lowcore.cpu_id); |
126 | 125 | ||
127 | atomic_inc(&init_mm.mm_count); | 126 | atomic_inc(&init_mm.mm_count); |
128 | current->active_mm = &init_mm; | 127 | current->active_mm = &init_mm; |
129 | BUG_ON(current->mm); | 128 | BUG_ON(current->mm); |
130 | enter_lazy_tlb(&init_mm, current); | 129 | enter_lazy_tlb(&init_mm, current); |
131 | } | 130 | } |
132 | 131 | ||
133 | /* | 132 | /* |
134 | * condev= and conmode= setup parameter. | 133 | * condev= and conmode= setup parameter. |
135 | */ | 134 | */ |
136 | 135 | ||
137 | static int __init condev_setup(char *str) | 136 | static int __init condev_setup(char *str) |
138 | { | 137 | { |
139 | int vdev; | 138 | int vdev; |
140 | 139 | ||
141 | vdev = simple_strtoul(str, &str, 0); | 140 | vdev = simple_strtoul(str, &str, 0); |
142 | if (vdev >= 0 && vdev < 65536) { | 141 | if (vdev >= 0 && vdev < 65536) { |
143 | console_devno = vdev; | 142 | console_devno = vdev; |
144 | console_irq = -1; | 143 | console_irq = -1; |
145 | } | 144 | } |
146 | return 1; | 145 | return 1; |
147 | } | 146 | } |
148 | 147 | ||
149 | __setup("condev=", condev_setup); | 148 | __setup("condev=", condev_setup); |
150 | 149 | ||
151 | static void __init set_preferred_console(void) | 150 | static void __init set_preferred_console(void) |
152 | { | 151 | { |
153 | if (MACHINE_IS_KVM) | 152 | if (MACHINE_IS_KVM) |
154 | add_preferred_console("hvc", 0, NULL); | 153 | add_preferred_console("hvc", 0, NULL); |
155 | else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) | 154 | else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) |
156 | add_preferred_console("ttyS", 0, NULL); | 155 | add_preferred_console("ttyS", 0, NULL); |
157 | else if (CONSOLE_IS_3270) | 156 | else if (CONSOLE_IS_3270) |
158 | add_preferred_console("tty3270", 0, NULL); | 157 | add_preferred_console("tty3270", 0, NULL); |
159 | } | 158 | } |
160 | 159 | ||
161 | static int __init conmode_setup(char *str) | 160 | static int __init conmode_setup(char *str) |
162 | { | 161 | { |
163 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 162 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
164 | if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) | 163 | if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) |
165 | SET_CONSOLE_SCLP; | 164 | SET_CONSOLE_SCLP; |
166 | #endif | 165 | #endif |
167 | #if defined(CONFIG_TN3215_CONSOLE) | 166 | #if defined(CONFIG_TN3215_CONSOLE) |
168 | if (strncmp(str, "3215", 5) == 0) | 167 | if (strncmp(str, "3215", 5) == 0) |
169 | SET_CONSOLE_3215; | 168 | SET_CONSOLE_3215; |
170 | #endif | 169 | #endif |
171 | #if defined(CONFIG_TN3270_CONSOLE) | 170 | #if defined(CONFIG_TN3270_CONSOLE) |
172 | if (strncmp(str, "3270", 5) == 0) | 171 | if (strncmp(str, "3270", 5) == 0) |
173 | SET_CONSOLE_3270; | 172 | SET_CONSOLE_3270; |
174 | #endif | 173 | #endif |
175 | set_preferred_console(); | 174 | set_preferred_console(); |
176 | return 1; | 175 | return 1; |
177 | } | 176 | } |
178 | 177 | ||
179 | __setup("conmode=", conmode_setup); | 178 | __setup("conmode=", conmode_setup); |
180 | 179 | ||
181 | static void __init conmode_default(void) | 180 | static void __init conmode_default(void) |
182 | { | 181 | { |
183 | char query_buffer[1024]; | 182 | char query_buffer[1024]; |
184 | char *ptr; | 183 | char *ptr; |
185 | 184 | ||
186 | if (MACHINE_IS_VM) { | 185 | if (MACHINE_IS_VM) { |
187 | cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); | 186 | cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); |
188 | console_devno = simple_strtoul(query_buffer + 5, NULL, 16); | 187 | console_devno = simple_strtoul(query_buffer + 5, NULL, 16); |
189 | ptr = strstr(query_buffer, "SUBCHANNEL ="); | 188 | ptr = strstr(query_buffer, "SUBCHANNEL ="); |
190 | console_irq = simple_strtoul(ptr + 13, NULL, 16); | 189 | console_irq = simple_strtoul(ptr + 13, NULL, 16); |
191 | cpcmd("QUERY TERM", query_buffer, 1024, NULL); | 190 | cpcmd("QUERY TERM", query_buffer, 1024, NULL); |
192 | ptr = strstr(query_buffer, "CONMODE"); | 191 | ptr = strstr(query_buffer, "CONMODE"); |
193 | /* | 192 | /* |
194 | * Set the conmode to 3215 so that the device recognition | 193 | * Set the conmode to 3215 so that the device recognition |
195 | * will set the cu_type of the console to 3215. If the | 194 | * will set the cu_type of the console to 3215. If the |
196 | * conmode is 3270 and we don't set it back then both | 195 | * conmode is 3270 and we don't set it back then both |
197 | * 3215 and the 3270 driver will try to access the console | 196 | * 3215 and the 3270 driver will try to access the console |
198 | * device (3215 as console and 3270 as normal tty). | 197 | * device (3215 as console and 3270 as normal tty). |
199 | */ | 198 | */ |
200 | cpcmd("TERM CONMODE 3215", NULL, 0, NULL); | 199 | cpcmd("TERM CONMODE 3215", NULL, 0, NULL); |
201 | if (ptr == NULL) { | 200 | if (ptr == NULL) { |
202 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 201 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
203 | SET_CONSOLE_SCLP; | 202 | SET_CONSOLE_SCLP; |
204 | #endif | 203 | #endif |
205 | return; | 204 | return; |
206 | } | 205 | } |
207 | if (strncmp(ptr + 8, "3270", 4) == 0) { | 206 | if (strncmp(ptr + 8, "3270", 4) == 0) { |
208 | #if defined(CONFIG_TN3270_CONSOLE) | 207 | #if defined(CONFIG_TN3270_CONSOLE) |
209 | SET_CONSOLE_3270; | 208 | SET_CONSOLE_3270; |
210 | #elif defined(CONFIG_TN3215_CONSOLE) | 209 | #elif defined(CONFIG_TN3215_CONSOLE) |
211 | SET_CONSOLE_3215; | 210 | SET_CONSOLE_3215; |
212 | #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 211 | #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
213 | SET_CONSOLE_SCLP; | 212 | SET_CONSOLE_SCLP; |
214 | #endif | 213 | #endif |
215 | } else if (strncmp(ptr + 8, "3215", 4) == 0) { | 214 | } else if (strncmp(ptr + 8, "3215", 4) == 0) { |
216 | #if defined(CONFIG_TN3215_CONSOLE) | 215 | #if defined(CONFIG_TN3215_CONSOLE) |
217 | SET_CONSOLE_3215; | 216 | SET_CONSOLE_3215; |
218 | #elif defined(CONFIG_TN3270_CONSOLE) | 217 | #elif defined(CONFIG_TN3270_CONSOLE) |
219 | SET_CONSOLE_3270; | 218 | SET_CONSOLE_3270; |
220 | #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 219 | #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
221 | SET_CONSOLE_SCLP; | 220 | SET_CONSOLE_SCLP; |
222 | #endif | 221 | #endif |
223 | } | 222 | } |
224 | } else { | 223 | } else { |
225 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 224 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
226 | SET_CONSOLE_SCLP; | 225 | SET_CONSOLE_SCLP; |
227 | #endif | 226 | #endif |
228 | } | 227 | } |
229 | } | 228 | } |
230 | 229 | ||
231 | #ifdef CONFIG_ZFCPDUMP | 230 | #ifdef CONFIG_ZFCPDUMP |
232 | static void __init setup_zfcpdump(unsigned int console_devno) | 231 | static void __init setup_zfcpdump(unsigned int console_devno) |
233 | { | 232 | { |
234 | static char str[41]; | 233 | static char str[41]; |
235 | 234 | ||
236 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 235 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
237 | return; | 236 | return; |
238 | if (console_devno != -1) | 237 | if (console_devno != -1) |
239 | sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", | 238 | sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", |
240 | ipl_info.data.fcp.dev_id.devno, console_devno); | 239 | ipl_info.data.fcp.dev_id.devno, console_devno); |
241 | else | 240 | else |
242 | sprintf(str, " cio_ignore=all,!0.0.%04x", | 241 | sprintf(str, " cio_ignore=all,!0.0.%04x", |
243 | ipl_info.data.fcp.dev_id.devno); | 242 | ipl_info.data.fcp.dev_id.devno); |
244 | strcat(boot_command_line, str); | 243 | strcat(boot_command_line, str); |
245 | console_loglevel = 2; | 244 | console_loglevel = 2; |
246 | } | 245 | } |
247 | #else | 246 | #else |
248 | static inline void setup_zfcpdump(unsigned int console_devno) {} | 247 | static inline void setup_zfcpdump(unsigned int console_devno) {} |
249 | #endif /* CONFIG_ZFCPDUMP */ | 248 | #endif /* CONFIG_ZFCPDUMP */ |
250 | 249 | ||
251 | /* | 250 | /* |
252 | * Reboot, halt and power_off stubs. They just call _machine_restart, | 251 | * Reboot, halt and power_off stubs. They just call _machine_restart, |
253 | * _machine_halt or _machine_power_off. | 252 | * _machine_halt or _machine_power_off. |
254 | */ | 253 | */ |
255 | 254 | ||
256 | void machine_restart(char *command) | 255 | void machine_restart(char *command) |
257 | { | 256 | { |
258 | if ((!in_interrupt() && !in_atomic()) || oops_in_progress) | 257 | if ((!in_interrupt() && !in_atomic()) || oops_in_progress) |
259 | /* | 258 | /* |
260 | * Only unblank the console if we are called in enabled | 259 | * Only unblank the console if we are called in enabled |
261 | * context or a bust_spinlocks cleared the way for us. | 260 | * context or a bust_spinlocks cleared the way for us. |
262 | */ | 261 | */ |
263 | console_unblank(); | 262 | console_unblank(); |
264 | _machine_restart(command); | 263 | _machine_restart(command); |
265 | } | 264 | } |
266 | 265 | ||
267 | void machine_halt(void) | 266 | void machine_halt(void) |
268 | { | 267 | { |
269 | if (!in_interrupt() || oops_in_progress) | 268 | if (!in_interrupt() || oops_in_progress) |
270 | /* | 269 | /* |
271 | * Only unblank the console if we are called in enabled | 270 | * Only unblank the console if we are called in enabled |
272 | * context or a bust_spinlocks cleared the way for us. | 271 | * context or a bust_spinlocks cleared the way for us. |
273 | */ | 272 | */ |
274 | console_unblank(); | 273 | console_unblank(); |
275 | _machine_halt(); | 274 | _machine_halt(); |
276 | } | 275 | } |
277 | 276 | ||
278 | void machine_power_off(void) | 277 | void machine_power_off(void) |
279 | { | 278 | { |
280 | if (!in_interrupt() || oops_in_progress) | 279 | if (!in_interrupt() || oops_in_progress) |
281 | /* | 280 | /* |
282 | * Only unblank the console if we are called in enabled | 281 | * Only unblank the console if we are called in enabled |
283 | * context or a bust_spinlocks cleared the way for us. | 282 | * context or a bust_spinlocks cleared the way for us. |
284 | */ | 283 | */ |
285 | console_unblank(); | 284 | console_unblank(); |
286 | _machine_power_off(); | 285 | _machine_power_off(); |
287 | } | 286 | } |
288 | 287 | ||
289 | /* | 288 | /* |
290 | * Dummy power off function. | 289 | * Dummy power off function. |
291 | */ | 290 | */ |
292 | void (*pm_power_off)(void) = machine_power_off; | 291 | void (*pm_power_off)(void) = machine_power_off; |
293 | 292 | ||
294 | static int __init early_parse_mem(char *p) | 293 | static int __init early_parse_mem(char *p) |
295 | { | 294 | { |
296 | memory_end = memparse(p, &p); | 295 | memory_end = memparse(p, &p); |
297 | memory_end_set = 1; | 296 | memory_end_set = 1; |
298 | return 0; | 297 | return 0; |
299 | } | 298 | } |
300 | early_param("mem", early_parse_mem); | 299 | early_param("mem", early_parse_mem); |
301 | 300 | ||
302 | unsigned int user_mode = HOME_SPACE_MODE; | 301 | unsigned int user_mode = HOME_SPACE_MODE; |
303 | EXPORT_SYMBOL_GPL(user_mode); | 302 | EXPORT_SYMBOL_GPL(user_mode); |
304 | 303 | ||
305 | static int set_amode_and_uaccess(unsigned long user_amode, | 304 | static int set_amode_and_uaccess(unsigned long user_amode, |
306 | unsigned long user32_amode) | 305 | unsigned long user32_amode) |
307 | { | 306 | { |
308 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 307 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | |
309 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 308 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | |
310 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | 309 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; |
311 | #ifdef CONFIG_COMPAT | 310 | #ifdef CONFIG_COMPAT |
312 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | 311 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | |
313 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 312 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | |
314 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | 313 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; |
315 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | 314 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | |
316 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | 315 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | |
317 | PSW32_MASK_PSTATE; | 316 | PSW32_MASK_PSTATE; |
318 | #endif | 317 | #endif |
319 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 318 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | |
320 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | 319 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; |
321 | 320 | ||
322 | if (MACHINE_HAS_MVCOS) { | 321 | if (MACHINE_HAS_MVCOS) { |
323 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 322 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
324 | return 1; | 323 | return 1; |
325 | } else { | 324 | } else { |
326 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); | 325 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); |
327 | return 0; | 326 | return 0; |
328 | } | 327 | } |
329 | } | 328 | } |
330 | 329 | ||
331 | /* | 330 | /* |
332 | * Switch kernel/user addressing modes? | 331 | * Switch kernel/user addressing modes? |
333 | */ | 332 | */ |
334 | static int __init early_parse_switch_amode(char *p) | 333 | static int __init early_parse_switch_amode(char *p) |
335 | { | 334 | { |
336 | if (user_mode != SECONDARY_SPACE_MODE) | 335 | if (user_mode != SECONDARY_SPACE_MODE) |
337 | user_mode = PRIMARY_SPACE_MODE; | 336 | user_mode = PRIMARY_SPACE_MODE; |
338 | return 0; | 337 | return 0; |
339 | } | 338 | } |
340 | early_param("switch_amode", early_parse_switch_amode); | 339 | early_param("switch_amode", early_parse_switch_amode); |
341 | 340 | ||
342 | static int __init early_parse_user_mode(char *p) | 341 | static int __init early_parse_user_mode(char *p) |
343 | { | 342 | { |
344 | if (p && strcmp(p, "primary") == 0) | 343 | if (p && strcmp(p, "primary") == 0) |
345 | user_mode = PRIMARY_SPACE_MODE; | 344 | user_mode = PRIMARY_SPACE_MODE; |
346 | #ifdef CONFIG_S390_EXEC_PROTECT | 345 | #ifdef CONFIG_S390_EXEC_PROTECT |
347 | else if (p && strcmp(p, "secondary") == 0) | 346 | else if (p && strcmp(p, "secondary") == 0) |
348 | user_mode = SECONDARY_SPACE_MODE; | 347 | user_mode = SECONDARY_SPACE_MODE; |
349 | #endif | 348 | #endif |
350 | else if (!p || strcmp(p, "home") == 0) | 349 | else if (!p || strcmp(p, "home") == 0) |
351 | user_mode = HOME_SPACE_MODE; | 350 | user_mode = HOME_SPACE_MODE; |
352 | else | 351 | else |
353 | return 1; | 352 | return 1; |
354 | return 0; | 353 | return 0; |
355 | } | 354 | } |
356 | early_param("user_mode", early_parse_user_mode); | 355 | early_param("user_mode", early_parse_user_mode); |
357 | 356 | ||
358 | #ifdef CONFIG_S390_EXEC_PROTECT | 357 | #ifdef CONFIG_S390_EXEC_PROTECT |
359 | /* | 358 | /* |
360 | * Enable execute protection? | 359 | * Enable execute protection? |
361 | */ | 360 | */ |
362 | static int __init early_parse_noexec(char *p) | 361 | static int __init early_parse_noexec(char *p) |
363 | { | 362 | { |
364 | if (!strncmp(p, "off", 3)) | 363 | if (!strncmp(p, "off", 3)) |
365 | return 0; | 364 | return 0; |
366 | user_mode = SECONDARY_SPACE_MODE; | 365 | user_mode = SECONDARY_SPACE_MODE; |
367 | return 0; | 366 | return 0; |
368 | } | 367 | } |
369 | early_param("noexec", early_parse_noexec); | 368 | early_param("noexec", early_parse_noexec); |
370 | #endif /* CONFIG_S390_EXEC_PROTECT */ | 369 | #endif /* CONFIG_S390_EXEC_PROTECT */ |
371 | 370 | ||
372 | static void setup_addressing_mode(void) | 371 | static void setup_addressing_mode(void) |
373 | { | 372 | { |
374 | if (user_mode == SECONDARY_SPACE_MODE) { | 373 | if (user_mode == SECONDARY_SPACE_MODE) { |
375 | if (set_amode_and_uaccess(PSW_ASC_SECONDARY, | 374 | if (set_amode_and_uaccess(PSW_ASC_SECONDARY, |
376 | PSW32_ASC_SECONDARY)) | 375 | PSW32_ASC_SECONDARY)) |
377 | pr_info("Execute protection active, " | 376 | pr_info("Execute protection active, " |
378 | "mvcos available\n"); | 377 | "mvcos available\n"); |
379 | else | 378 | else |
380 | pr_info("Execute protection active, " | 379 | pr_info("Execute protection active, " |
381 | "mvcos not available\n"); | 380 | "mvcos not available\n"); |
382 | } else if (user_mode == PRIMARY_SPACE_MODE) { | 381 | } else if (user_mode == PRIMARY_SPACE_MODE) { |
383 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) | 382 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) |
384 | pr_info("Address spaces switched, " | 383 | pr_info("Address spaces switched, " |
385 | "mvcos available\n"); | 384 | "mvcos available\n"); |
386 | else | 385 | else |
387 | pr_info("Address spaces switched, " | 386 | pr_info("Address spaces switched, " |
388 | "mvcos not available\n"); | 387 | "mvcos not available\n"); |
389 | } | 388 | } |
390 | #ifdef CONFIG_TRACE_IRQFLAGS | 389 | #ifdef CONFIG_TRACE_IRQFLAGS |
391 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 390 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
392 | io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 391 | io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
393 | #endif | 392 | #endif |
394 | } | 393 | } |
395 | 394 | ||
396 | static void __init | 395 | static void __init |
397 | setup_lowcore(void) | 396 | setup_lowcore(void) |
398 | { | 397 | { |
399 | struct _lowcore *lc; | 398 | struct _lowcore *lc; |
400 | int lc_pages; | 399 | int lc_pages; |
401 | 400 | ||
402 | /* | 401 | /* |
403 | * Setup lowcore for boot cpu | 402 | * Setup lowcore for boot cpu |
404 | */ | 403 | */ |
405 | lc_pages = sizeof(void *) == 8 ? 2 : 1; | 404 | lc_pages = sizeof(void *) == 8 ? 2 : 1; |
406 | lc = (struct _lowcore *) | 405 | lc = (struct _lowcore *) |
407 | __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); | 406 | __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); |
408 | memset(lc, 0, lc_pages * PAGE_SIZE); | 407 | memset(lc, 0, lc_pages * PAGE_SIZE); |
409 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 408 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
410 | lc->restart_psw.addr = | 409 | lc->restart_psw.addr = |
411 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 410 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
412 | if (user_mode != HOME_SPACE_MODE) | 411 | if (user_mode != HOME_SPACE_MODE) |
413 | lc->restart_psw.mask |= PSW_ASC_HOME; | 412 | lc->restart_psw.mask |= PSW_ASC_HOME; |
414 | lc->external_new_psw.mask = psw_kernel_bits; | 413 | lc->external_new_psw.mask = psw_kernel_bits; |
415 | lc->external_new_psw.addr = | 414 | lc->external_new_psw.addr = |
416 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 415 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
417 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 416 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; |
418 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 417 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
419 | lc->program_new_psw.mask = psw_kernel_bits; | 418 | lc->program_new_psw.mask = psw_kernel_bits; |
420 | lc->program_new_psw.addr = | 419 | lc->program_new_psw.addr = |
421 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 420 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; |
422 | lc->mcck_new_psw.mask = | 421 | lc->mcck_new_psw.mask = |
423 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | 422 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; |
424 | lc->mcck_new_psw.addr = | 423 | lc->mcck_new_psw.addr = |
425 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 424 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
426 | lc->io_new_psw.mask = psw_kernel_bits; | 425 | lc->io_new_psw.mask = psw_kernel_bits; |
427 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 426 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
428 | lc->clock_comparator = -1ULL; | 427 | lc->clock_comparator = -1ULL; |
429 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 428 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
430 | lc->async_stack = (unsigned long) | 429 | lc->async_stack = (unsigned long) |
431 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | 430 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; |
432 | lc->panic_stack = (unsigned long) | 431 | lc->panic_stack = (unsigned long) |
433 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; | 432 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; |
434 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | 433 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; |
435 | lc->thread_info = (unsigned long) &init_thread_union; | 434 | lc->thread_info = (unsigned long) &init_thread_union; |
436 | lc->machine_flags = S390_lowcore.machine_flags; | 435 | lc->machine_flags = S390_lowcore.machine_flags; |
437 | #ifndef CONFIG_64BIT | 436 | #ifndef CONFIG_64BIT |
438 | if (MACHINE_HAS_IEEE) { | 437 | if (MACHINE_HAS_IEEE) { |
439 | lc->extended_save_area_addr = (__u32) | 438 | lc->extended_save_area_addr = (__u32) |
440 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); | 439 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); |
441 | /* enable extended save area */ | 440 | /* enable extended save area */ |
442 | __ctl_set_bit(14, 29); | 441 | __ctl_set_bit(14, 29); |
443 | } | 442 | } |
444 | #else | 443 | #else |
445 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; | 444 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; |
446 | #endif | 445 | #endif |
447 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; | 446 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; |
448 | lc->async_enter_timer = S390_lowcore.async_enter_timer; | 447 | lc->async_enter_timer = S390_lowcore.async_enter_timer; |
449 | lc->exit_timer = S390_lowcore.exit_timer; | 448 | lc->exit_timer = S390_lowcore.exit_timer; |
450 | lc->user_timer = S390_lowcore.user_timer; | 449 | lc->user_timer = S390_lowcore.user_timer; |
451 | lc->system_timer = S390_lowcore.system_timer; | 450 | lc->system_timer = S390_lowcore.system_timer; |
452 | lc->steal_timer = S390_lowcore.steal_timer; | 451 | lc->steal_timer = S390_lowcore.steal_timer; |
453 | lc->last_update_timer = S390_lowcore.last_update_timer; | 452 | lc->last_update_timer = S390_lowcore.last_update_timer; |
454 | lc->last_update_clock = S390_lowcore.last_update_clock; | 453 | lc->last_update_clock = S390_lowcore.last_update_clock; |
455 | lc->ftrace_func = S390_lowcore.ftrace_func; | 454 | lc->ftrace_func = S390_lowcore.ftrace_func; |
456 | set_prefix((u32)(unsigned long) lc); | 455 | set_prefix((u32)(unsigned long) lc); |
457 | lowcore_ptr[0] = lc; | 456 | lowcore_ptr[0] = lc; |
458 | } | 457 | } |
459 | 458 | ||
460 | static void __init | 459 | static void __init |
461 | setup_resources(void) | 460 | setup_resources(void) |
462 | { | 461 | { |
463 | struct resource *res, *sub_res; | 462 | struct resource *res, *sub_res; |
464 | int i; | 463 | int i; |
465 | 464 | ||
466 | code_resource.start = (unsigned long) &_text; | 465 | code_resource.start = (unsigned long) &_text; |
467 | code_resource.end = (unsigned long) &_etext - 1; | 466 | code_resource.end = (unsigned long) &_etext - 1; |
468 | data_resource.start = (unsigned long) &_etext; | 467 | data_resource.start = (unsigned long) &_etext; |
469 | data_resource.end = (unsigned long) &_edata - 1; | 468 | data_resource.end = (unsigned long) &_edata - 1; |
470 | 469 | ||
471 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 470 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
472 | if (!memory_chunk[i].size) | 471 | if (!memory_chunk[i].size) |
473 | continue; | 472 | continue; |
474 | res = alloc_bootmem_low(sizeof(struct resource)); | 473 | res = alloc_bootmem_low(sizeof(struct resource)); |
475 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 474 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
476 | switch (memory_chunk[i].type) { | 475 | switch (memory_chunk[i].type) { |
477 | case CHUNK_READ_WRITE: | 476 | case CHUNK_READ_WRITE: |
478 | res->name = "System RAM"; | 477 | res->name = "System RAM"; |
479 | break; | 478 | break; |
480 | case CHUNK_READ_ONLY: | 479 | case CHUNK_READ_ONLY: |
481 | res->name = "System ROM"; | 480 | res->name = "System ROM"; |
482 | res->flags |= IORESOURCE_READONLY; | 481 | res->flags |= IORESOURCE_READONLY; |
483 | break; | 482 | break; |
484 | default: | 483 | default: |
485 | res->name = "reserved"; | 484 | res->name = "reserved"; |
486 | } | 485 | } |
487 | res->start = memory_chunk[i].addr; | 486 | res->start = memory_chunk[i].addr; |
488 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | 487 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; |
489 | request_resource(&iomem_resource, res); | 488 | request_resource(&iomem_resource, res); |
490 | 489 | ||
491 | if (code_resource.start >= res->start && | 490 | if (code_resource.start >= res->start && |
492 | code_resource.start <= res->end && | 491 | code_resource.start <= res->end && |
493 | code_resource.end > res->end) { | 492 | code_resource.end > res->end) { |
494 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | 493 | sub_res = alloc_bootmem_low(sizeof(struct resource)); |
495 | memcpy(sub_res, &code_resource, | 494 | memcpy(sub_res, &code_resource, |
496 | sizeof(struct resource)); | 495 | sizeof(struct resource)); |
497 | sub_res->end = res->end; | 496 | sub_res->end = res->end; |
498 | code_resource.start = res->end + 1; | 497 | code_resource.start = res->end + 1; |
499 | request_resource(res, sub_res); | 498 | request_resource(res, sub_res); |
500 | } | 499 | } |
501 | 500 | ||
502 | if (code_resource.start >= res->start && | 501 | if (code_resource.start >= res->start && |
503 | code_resource.start <= res->end && | 502 | code_resource.start <= res->end && |
504 | code_resource.end <= res->end) | 503 | code_resource.end <= res->end) |
505 | request_resource(res, &code_resource); | 504 | request_resource(res, &code_resource); |
506 | 505 | ||
507 | if (data_resource.start >= res->start && | 506 | if (data_resource.start >= res->start && |
508 | data_resource.start <= res->end && | 507 | data_resource.start <= res->end && |
509 | data_resource.end > res->end) { | 508 | data_resource.end > res->end) { |
510 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | 509 | sub_res = alloc_bootmem_low(sizeof(struct resource)); |
511 | memcpy(sub_res, &data_resource, | 510 | memcpy(sub_res, &data_resource, |
512 | sizeof(struct resource)); | 511 | sizeof(struct resource)); |
513 | sub_res->end = res->end; | 512 | sub_res->end = res->end; |
514 | data_resource.start = res->end + 1; | 513 | data_resource.start = res->end + 1; |
515 | request_resource(res, sub_res); | 514 | request_resource(res, sub_res); |
516 | } | 515 | } |
517 | 516 | ||
518 | if (data_resource.start >= res->start && | 517 | if (data_resource.start >= res->start && |
519 | data_resource.start <= res->end && | 518 | data_resource.start <= res->end && |
520 | data_resource.end <= res->end) | 519 | data_resource.end <= res->end) |
521 | request_resource(res, &data_resource); | 520 | request_resource(res, &data_resource); |
522 | } | 521 | } |
523 | } | 522 | } |
524 | 523 | ||
525 | unsigned long real_memory_size; | 524 | unsigned long real_memory_size; |
526 | EXPORT_SYMBOL_GPL(real_memory_size); | 525 | EXPORT_SYMBOL_GPL(real_memory_size); |
527 | 526 | ||
528 | static void __init setup_memory_end(void) | 527 | static void __init setup_memory_end(void) |
529 | { | 528 | { |
530 | unsigned long memory_size; | 529 | unsigned long memory_size; |
531 | unsigned long max_mem; | 530 | unsigned long max_mem; |
532 | int i; | 531 | int i; |
533 | 532 | ||
534 | #ifdef CONFIG_ZFCPDUMP | 533 | #ifdef CONFIG_ZFCPDUMP |
535 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { | 534 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { |
536 | memory_end = ZFCPDUMP_HSA_SIZE; | 535 | memory_end = ZFCPDUMP_HSA_SIZE; |
537 | memory_end_set = 1; | 536 | memory_end_set = 1; |
538 | } | 537 | } |
539 | #endif | 538 | #endif |
540 | memory_size = 0; | 539 | memory_size = 0; |
541 | memory_end &= PAGE_MASK; | 540 | memory_end &= PAGE_MASK; |
542 | 541 | ||
543 | max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS; | 542 | max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS; |
544 | memory_end = min(max_mem, memory_end); | 543 | memory_end = min(max_mem, memory_end); |
545 | 544 | ||
546 | /* | 545 | /* |
547 | * Make sure all chunks are MAX_ORDER aligned so we don't need the | 546 | * Make sure all chunks are MAX_ORDER aligned so we don't need the |
548 | * extra checks that HOLES_IN_ZONE would require. | 547 | * extra checks that HOLES_IN_ZONE would require. |
549 | */ | 548 | */ |
550 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 549 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
551 | unsigned long start, end; | 550 | unsigned long start, end; |
552 | struct mem_chunk *chunk; | 551 | struct mem_chunk *chunk; |
553 | unsigned long align; | 552 | unsigned long align; |
554 | 553 | ||
555 | chunk = &memory_chunk[i]; | 554 | chunk = &memory_chunk[i]; |
556 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); | 555 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); |
557 | start = (chunk->addr + align - 1) & ~(align - 1); | 556 | start = (chunk->addr + align - 1) & ~(align - 1); |
558 | end = (chunk->addr + chunk->size) & ~(align - 1); | 557 | end = (chunk->addr + chunk->size) & ~(align - 1); |
559 | if (start >= end) | 558 | if (start >= end) |
560 | memset(chunk, 0, sizeof(*chunk)); | 559 | memset(chunk, 0, sizeof(*chunk)); |
561 | else { | 560 | else { |
562 | chunk->addr = start; | 561 | chunk->addr = start; |
563 | chunk->size = end - start; | 562 | chunk->size = end - start; |
564 | } | 563 | } |
565 | } | 564 | } |
566 | 565 | ||
567 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 566 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
568 | struct mem_chunk *chunk = &memory_chunk[i]; | 567 | struct mem_chunk *chunk = &memory_chunk[i]; |
569 | 568 | ||
570 | real_memory_size = max(real_memory_size, | 569 | real_memory_size = max(real_memory_size, |
571 | chunk->addr + chunk->size); | 570 | chunk->addr + chunk->size); |
572 | if (chunk->addr >= max_mem) { | 571 | if (chunk->addr >= max_mem) { |
573 | memset(chunk, 0, sizeof(*chunk)); | 572 | memset(chunk, 0, sizeof(*chunk)); |
574 | continue; | 573 | continue; |
575 | } | 574 | } |
576 | if (chunk->addr + chunk->size > max_mem) | 575 | if (chunk->addr + chunk->size > max_mem) |
577 | chunk->size = max_mem - chunk->addr; | 576 | chunk->size = max_mem - chunk->addr; |
578 | memory_size = max(memory_size, chunk->addr + chunk->size); | 577 | memory_size = max(memory_size, chunk->addr + chunk->size); |
579 | } | 578 | } |
580 | if (!memory_end) | 579 | if (!memory_end) |
581 | memory_end = memory_size; | 580 | memory_end = memory_size; |
582 | } | 581 | } |
583 | 582 | ||
584 | static void __init | 583 | static void __init |
585 | setup_memory(void) | 584 | setup_memory(void) |
586 | { | 585 | { |
587 | unsigned long bootmap_size; | 586 | unsigned long bootmap_size; |
588 | unsigned long start_pfn, end_pfn; | 587 | unsigned long start_pfn, end_pfn; |
589 | int i; | 588 | int i; |
590 | 589 | ||
591 | /* | 590 | /* |
592 | * partially used pages are not usable - thus | 591 | * partially used pages are not usable - thus |
593 | * we are rounding upwards: | 592 | * we are rounding upwards: |
594 | */ | 593 | */ |
595 | start_pfn = PFN_UP(__pa(&_end)); | 594 | start_pfn = PFN_UP(__pa(&_end)); |
596 | end_pfn = max_pfn = PFN_DOWN(memory_end); | 595 | end_pfn = max_pfn = PFN_DOWN(memory_end); |
597 | 596 | ||
598 | #ifdef CONFIG_BLK_DEV_INITRD | 597 | #ifdef CONFIG_BLK_DEV_INITRD |
599 | /* | 598 | /* |
600 | * Move the initrd in case the bitmap of the bootmem allocater | 599 | * Move the initrd in case the bitmap of the bootmem allocater |
601 | * would overwrite it. | 600 | * would overwrite it. |
602 | */ | 601 | */ |
603 | 602 | ||
604 | if (INITRD_START && INITRD_SIZE) { | 603 | if (INITRD_START && INITRD_SIZE) { |
605 | unsigned long bmap_size; | 604 | unsigned long bmap_size; |
606 | unsigned long start; | 605 | unsigned long start; |
607 | 606 | ||
608 | bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); | 607 | bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); |
609 | bmap_size = PFN_PHYS(bmap_size); | 608 | bmap_size = PFN_PHYS(bmap_size); |
610 | 609 | ||
611 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { | 610 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { |
612 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 611 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
613 | 612 | ||
614 | if (start + INITRD_SIZE > memory_end) { | 613 | if (start + INITRD_SIZE > memory_end) { |
615 | pr_err("initrd extends beyond end of " | 614 | pr_err("initrd extends beyond end of " |
616 | "memory (0x%08lx > 0x%08lx) " | 615 | "memory (0x%08lx > 0x%08lx) " |
617 | "disabling initrd\n", | 616 | "disabling initrd\n", |
618 | start + INITRD_SIZE, memory_end); | 617 | start + INITRD_SIZE, memory_end); |
619 | INITRD_START = INITRD_SIZE = 0; | 618 | INITRD_START = INITRD_SIZE = 0; |
620 | } else { | 619 | } else { |
621 | pr_info("Moving initrd (0x%08lx -> " | 620 | pr_info("Moving initrd (0x%08lx -> " |
622 | "0x%08lx, size: %ld)\n", | 621 | "0x%08lx, size: %ld)\n", |
623 | INITRD_START, start, INITRD_SIZE); | 622 | INITRD_START, start, INITRD_SIZE); |
624 | memmove((void *) start, (void *) INITRD_START, | 623 | memmove((void *) start, (void *) INITRD_START, |
625 | INITRD_SIZE); | 624 | INITRD_SIZE); |
626 | INITRD_START = start; | 625 | INITRD_START = start; |
627 | } | 626 | } |
628 | } | 627 | } |
629 | } | 628 | } |
630 | #endif | 629 | #endif |
631 | 630 | ||
632 | /* | 631 | /* |
633 | * Initialize the boot-time allocator | 632 | * Initialize the boot-time allocator |
634 | */ | 633 | */ |
635 | bootmap_size = init_bootmem(start_pfn, end_pfn); | 634 | bootmap_size = init_bootmem(start_pfn, end_pfn); |
636 | 635 | ||
637 | /* | 636 | /* |
638 | * Register RAM areas with the bootmem allocator. | 637 | * Register RAM areas with the bootmem allocator. |
639 | */ | 638 | */ |
640 | 639 | ||
641 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 640 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
642 | unsigned long start_chunk, end_chunk, pfn; | 641 | unsigned long start_chunk, end_chunk, pfn; |
643 | 642 | ||
644 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 643 | if (memory_chunk[i].type != CHUNK_READ_WRITE) |
645 | continue; | 644 | continue; |
646 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 645 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
647 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | 646 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
648 | end_chunk = min(end_chunk, end_pfn); | 647 | end_chunk = min(end_chunk, end_pfn); |
649 | if (start_chunk >= end_chunk) | 648 | if (start_chunk >= end_chunk) |
650 | continue; | 649 | continue; |
651 | add_active_range(0, start_chunk, end_chunk); | 650 | add_active_range(0, start_chunk, end_chunk); |
652 | pfn = max(start_chunk, start_pfn); | 651 | pfn = max(start_chunk, start_pfn); |
653 | for (; pfn < end_chunk; pfn++) | 652 | for (; pfn < end_chunk; pfn++) |
654 | page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); | 653 | page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); |
655 | } | 654 | } |
656 | 655 | ||
657 | psw_set_key(PAGE_DEFAULT_KEY); | 656 | psw_set_key(PAGE_DEFAULT_KEY); |
658 | 657 | ||
659 | free_bootmem_with_active_regions(0, max_pfn); | 658 | free_bootmem_with_active_regions(0, max_pfn); |
660 | 659 | ||
661 | /* | 660 | /* |
662 | * Reserve memory used for lowcore/command line/kernel image. | 661 | * Reserve memory used for lowcore/command line/kernel image. |
663 | */ | 662 | */ |
664 | reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); | 663 | reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); |
665 | reserve_bootmem((unsigned long)_stext, | 664 | reserve_bootmem((unsigned long)_stext, |
666 | PFN_PHYS(start_pfn) - (unsigned long)_stext, | 665 | PFN_PHYS(start_pfn) - (unsigned long)_stext, |
667 | BOOTMEM_DEFAULT); | 666 | BOOTMEM_DEFAULT); |
668 | /* | 667 | /* |
669 | * Reserve the bootmem bitmap itself as well. We do this in two | 668 | * Reserve the bootmem bitmap itself as well. We do this in two |
670 | * steps (first step was init_bootmem()) because this catches | 669 | * steps (first step was init_bootmem()) because this catches |
671 | * the (very unlikely) case of us accidentally initializing the | 670 | * the (very unlikely) case of us accidentally initializing the |
672 | * bootmem allocator with an invalid RAM area. | 671 | * bootmem allocator with an invalid RAM area. |
673 | */ | 672 | */ |
674 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, | 673 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, |
675 | BOOTMEM_DEFAULT); | 674 | BOOTMEM_DEFAULT); |
676 | 675 | ||
677 | #ifdef CONFIG_BLK_DEV_INITRD | 676 | #ifdef CONFIG_BLK_DEV_INITRD |
678 | if (INITRD_START && INITRD_SIZE) { | 677 | if (INITRD_START && INITRD_SIZE) { |
679 | if (INITRD_START + INITRD_SIZE <= memory_end) { | 678 | if (INITRD_START + INITRD_SIZE <= memory_end) { |
680 | reserve_bootmem(INITRD_START, INITRD_SIZE, | 679 | reserve_bootmem(INITRD_START, INITRD_SIZE, |
681 | BOOTMEM_DEFAULT); | 680 | BOOTMEM_DEFAULT); |
682 | initrd_start = INITRD_START; | 681 | initrd_start = INITRD_START; |
683 | initrd_end = initrd_start + INITRD_SIZE; | 682 | initrd_end = initrd_start + INITRD_SIZE; |
684 | } else { | 683 | } else { |
685 | pr_err("initrd extends beyond end of " | 684 | pr_err("initrd extends beyond end of " |
686 | "memory (0x%08lx > 0x%08lx) " | 685 | "memory (0x%08lx > 0x%08lx) " |
687 | "disabling initrd\n", | 686 | "disabling initrd\n", |
688 | initrd_start + INITRD_SIZE, memory_end); | 687 | initrd_start + INITRD_SIZE, memory_end); |
689 | initrd_start = initrd_end = 0; | 688 | initrd_start = initrd_end = 0; |
690 | } | 689 | } |
691 | } | 690 | } |
692 | #endif | 691 | #endif |
693 | } | 692 | } |
694 | 693 | ||
695 | /* | 694 | /* |
696 | * Setup hardware capabilities. | 695 | * Setup hardware capabilities. |
697 | */ | 696 | */ |
698 | static void __init setup_hwcaps(void) | 697 | static void __init setup_hwcaps(void) |
699 | { | 698 | { |
700 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; | 699 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; |
701 | unsigned long long facility_list_extended; | 700 | unsigned long long facility_list_extended; |
702 | unsigned int facility_list; | 701 | unsigned int facility_list; |
703 | int i; | 702 | int i; |
704 | 703 | ||
705 | facility_list = stfl(); | 704 | facility_list = stfl(); |
706 | /* | 705 | /* |
707 | * The store facility list bits numbers as found in the principles | 706 | * The store facility list bits numbers as found in the principles |
708 | * of operation are numbered with bit 1UL<<31 as number 0 to | 707 | * of operation are numbered with bit 1UL<<31 as number 0 to |
709 | * bit 1UL<<0 as number 31. | 708 | * bit 1UL<<0 as number 31. |
710 | * Bit 0: instructions named N3, "backported" to esa-mode | 709 | * Bit 0: instructions named N3, "backported" to esa-mode |
711 | * Bit 2: z/Architecture mode is active | 710 | * Bit 2: z/Architecture mode is active |
712 | * Bit 7: the store-facility-list-extended facility is installed | 711 | * Bit 7: the store-facility-list-extended facility is installed |
713 | * Bit 17: the message-security assist is installed | 712 | * Bit 17: the message-security assist is installed |
714 | * Bit 19: the long-displacement facility is installed | 713 | * Bit 19: the long-displacement facility is installed |
715 | * Bit 21: the extended-immediate facility is installed | 714 | * Bit 21: the extended-immediate facility is installed |
716 | * Bit 22: extended-translation facility 3 is installed | 715 | * Bit 22: extended-translation facility 3 is installed |
717 | * Bit 30: extended-translation facility 3 enhancement facility | 716 | * Bit 30: extended-translation facility 3 enhancement facility |
718 | * These get translated to: | 717 | * These get translated to: |
719 | * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, | 718 | * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, |
720 | * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, | 719 | * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, |
721 | * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and | 720 | * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and |
722 | * HWCAP_S390_ETF3EH bit 8 (22 && 30). | 721 | * HWCAP_S390_ETF3EH bit 8 (22 && 30). |
723 | */ | 722 | */ |
724 | for (i = 0; i < 6; i++) | 723 | for (i = 0; i < 6; i++) |
725 | if (facility_list & (1UL << (31 - stfl_bits[i]))) | 724 | if (facility_list & (1UL << (31 - stfl_bits[i]))) |
726 | elf_hwcap |= 1UL << i; | 725 | elf_hwcap |= 1UL << i; |
727 | 726 | ||
728 | if ((facility_list & (1UL << (31 - 22))) | 727 | if ((facility_list & (1UL << (31 - 22))) |
729 | && (facility_list & (1UL << (31 - 30)))) | 728 | && (facility_list & (1UL << (31 - 30)))) |
730 | elf_hwcap |= HWCAP_S390_ETF3EH; | 729 | elf_hwcap |= HWCAP_S390_ETF3EH; |
731 | 730 | ||
732 | /* | 731 | /* |
733 | * Check for additional facilities with store-facility-list-extended. | 732 | * Check for additional facilities with store-facility-list-extended. |
734 | * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 | 733 | * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 |
735 | * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information | 734 | * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information |
736 | * as stored by stfl, bits 32-xxx contain additional facilities. | 735 | * as stored by stfl, bits 32-xxx contain additional facilities. |
737 | * How many facility words are stored depends on the number of | 736 | * How many facility words are stored depends on the number of |
738 | * doublewords passed to the instruction. The additional facilities | 737 | * doublewords passed to the instruction. The additional facilities |
739 | * are: | 738 | * are: |
740 | * Bit 42: decimal floating point facility is installed | 739 | * Bit 42: decimal floating point facility is installed |
741 | * Bit 44: perform floating point operation facility is installed | 740 | * Bit 44: perform floating point operation facility is installed |
742 | * translated to: | 741 | * translated to: |
743 | * HWCAP_S390_DFP bit 6 (42 && 44). | 742 | * HWCAP_S390_DFP bit 6 (42 && 44). |
744 | */ | 743 | */ |
745 | if ((elf_hwcap & (1UL << 2)) && | 744 | if ((elf_hwcap & (1UL << 2)) && |
746 | __stfle(&facility_list_extended, 1) > 0) { | 745 | __stfle(&facility_list_extended, 1) > 0) { |
747 | if ((facility_list_extended & (1ULL << (63 - 42))) | 746 | if ((facility_list_extended & (1ULL << (63 - 42))) |
748 | && (facility_list_extended & (1ULL << (63 - 44)))) | 747 | && (facility_list_extended & (1ULL << (63 - 44)))) |
749 | elf_hwcap |= HWCAP_S390_DFP; | 748 | elf_hwcap |= HWCAP_S390_DFP; |
750 | } | 749 | } |
751 | 750 | ||
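The same most-significant-bit-first numbering applies to the stfle doublewords, only 64 bits wide:

	/* Worked example for the stfle test above: facility bit 42
	 * (decimal-floating-point) sits at 1ULL << (63 - 42) == 1ULL << 21,
	 * bit 44 (perform-floating-point-operation) at 1ULL << 19;
	 * HWCAP_S390_DFP is set only when both bits are present. */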
752 | /* | 751 | /* |
753 | * Huge page support HWCAP_S390_HPAGE is bit 7. | 752 | * Huge page support HWCAP_S390_HPAGE is bit 7. |
754 | */ | 753 | */ |
755 | if (MACHINE_HAS_HPAGE) | 754 | if (MACHINE_HAS_HPAGE) |
756 | elf_hwcap |= HWCAP_S390_HPAGE; | 755 | elf_hwcap |= HWCAP_S390_HPAGE; |
757 | 756 | ||
758 | /* | 757 | /* |
759 | * 64-bit register support for 31-bit processes | 758 | * 64-bit register support for 31-bit processes |
760 | * HWCAP_S390_HIGH_GPRS is bit 9. | 759 | * HWCAP_S390_HIGH_GPRS is bit 9. |
761 | */ | 760 | */ |
762 | elf_hwcap |= HWCAP_S390_HIGH_GPRS; | 761 | elf_hwcap |= HWCAP_S390_HIGH_GPRS; |
763 | 762 | ||
764 | switch (S390_lowcore.cpu_id.machine) { | 763 | switch (S390_lowcore.cpu_id.machine) { |
765 | case 0x9672: | 764 | case 0x9672: |
766 | #if !defined(CONFIG_64BIT) | 765 | #if !defined(CONFIG_64BIT) |
767 | default: /* Use "g5" as default for 31 bit kernels. */ | 766 | default: /* Use "g5" as default for 31 bit kernels. */ |
768 | #endif | 767 | #endif |
769 | strcpy(elf_platform, "g5"); | 768 | strcpy(elf_platform, "g5"); |
770 | break; | 769 | break; |
771 | case 0x2064: | 770 | case 0x2064: |
772 | case 0x2066: | 771 | case 0x2066: |
773 | #if defined(CONFIG_64BIT) | 772 | #if defined(CONFIG_64BIT) |
774 | default: /* Use "z900" as default for 64 bit kernels. */ | 773 | default: /* Use "z900" as default for 64 bit kernels. */ |
775 | #endif | 774 | #endif |
776 | strcpy(elf_platform, "z900"); | 775 | strcpy(elf_platform, "z900"); |
777 | break; | 776 | break; |
778 | case 0x2084: | 777 | case 0x2084: |
779 | case 0x2086: | 778 | case 0x2086: |
780 | strcpy(elf_platform, "z990"); | 779 | strcpy(elf_platform, "z990"); |
781 | break; | 780 | break; |
782 | case 0x2094: | 781 | case 0x2094: |
783 | case 0x2096: | 782 | case 0x2096: |
784 | strcpy(elf_platform, "z9-109"); | 783 | strcpy(elf_platform, "z9-109"); |
785 | break; | 784 | break; |
786 | case 0x2097: | 785 | case 0x2097: |
787 | case 0x2098: | 786 | case 0x2098: |
788 | strcpy(elf_platform, "z10"); | 787 | strcpy(elf_platform, "z10"); |
789 | break; | 788 | break; |
790 | } | 789 | } |
791 | } | 790 | } |
792 | 791 | ||
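How a program would observe the result: both values end up in the ELF auxiliary vector of every process. A minimal user-space sketch, assuming glibc's getauxval() (a newer interface than this code, shown purely for illustration):

	#include <stdio.h>
	#include <sys/auxv.h>		/* getauxval(), glibc 2.16+ */

	int main(void)
	{
		/* AT_HWCAP carries elf_hwcap; AT_PLATFORM points at elf_platform. */
		unsigned long hwcap = getauxval(AT_HWCAP);
		const char *platform = (const char *) getauxval(AT_PLATFORM);

		printf("hwcap=%#lx platform=%s\n", hwcap, platform ? platform : "?");
		return 0;
	}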
793 | /* | 792 | /* |
794 | * Setup function called from init/main.c just after the banner | 793 | * Setup function called from init/main.c just after the banner |
795 | * was printed. | 794 | * was printed. |
796 | */ | 795 | */ |
797 | 796 | ||
798 | void __init | 797 | void __init |
799 | setup_arch(char **cmdline_p) | 798 | setup_arch(char **cmdline_p) |
800 | { | 799 | { |
801 | /* | 800 | /* |
802 | * print what head.S has found out about the machine | 801 | * print what head.S has found out about the machine |
803 | */ | 802 | */ |
804 | #ifndef CONFIG_64BIT | 803 | #ifndef CONFIG_64BIT |
805 | if (MACHINE_IS_VM) | 804 | if (MACHINE_IS_VM) |
806 | pr_info("Linux is running as a z/VM " | 805 | pr_info("Linux is running as a z/VM " |
807 | "guest operating system in 31-bit mode\n"); | 806 | "guest operating system in 31-bit mode\n"); |
808 | else | 807 | else |
809 | pr_info("Linux is running natively in 31-bit mode\n"); | 808 | pr_info("Linux is running natively in 31-bit mode\n"); |
810 | if (MACHINE_HAS_IEEE) | 809 | if (MACHINE_HAS_IEEE) |
811 | pr_info("The hardware system has IEEE compatible " | 810 | pr_info("The hardware system has IEEE compatible " |
812 | "floating point units\n"); | 811 | "floating point units\n"); |
813 | else | 812 | else |
814 | pr_info("The hardware system has no IEEE compatible " | 813 | pr_info("The hardware system has no IEEE compatible " |
815 | "floating point units\n"); | 814 | "floating point units\n"); |
816 | #else /* CONFIG_64BIT */ | 815 | #else /* CONFIG_64BIT */ |
817 | if (MACHINE_IS_VM) | 816 | if (MACHINE_IS_VM) |
818 | pr_info("Linux is running as a z/VM " | 817 | pr_info("Linux is running as a z/VM " |
819 | "guest operating system in 64-bit mode\n"); | 818 | "guest operating system in 64-bit mode\n"); |
820 | else if (MACHINE_IS_KVM) | 819 | else if (MACHINE_IS_KVM) |
821 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 820 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
822 | else | 821 | else |
823 | pr_info("Linux is running natively in 64-bit mode\n"); | 822 | pr_info("Linux is running natively in 64-bit mode\n"); |
824 | #endif /* CONFIG_64BIT */ | 823 | #endif /* CONFIG_64BIT */ |
825 | 824 | ||
826 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 825 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
827 | /* boot_command_line has been already set up in early.c */ | 826 | /* boot_command_line has been already set up in early.c */ |
828 | *cmdline_p = boot_command_line; | 827 | *cmdline_p = boot_command_line; |
829 | 828 | ||
830 | ROOT_DEV = Root_RAM0; | 829 | ROOT_DEV = Root_RAM0; |
831 | 830 | ||
832 | init_mm.start_code = PAGE_OFFSET; | 831 | init_mm.start_code = PAGE_OFFSET; |
833 | init_mm.end_code = (unsigned long) &_etext; | 832 | init_mm.end_code = (unsigned long) &_etext; |
834 | init_mm.end_data = (unsigned long) &_edata; | 833 | init_mm.end_data = (unsigned long) &_edata; |
835 | init_mm.brk = (unsigned long) &_end; | 834 | init_mm.brk = (unsigned long) &_end; |
836 | 835 | ||
837 | if (MACHINE_HAS_MVCOS) | 836 | if (MACHINE_HAS_MVCOS) |
838 | memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); | 837 | memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); |
839 | else | 838 | else |
840 | memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); | 839 | memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); |
841 | 840 | ||
842 | parse_early_param(); | 841 | parse_early_param(); |
843 | 842 | ||
844 | setup_ipl(); | 843 | setup_ipl(); |
845 | setup_memory_end(); | 844 | setup_memory_end(); |
846 | setup_addressing_mode(); | 845 | setup_addressing_mode(); |
847 | setup_memory(); | 846 | setup_memory(); |
848 | setup_resources(); | 847 | setup_resources(); |
849 | setup_lowcore(); | 848 | setup_lowcore(); |
850 | 849 | ||
851 | cpu_init(); | 850 | cpu_init(); |
852 | s390_init_cpu_topology(); | 851 | s390_init_cpu_topology(); |
853 | 852 | ||
854 | /* | 853 | /* |
855 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). | 854 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). |
856 | */ | 855 | */ |
857 | setup_hwcaps(); | 856 | setup_hwcaps(); |
858 | 857 | ||
859 | /* | 858 | /* |
860 | * Create kernel page tables and switch to virtual addressing. | 859 | * Create kernel page tables and switch to virtual addressing. |
861 | */ | 860 | */ |
862 | paging_init(); | 861 | paging_init(); |
863 | 862 | ||
864 | /* Setup default console */ | 863 | /* Setup default console */ |
865 | conmode_default(); | 864 | conmode_default(); |
866 | set_preferred_console(); | 865 | set_preferred_console(); |
867 | 866 | ||
868 | /* Setup zfcpdump support */ | 867 | /* Setup zfcpdump support */ |
869 | setup_zfcpdump(console_devno); | 868 | setup_zfcpdump(console_devno); |
870 | } | 869 | } |
871 | 870 |
arch/s390/kernel/smp.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/smp.c | 2 | * arch/s390/kernel/smp.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2009 | 4 | * Copyright IBM Corp. 1999, 2009 |
5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 7 | * Heiko Carstens (heiko.carstens@de.ibm.com) |
8 | * | 8 | * |
9 | * based on other smp stuff by | 9 | * based on other smp stuff by |
10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> | 10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> |
11 | * (c) 1998 Ingo Molnar | 11 | * (c) 1998 Ingo Molnar |
12 | * | 12 | * |
13 | * We work with logical cpu numbering everywhere we can. The only | 13 | * We work with logical cpu numbering everywhere we can. The only |
14 | * functions using the real cpu address (obtained from STAP) are the sigp | 14 | * functions using the real cpu address (obtained from STAP) are the sigp |
15 | * functions. For all other functions we use the identity mapping. | 15 | * functions. For all other functions we use the identity mapping. |
16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is | 16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is |
17 | * used e.g. to find the idle task belonging to a logical cpu. Every array | 17 | * used e.g. to find the idle task belonging to a logical cpu. Every array |
18 | * in the kernel is sorted by the logical cpu number and not by the physical | 18 | * in the kernel is sorted by the logical cpu number and not by the physical |
19 | * one, which causes all the confusion with __cpu_logical_map and | 19 | * one, which causes all the confusion with __cpu_logical_map and |
20 | * cpu_number_map in other architectures. | 20 | * cpu_number_map in other architectures. |
21 | */ | 21 | */ |
22 | 22 | ||
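The array added below is the point of the file-header comment: logical cpu numbers index every kernel array, and only the sigp wrappers translate to the physical address at the last moment. A hedged sketch of that translation, where raw_sigp() is a hypothetical stand-in for the actual signalling primitive (the real helpers are the signal_processor*() functions used throughout this file):

	/* Sketch only: the last-moment logical -> physical translation.
	 * raw_sigp() is hypothetical. */
	static sigp_ccode sketch_signal(int logical_cpu, sigp_order_code order)
	{
		return raw_sigp(__cpu_logical_map[logical_cpu], order);
	}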
23 | #define KMSG_COMPONENT "cpu" | 23 | #define KMSG_COMPONENT "cpu" |
24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
25 | 25 | ||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
31 | #include <linux/kernel_stat.h> | 31 | #include <linux/kernel_stat.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/cache.h> | 33 | #include <linux/cache.h> |
34 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
35 | #include <linux/irqflags.h> | 35 | #include <linux/irqflags.h> |
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/timex.h> | 37 | #include <linux/timex.h> |
38 | #include <linux/bootmem.h> | 38 | #include <linux/bootmem.h> |
39 | #include <asm/ipl.h> | 39 | #include <asm/ipl.h> |
40 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
41 | #include <asm/sigp.h> | 41 | #include <asm/sigp.h> |
42 | #include <asm/pgalloc.h> | 42 | #include <asm/pgalloc.h> |
43 | #include <asm/irq.h> | 43 | #include <asm/irq.h> |
44 | #include <asm/s390_ext.h> | 44 | #include <asm/s390_ext.h> |
45 | #include <asm/cpcmd.h> | 45 | #include <asm/cpcmd.h> |
46 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
47 | #include <asm/timer.h> | 47 | #include <asm/timer.h> |
48 | #include <asm/lowcore.h> | 48 | #include <asm/lowcore.h> |
49 | #include <asm/sclp.h> | 49 | #include <asm/sclp.h> |
50 | #include <asm/cputime.h> | 50 | #include <asm/cputime.h> |
51 | #include <asm/vdso.h> | 51 | #include <asm/vdso.h> |
52 | #include <asm/cpu.h> | 52 | #include <asm/cpu.h> |
53 | #include "entry.h" | 53 | #include "entry.h" |
54 | 54 | ||
55 | /* logical cpu to cpu address */ | ||
56 | int __cpu_logical_map[NR_CPUS]; | ||
57 | |||
55 | static struct task_struct *current_set[NR_CPUS]; | 58 | static struct task_struct *current_set[NR_CPUS]; |
56 | 59 | ||
57 | static u8 smp_cpu_type; | 60 | static u8 smp_cpu_type; |
58 | static int smp_use_sigp_detection; | 61 | static int smp_use_sigp_detection; |
59 | 62 | ||
60 | enum s390_cpu_state { | 63 | enum s390_cpu_state { |
61 | CPU_STATE_STANDBY, | 64 | CPU_STATE_STANDBY, |
62 | CPU_STATE_CONFIGURED, | 65 | CPU_STATE_CONFIGURED, |
63 | }; | 66 | }; |
64 | 67 | ||
65 | DEFINE_MUTEX(smp_cpu_state_mutex); | 68 | DEFINE_MUTEX(smp_cpu_state_mutex); |
66 | int smp_cpu_polarization[NR_CPUS]; | 69 | int smp_cpu_polarization[NR_CPUS]; |
67 | static int smp_cpu_state[NR_CPUS]; | 70 | static int smp_cpu_state[NR_CPUS]; |
68 | static int cpu_management; | 71 | static int cpu_management; |
69 | 72 | ||
70 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 73 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
71 | 74 | ||
72 | static void smp_ext_bitcall(int, ec_bit_sig); | 75 | static void smp_ext_bitcall(int, ec_bit_sig); |
73 | 76 | ||
74 | static int cpu_stopped(int cpu) | 77 | static int cpu_stopped(int cpu) |
75 | { | 78 | { |
76 | __u32 status; | 79 | __u32 status; |
77 | 80 | ||
78 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | 81 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { |
79 | case sigp_status_stored: | 82 | case sigp_status_stored: |
80 | /* Check for stopped and check stop state */ | 83 | /* Check for stopped and check stop state */ |
81 | if (status & 0x50) | 84 | if (status & 0x50) |
82 | return 1; | 85 | return 1; |
83 | break; | 86 | break; |
84 | default: | 87 | default: |
85 | break; | 88 | break; |
86 | } | 89 | } |
87 | return 0; | 90 | return 0; |
88 | } | 91 | } |
89 | 92 | ||
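The magic 0x50 in cpu_stopped() is the union of two sigp status bits; with named masks (values assumed from the architecture, matching the comment in the code) the test reads:

	/* Assumed mask values for the sigp sense status word: */
	#define SIGP_STATUS_CHECK_STOP	0x00000010	/* cpu is check-stopped */
	#define SIGP_STATUS_STOPPED	0x00000040	/* cpu is stopped */

	/* Equivalent to the (status & 0x50) test in cpu_stopped(). */
	if (status & (SIGP_STATUS_STOPPED | SIGP_STATUS_CHECK_STOP))
		return 1;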
90 | void smp_send_stop(void) | 93 | void smp_send_stop(void) |
91 | { | 94 | { |
92 | int cpu, rc; | 95 | int cpu, rc; |
93 | 96 | ||
94 | /* Disable all interrupts/machine checks */ | 97 | /* Disable all interrupts/machine checks */ |
95 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); | 98 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
96 | trace_hardirqs_off(); | 99 | trace_hardirqs_off(); |
97 | 100 | ||
98 | /* stop all processors */ | 101 | /* stop all processors */ |
99 | for_each_online_cpu(cpu) { | 102 | for_each_online_cpu(cpu) { |
100 | if (cpu == smp_processor_id()) | 103 | if (cpu == smp_processor_id()) |
101 | continue; | 104 | continue; |
102 | do { | 105 | do { |
103 | rc = signal_processor(cpu, sigp_stop); | 106 | rc = signal_processor(cpu, sigp_stop); |
104 | } while (rc == sigp_busy); | 107 | } while (rc == sigp_busy); |
105 | 108 | ||
106 | while (!cpu_stopped(cpu)) | 109 | while (!cpu_stopped(cpu)) |
107 | cpu_relax(); | 110 | cpu_relax(); |
108 | } | 111 | } |
109 | } | 112 | } |
110 | 113 | ||
111 | /* | 114 | /* |
112 | * This is the main routine where commands issued by other | 115 | * This is the main routine where commands issued by other |
113 | * cpus are handled. | 116 | * cpus are handled. |
114 | */ | 117 | */ |
115 | 118 | ||
116 | static void do_ext_call_interrupt(__u16 code) | 119 | static void do_ext_call_interrupt(__u16 code) |
117 | { | 120 | { |
118 | unsigned long bits; | 121 | unsigned long bits; |
119 | 122 | ||
120 | /* | 123 | /* |
121 | * handle bit signal external calls | 124 | * handle bit signal external calls |
122 | * | 125 | * |
123 | * For the ec_schedule signal we have to do nothing. All the work | 126 | * For the ec_schedule signal we have to do nothing. All the work |
124 | * is done automatically when we return from the interrupt. | 127 | * is done automatically when we return from the interrupt. |
125 | */ | 128 | */ |
126 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 129 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
127 | 130 | ||
128 | if (test_bit(ec_call_function, &bits)) | 131 | if (test_bit(ec_call_function, &bits)) |
129 | generic_smp_call_function_interrupt(); | 132 | generic_smp_call_function_interrupt(); |
130 | 133 | ||
131 | if (test_bit(ec_call_function_single, &bits)) | 134 | if (test_bit(ec_call_function_single, &bits)) |
132 | generic_smp_call_function_single_interrupt(); | 135 | generic_smp_call_function_single_interrupt(); |
133 | } | 136 | } |
134 | 137 | ||
135 | /* | 138 | /* |
136 | * Send an external call sigp to another cpu and return without waiting | 139 | * Send an external call sigp to another cpu and return without waiting |
137 | * for its completion. | 140 | * for its completion. |
138 | */ | 141 | */ |
139 | static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | 142 | static void smp_ext_bitcall(int cpu, ec_bit_sig sig) |
140 | { | 143 | { |
141 | /* | 144 | /* |
142 | * Set signaling bit in lowcore of target cpu and kick it | 145 | * Set signaling bit in lowcore of target cpu and kick it |
143 | */ | 146 | */ |
144 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 147 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
145 | while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) | 148 | while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) |
146 | udelay(10); | 149 | udelay(10); |
147 | } | 150 | } |
148 | 151 | ||
149 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 152 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
150 | { | 153 | { |
151 | int cpu; | 154 | int cpu; |
152 | 155 | ||
153 | for_each_cpu(cpu, mask) | 156 | for_each_cpu(cpu, mask) |
154 | smp_ext_bitcall(cpu, ec_call_function); | 157 | smp_ext_bitcall(cpu, ec_call_function); |
155 | } | 158 | } |
156 | 159 | ||
157 | void arch_send_call_function_single_ipi(int cpu) | 160 | void arch_send_call_function_single_ipi(int cpu) |
158 | { | 161 | { |
159 | smp_ext_bitcall(cpu, ec_call_function_single); | 162 | smp_ext_bitcall(cpu, ec_call_function_single); |
160 | } | 163 | } |
161 | 164 | ||
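Both halves of the bit-signal protocol, side by side: smp_ext_bitcall() publishes the request in the target's lowcore before kicking it, and do_ext_call_interrupt() claims all pending requests in one atomic step:

	/* Sender: mark the request, then kick the target cpu. */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);

	/* Receiver: fetch and clear every pending bit at once. */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);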
162 | #ifndef CONFIG_64BIT | 165 | #ifndef CONFIG_64BIT |
163 | /* | 166 | /* |
164 | * this function sends a 'purge tlb' signal to another CPU. | 167 | * this function sends a 'purge tlb' signal to another CPU. |
165 | */ | 168 | */ |
166 | static void smp_ptlb_callback(void *info) | 169 | static void smp_ptlb_callback(void *info) |
167 | { | 170 | { |
168 | __tlb_flush_local(); | 171 | __tlb_flush_local(); |
169 | } | 172 | } |
170 | 173 | ||
171 | void smp_ptlb_all(void) | 174 | void smp_ptlb_all(void) |
172 | { | 175 | { |
173 | on_each_cpu(smp_ptlb_callback, NULL, 1); | 176 | on_each_cpu(smp_ptlb_callback, NULL, 1); |
174 | } | 177 | } |
175 | EXPORT_SYMBOL(smp_ptlb_all); | 178 | EXPORT_SYMBOL(smp_ptlb_all); |
176 | #endif /* ! CONFIG_64BIT */ | 179 | #endif /* ! CONFIG_64BIT */ |
177 | 180 | ||
178 | /* | 181 | /* |
179 | * this function sends a 'reschedule' IPI to another CPU. | 182 | * this function sends a 'reschedule' IPI to another CPU. |
180 | * it goes straight through and wastes no time serializing | 183 | * it goes straight through and wastes no time serializing |
181 | * anything. Worst case is that we lose a reschedule ... | 184 | * anything. Worst case is that we lose a reschedule ... |
182 | */ | 185 | */ |
183 | void smp_send_reschedule(int cpu) | 186 | void smp_send_reschedule(int cpu) |
184 | { | 187 | { |
185 | smp_ext_bitcall(cpu, ec_schedule); | 188 | smp_ext_bitcall(cpu, ec_schedule); |
186 | } | 189 | } |
187 | 190 | ||
188 | /* | 191 | /* |
189 | * parameter area for the set/clear control bit callbacks | 192 | * parameter area for the set/clear control bit callbacks |
190 | */ | 193 | */ |
191 | struct ec_creg_mask_parms { | 194 | struct ec_creg_mask_parms { |
192 | unsigned long orvals[16]; | 195 | unsigned long orvals[16]; |
193 | unsigned long andvals[16]; | 196 | unsigned long andvals[16]; |
194 | }; | 197 | }; |
195 | 198 | ||
196 | /* | 199 | /* |
197 | * callback for setting/clearing control bits | 200 | * callback for setting/clearing control bits |
198 | */ | 201 | */ |
199 | static void smp_ctl_bit_callback(void *info) | 202 | static void smp_ctl_bit_callback(void *info) |
200 | { | 203 | { |
201 | struct ec_creg_mask_parms *pp = info; | 204 | struct ec_creg_mask_parms *pp = info; |
202 | unsigned long cregs[16]; | 205 | unsigned long cregs[16]; |
203 | int i; | 206 | int i; |
204 | 207 | ||
205 | __ctl_store(cregs, 0, 15); | 208 | __ctl_store(cregs, 0, 15); |
206 | for (i = 0; i <= 15; i++) | 209 | for (i = 0; i <= 15; i++) |
207 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; | 210 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; |
208 | __ctl_load(cregs, 0, 15); | 211 | __ctl_load(cregs, 0, 15); |
209 | } | 212 | } |
210 | 213 | ||
211 | /* | 214 | /* |
212 | * Set a bit in a control register of all cpus | 215 | * Set a bit in a control register of all cpus |
213 | */ | 216 | */ |
214 | void smp_ctl_set_bit(int cr, int bit) | 217 | void smp_ctl_set_bit(int cr, int bit) |
215 | { | 218 | { |
216 | struct ec_creg_mask_parms parms; | 219 | struct ec_creg_mask_parms parms; |
217 | 220 | ||
218 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 221 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
219 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 222 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
220 | parms.orvals[cr] = 1 << bit; | 223 | parms.orvals[cr] = 1 << bit; |
221 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 224 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
222 | } | 225 | } |
223 | EXPORT_SYMBOL(smp_ctl_set_bit); | 226 | EXPORT_SYMBOL(smp_ctl_set_bit); |
224 | 227 | ||
225 | /* | 228 | /* |
226 | * Clear a bit in a control register of all cpus | 229 | * Clear a bit in a control register of all cpus |
227 | */ | 230 | */ |
228 | void smp_ctl_clear_bit(int cr, int bit) | 231 | void smp_ctl_clear_bit(int cr, int bit) |
229 | { | 232 | { |
230 | struct ec_creg_mask_parms parms; | 233 | struct ec_creg_mask_parms parms; |
231 | 234 | ||
232 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 235 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
233 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 236 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
234 | parms.andvals[cr] = ~(1L << bit); | 237 | parms.andvals[cr] = ~(1L << bit); |
235 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 238 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
236 | } | 239 | } |
237 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 240 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
238 | 241 | ||
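Callers use the pair like this; the cr/bit values below are purely illustrative:

	/* Illustrative only: flip bit 4 of control register 0 on every cpu. */
	smp_ctl_set_bit(0, 4);		/* ORs 1 << 4 into cr0 everywhere */
	smp_ctl_clear_bit(0, 4);	/* ANDs ~(1L << 4) into cr0 everywhere */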
239 | /* | 242 | /* |
240 | * In early ipl state a temporary logical cpu number is needed, so the sigp | 243 | * In early ipl state a temporary logical cpu number is needed, so the sigp |
241 | * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on | 244 | * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on |
242 | * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. | 245 | * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. |
243 | */ | 246 | */ |
244 | #define CPU_INIT_NO 1 | 247 | #define CPU_INIT_NO 1 |
245 | 248 | ||
246 | #ifdef CONFIG_ZFCPDUMP | 249 | #ifdef CONFIG_ZFCPDUMP |
247 | 250 | ||
248 | /* | 251 | /* |
249 | * zfcpdump_prefix_array holds prefix registers for the following scenario: | 252 | * zfcpdump_prefix_array holds prefix registers for the following scenario: |
250 | * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to | 253 | * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to |
251 | * save its prefix registers, since they get lost when switching from 31 bit | 254 | * save its prefix registers, since they get lost when switching from 31 bit |
252 | * to 64 bit. | 255 | * to 64 bit. |
253 | */ | 256 | */ |
254 | unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ | 257 | unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ |
255 | __attribute__((__section__(".data"))); | 258 | __attribute__((__section__(".data"))); |
256 | 259 | ||
257 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 260 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
258 | { | 261 | { |
259 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 262 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
260 | return; | 263 | return; |
261 | if (cpu >= NR_CPUS) { | 264 | if (cpu >= NR_CPUS) { |
262 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 265 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
263 | "the dump\n", cpu, NR_CPUS - 1); | 266 | "the dump\n", cpu, NR_CPUS - 1); |
264 | return; | 267 | return; |
265 | } | 268 | } |
266 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); | 269 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); |
267 | __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; | 270 | __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; |
268 | while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == | 271 | while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == |
269 | sigp_busy) | 272 | sigp_busy) |
270 | cpu_relax(); | 273 | cpu_relax(); |
271 | memcpy(zfcpdump_save_areas[cpu], | 274 | memcpy(zfcpdump_save_areas[cpu], |
272 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 275 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, |
273 | SAVE_AREA_SIZE); | 276 | SAVE_AREA_SIZE); |
274 | #ifdef CONFIG_64BIT | 277 | #ifdef CONFIG_64BIT |
275 | /* copy original prefix register */ | 278 | /* copy original prefix register */ |
276 | zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; | 279 | zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; |
277 | #endif | 280 | #endif |
278 | } | 281 | } |
279 | 282 | ||
280 | union save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 283 | union save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
281 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | 284 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); |
282 | 285 | ||
283 | #else | 286 | #else |
284 | 287 | ||
285 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | 288 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } |
286 | 289 | ||
287 | #endif /* CONFIG_ZFCPDUMP */ | 290 | #endif /* CONFIG_ZFCPDUMP */ |
288 | 291 | ||
289 | static int cpu_known(int cpu_id) | 292 | static int cpu_known(int cpu_id) |
290 | { | 293 | { |
291 | int cpu; | 294 | int cpu; |
292 | 295 | ||
293 | for_each_present_cpu(cpu) { | 296 | for_each_present_cpu(cpu) { |
294 | if (__cpu_logical_map[cpu] == cpu_id) | 297 | if (__cpu_logical_map[cpu] == cpu_id) |
295 | return 1; | 298 | return 1; |
296 | } | 299 | } |
297 | return 0; | 300 | return 0; |
298 | } | 301 | } |
299 | 302 | ||
300 | static int smp_rescan_cpus_sigp(cpumask_t avail) | 303 | static int smp_rescan_cpus_sigp(cpumask_t avail) |
301 | { | 304 | { |
302 | int cpu_id, logical_cpu; | 305 | int cpu_id, logical_cpu; |
303 | 306 | ||
304 | logical_cpu = cpumask_first(&avail); | 307 | logical_cpu = cpumask_first(&avail); |
305 | if (logical_cpu >= nr_cpu_ids) | 308 | if (logical_cpu >= nr_cpu_ids) |
306 | return 0; | 309 | return 0; |
307 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { | 310 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { |
308 | if (cpu_known(cpu_id)) | 311 | if (cpu_known(cpu_id)) |
309 | continue; | 312 | continue; |
310 | __cpu_logical_map[logical_cpu] = cpu_id; | 313 | __cpu_logical_map[logical_cpu] = cpu_id; |
311 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | 314 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; |
312 | if (!cpu_stopped(logical_cpu)) | 315 | if (!cpu_stopped(logical_cpu)) |
313 | continue; | 316 | continue; |
314 | cpu_set(logical_cpu, cpu_present_map); | 317 | cpu_set(logical_cpu, cpu_present_map); |
315 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | 318 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; |
316 | logical_cpu = cpumask_next(logical_cpu, &avail); | 319 | logical_cpu = cpumask_next(logical_cpu, &avail); |
317 | if (logical_cpu >= nr_cpu_ids) | 320 | if (logical_cpu >= nr_cpu_ids) |
318 | break; | 321 | break; |
319 | } | 322 | } |
320 | return 0; | 323 | return 0; |
321 | } | 324 | } |
322 | 325 | ||
323 | static int smp_rescan_cpus_sclp(cpumask_t avail) | 326 | static int smp_rescan_cpus_sclp(cpumask_t avail) |
324 | { | 327 | { |
325 | struct sclp_cpu_info *info; | 328 | struct sclp_cpu_info *info; |
326 | int cpu_id, logical_cpu, cpu; | 329 | int cpu_id, logical_cpu, cpu; |
327 | int rc; | 330 | int rc; |
328 | 331 | ||
329 | logical_cpu = cpumask_first(&avail); | 332 | logical_cpu = cpumask_first(&avail); |
330 | if (logical_cpu >= nr_cpu_ids) | 333 | if (logical_cpu >= nr_cpu_ids) |
331 | return 0; | 334 | return 0; |
332 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 335 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
333 | if (!info) | 336 | if (!info) |
334 | return -ENOMEM; | 337 | return -ENOMEM; |
335 | rc = sclp_get_cpu_info(info); | 338 | rc = sclp_get_cpu_info(info); |
336 | if (rc) | 339 | if (rc) |
337 | goto out; | 340 | goto out; |
338 | for (cpu = 0; cpu < info->combined; cpu++) { | 341 | for (cpu = 0; cpu < info->combined; cpu++) { |
339 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | 342 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) |
340 | continue; | 343 | continue; |
341 | cpu_id = info->cpu[cpu].address; | 344 | cpu_id = info->cpu[cpu].address; |
342 | if (cpu_known(cpu_id)) | 345 | if (cpu_known(cpu_id)) |
343 | continue; | 346 | continue; |
344 | __cpu_logical_map[logical_cpu] = cpu_id; | 347 | __cpu_logical_map[logical_cpu] = cpu_id; |
345 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | 348 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; |
346 | cpu_set(logical_cpu, cpu_present_map); | 349 | cpu_set(logical_cpu, cpu_present_map); |
347 | if (cpu >= info->configured) | 350 | if (cpu >= info->configured) |
348 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | 351 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; |
349 | else | 352 | else |
350 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | 353 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; |
351 | logical_cpu = cpumask_next(logical_cpu, &avail); | 354 | logical_cpu = cpumask_next(logical_cpu, &avail); |
352 | if (logical_cpu >= nr_cpu_ids) | 355 | if (logical_cpu >= nr_cpu_ids) |
353 | break; | 356 | break; |
354 | } | 357 | } |
355 | out: | 358 | out: |
356 | kfree(info); | 359 | kfree(info); |
357 | return rc; | 360 | return rc; |
358 | } | 361 | } |
359 | 362 | ||
360 | static int __smp_rescan_cpus(void) | 363 | static int __smp_rescan_cpus(void) |
361 | { | 364 | { |
362 | cpumask_t avail; | 365 | cpumask_t avail; |
363 | 366 | ||
364 | cpus_xor(avail, cpu_possible_map, cpu_present_map); | 367 | cpus_xor(avail, cpu_possible_map, cpu_present_map); |
365 | if (smp_use_sigp_detection) | 368 | if (smp_use_sigp_detection) |
366 | return smp_rescan_cpus_sigp(avail); | 369 | return smp_rescan_cpus_sigp(avail); |
367 | else | 370 | else |
368 | return smp_rescan_cpus_sclp(avail); | 371 | return smp_rescan_cpus_sclp(avail); |
369 | } | 372 | } |
370 | 373 | ||
371 | static void __init smp_detect_cpus(void) | 374 | static void __init smp_detect_cpus(void) |
372 | { | 375 | { |
373 | unsigned int cpu, c_cpus, s_cpus; | 376 | unsigned int cpu, c_cpus, s_cpus; |
374 | struct sclp_cpu_info *info; | 377 | struct sclp_cpu_info *info; |
375 | u16 boot_cpu_addr, cpu_addr; | 378 | u16 boot_cpu_addr, cpu_addr; |
376 | 379 | ||
377 | c_cpus = 1; | 380 | c_cpus = 1; |
378 | s_cpus = 0; | 381 | s_cpus = 0; |
379 | boot_cpu_addr = __cpu_logical_map[0]; | 382 | boot_cpu_addr = __cpu_logical_map[0]; |
380 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 383 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
381 | if (!info) | 384 | if (!info) |
382 | panic("smp_detect_cpus failed to allocate memory\n"); | 385 | panic("smp_detect_cpus failed to allocate memory\n"); |
383 | /* Use sigp detection algorithm if sclp doesn't work. */ | 386 | /* Use sigp detection algorithm if sclp doesn't work. */ |
384 | if (sclp_get_cpu_info(info)) { | 387 | if (sclp_get_cpu_info(info)) { |
385 | smp_use_sigp_detection = 1; | 388 | smp_use_sigp_detection = 1; |
386 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { | 389 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
387 | if (cpu == boot_cpu_addr) | 390 | if (cpu == boot_cpu_addr) |
388 | continue; | 391 | continue; |
389 | __cpu_logical_map[CPU_INIT_NO] = cpu; | 392 | __cpu_logical_map[CPU_INIT_NO] = cpu; |
390 | if (!cpu_stopped(CPU_INIT_NO)) | 393 | if (!cpu_stopped(CPU_INIT_NO)) |
391 | continue; | 394 | continue; |
392 | smp_get_save_area(c_cpus, cpu); | 395 | smp_get_save_area(c_cpus, cpu); |
393 | c_cpus++; | 396 | c_cpus++; |
394 | } | 397 | } |
395 | goto out; | 398 | goto out; |
396 | } | 399 | } |
397 | 400 | ||
398 | if (info->has_cpu_type) { | 401 | if (info->has_cpu_type) { |
399 | for (cpu = 0; cpu < info->combined; cpu++) { | 402 | for (cpu = 0; cpu < info->combined; cpu++) { |
400 | if (info->cpu[cpu].address == boot_cpu_addr) { | 403 | if (info->cpu[cpu].address == boot_cpu_addr) { |
401 | smp_cpu_type = info->cpu[cpu].type; | 404 | smp_cpu_type = info->cpu[cpu].type; |
402 | break; | 405 | break; |
403 | } | 406 | } |
404 | } | 407 | } |
405 | } | 408 | } |
406 | 409 | ||
407 | for (cpu = 0; cpu < info->combined; cpu++) { | 410 | for (cpu = 0; cpu < info->combined; cpu++) { |
408 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | 411 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) |
409 | continue; | 412 | continue; |
410 | cpu_addr = info->cpu[cpu].address; | 413 | cpu_addr = info->cpu[cpu].address; |
411 | if (cpu_addr == boot_cpu_addr) | 414 | if (cpu_addr == boot_cpu_addr) |
412 | continue; | 415 | continue; |
413 | __cpu_logical_map[CPU_INIT_NO] = cpu_addr; | 416 | __cpu_logical_map[CPU_INIT_NO] = cpu_addr; |
414 | if (!cpu_stopped(CPU_INIT_NO)) { | 417 | if (!cpu_stopped(CPU_INIT_NO)) { |
415 | s_cpus++; | 418 | s_cpus++; |
416 | continue; | 419 | continue; |
417 | } | 420 | } |
418 | smp_get_save_area(c_cpus, cpu_addr); | 421 | smp_get_save_area(c_cpus, cpu_addr); |
419 | c_cpus++; | 422 | c_cpus++; |
420 | } | 423 | } |
421 | out: | 424 | out: |
422 | kfree(info); | 425 | kfree(info); |
423 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); | 426 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
424 | get_online_cpus(); | 427 | get_online_cpus(); |
425 | __smp_rescan_cpus(); | 428 | __smp_rescan_cpus(); |
426 | put_online_cpus(); | 429 | put_online_cpus(); |
427 | } | 430 | } |
428 | 431 | ||
429 | /* | 432 | /* |
430 | * Activate a secondary processor. | 433 | * Activate a secondary processor. |
431 | */ | 434 | */ |
432 | int __cpuinit start_secondary(void *cpuvoid) | 435 | int __cpuinit start_secondary(void *cpuvoid) |
433 | { | 436 | { |
434 | /* Setup the cpu */ | 437 | /* Setup the cpu */ |
435 | cpu_init(); | 438 | cpu_init(); |
436 | preempt_disable(); | 439 | preempt_disable(); |
437 | /* Enable TOD clock interrupts on the secondary cpu. */ | 440 | /* Enable TOD clock interrupts on the secondary cpu. */ |
438 | init_cpu_timer(); | 441 | init_cpu_timer(); |
439 | /* Enable cpu timer interrupts on the secondary cpu. */ | 442 | /* Enable cpu timer interrupts on the secondary cpu. */ |
440 | init_cpu_vtimer(); | 443 | init_cpu_vtimer(); |
441 | /* Enable pfault pseudo page faults on this cpu. */ | 444 | /* Enable pfault pseudo page faults on this cpu. */ |
442 | pfault_init(); | 445 | pfault_init(); |
443 | 446 | ||
444 | /* call cpu notifiers */ | 447 | /* call cpu notifiers */ |
445 | notify_cpu_starting(smp_processor_id()); | 448 | notify_cpu_starting(smp_processor_id()); |
446 | /* Mark this cpu as online */ | 449 | /* Mark this cpu as online */ |
447 | ipi_call_lock(); | 450 | ipi_call_lock(); |
448 | cpu_set(smp_processor_id(), cpu_online_map); | 451 | cpu_set(smp_processor_id(), cpu_online_map); |
449 | ipi_call_unlock(); | 452 | ipi_call_unlock(); |
450 | /* Switch on interrupts */ | 453 | /* Switch on interrupts */ |
451 | local_irq_enable(); | 454 | local_irq_enable(); |
452 | /* Print info about this processor */ | 455 | /* Print info about this processor */ |
453 | print_cpu_info(); | 456 | print_cpu_info(); |
454 | /* cpu_idle will call schedule for us */ | 457 | /* cpu_idle will call schedule for us */ |
455 | cpu_idle(); | 458 | cpu_idle(); |
456 | return 0; | 459 | return 0; |
457 | } | 460 | } |
458 | 461 | ||
459 | static void __init smp_create_idle(unsigned int cpu) | 462 | static void __init smp_create_idle(unsigned int cpu) |
460 | { | 463 | { |
461 | struct task_struct *p; | 464 | struct task_struct *p; |
462 | 465 | ||
463 | /* | 466 | /* |
464 | * don't care about the psw and regs settings since we'll never | 467 | * don't care about the psw and regs settings since we'll never |
465 | * reschedule the forked task. | 468 | * reschedule the forked task. |
466 | */ | 469 | */ |
467 | p = fork_idle(cpu); | 470 | p = fork_idle(cpu); |
468 | if (IS_ERR(p)) | 471 | if (IS_ERR(p)) |
469 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | 472 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); |
470 | current_set[cpu] = p; | 473 | current_set[cpu] = p; |
471 | } | 474 | } |
472 | 475 | ||
473 | static int __cpuinit smp_alloc_lowcore(int cpu) | 476 | static int __cpuinit smp_alloc_lowcore(int cpu) |
474 | { | 477 | { |
475 | unsigned long async_stack, panic_stack; | 478 | unsigned long async_stack, panic_stack; |
476 | struct _lowcore *lowcore; | 479 | struct _lowcore *lowcore; |
477 | 480 | ||
478 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 481 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
479 | if (!lowcore) | 482 | if (!lowcore) |
480 | return -ENOMEM; | 483 | return -ENOMEM; |
481 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | 484 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); |
482 | panic_stack = __get_free_page(GFP_KERNEL); | 485 | panic_stack = __get_free_page(GFP_KERNEL); |
483 | if (!panic_stack || !async_stack) | 486 | if (!panic_stack || !async_stack) |
484 | goto out; | 487 | goto out; |
485 | memcpy(lowcore, &S390_lowcore, 512); | 488 | memcpy(lowcore, &S390_lowcore, 512); |
486 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | 489 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
487 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 490 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
488 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 491 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
489 | 492 | ||
490 | #ifndef CONFIG_64BIT | 493 | #ifndef CONFIG_64BIT |
491 | if (MACHINE_HAS_IEEE) { | 494 | if (MACHINE_HAS_IEEE) { |
492 | unsigned long save_area; | 495 | unsigned long save_area; |
493 | 496 | ||
494 | save_area = get_zeroed_page(GFP_KERNEL); | 497 | save_area = get_zeroed_page(GFP_KERNEL); |
495 | if (!save_area) | 498 | if (!save_area) |
496 | goto out; | 499 | goto out; |
497 | lowcore->extended_save_area_addr = (u32) save_area; | 500 | lowcore->extended_save_area_addr = (u32) save_area; |
498 | } | 501 | } |
499 | #else | 502 | #else |
500 | if (vdso_alloc_per_cpu(cpu, lowcore)) | 503 | if (vdso_alloc_per_cpu(cpu, lowcore)) |
501 | goto out; | 504 | goto out; |
502 | #endif | 505 | #endif |
503 | lowcore_ptr[cpu] = lowcore; | 506 | lowcore_ptr[cpu] = lowcore; |
504 | return 0; | 507 | return 0; |
505 | 508 | ||
506 | out: | 509 | out: |
507 | free_page(panic_stack); | 510 | free_page(panic_stack); |
508 | free_pages(async_stack, ASYNC_ORDER); | 511 | free_pages(async_stack, ASYNC_ORDER); |
509 | free_pages((unsigned long) lowcore, LC_ORDER); | 512 | free_pages((unsigned long) lowcore, LC_ORDER); |
510 | return -ENOMEM; | 513 | return -ENOMEM; |
511 | } | 514 | } |
512 | 515 | ||
513 | static void smp_free_lowcore(int cpu) | 516 | static void smp_free_lowcore(int cpu) |
514 | { | 517 | { |
515 | struct _lowcore *lowcore; | 518 | struct _lowcore *lowcore; |
516 | 519 | ||
517 | lowcore = lowcore_ptr[cpu]; | 520 | lowcore = lowcore_ptr[cpu]; |
518 | #ifndef CONFIG_64BIT | 521 | #ifndef CONFIG_64BIT |
519 | if (MACHINE_HAS_IEEE) | 522 | if (MACHINE_HAS_IEEE) |
520 | free_page((unsigned long) lowcore->extended_save_area_addr); | 523 | free_page((unsigned long) lowcore->extended_save_area_addr); |
521 | #else | 524 | #else |
522 | vdso_free_per_cpu(cpu, lowcore); | 525 | vdso_free_per_cpu(cpu, lowcore); |
523 | #endif | 526 | #endif |
524 | free_page(lowcore->panic_stack - PAGE_SIZE); | 527 | free_page(lowcore->panic_stack - PAGE_SIZE); |
525 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); | 528 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); |
526 | free_pages((unsigned long) lowcore, LC_ORDER); | 529 | free_pages((unsigned long) lowcore, LC_ORDER); |
527 | lowcore_ptr[cpu] = NULL; | 530 | lowcore_ptr[cpu] = NULL; |
528 | } | 531 | } |
529 | 532 | ||
530 | /* Upping and downing of CPUs */ | 533 | /* Upping and downing of CPUs */ |
531 | int __cpuinit __cpu_up(unsigned int cpu) | 534 | int __cpuinit __cpu_up(unsigned int cpu) |
532 | { | 535 | { |
533 | struct task_struct *idle; | 536 | struct task_struct *idle; |
534 | struct _lowcore *cpu_lowcore; | 537 | struct _lowcore *cpu_lowcore; |
535 | struct stack_frame *sf; | 538 | struct stack_frame *sf; |
536 | sigp_ccode ccode; | 539 | sigp_ccode ccode; |
537 | u32 lowcore; | 540 | u32 lowcore; |
538 | 541 | ||
539 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) | 542 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) |
540 | return -EIO; | 543 | return -EIO; |
541 | if (smp_alloc_lowcore(cpu)) | 544 | if (smp_alloc_lowcore(cpu)) |
542 | return -ENOMEM; | 545 | return -ENOMEM; |
543 | do { | 546 | do { |
544 | ccode = signal_processor(cpu, sigp_initial_cpu_reset); | 547 | ccode = signal_processor(cpu, sigp_initial_cpu_reset); |
545 | if (ccode == sigp_busy) | 548 | if (ccode == sigp_busy) |
546 | udelay(10); | 549 | udelay(10); |
547 | if (ccode == sigp_not_operational) | 550 | if (ccode == sigp_not_operational) |
548 | goto err_out; | 551 | goto err_out; |
549 | } while (ccode == sigp_busy); | 552 | } while (ccode == sigp_busy); |
550 | 553 | ||
551 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; | 554 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; |
552 | while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) | 555 | while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) |
553 | udelay(10); | 556 | udelay(10); |
554 | 557 | ||
555 | idle = current_set[cpu]; | 558 | idle = current_set[cpu]; |
556 | cpu_lowcore = lowcore_ptr[cpu]; | 559 | cpu_lowcore = lowcore_ptr[cpu]; |
557 | cpu_lowcore->kernel_stack = (unsigned long) | 560 | cpu_lowcore->kernel_stack = (unsigned long) |
558 | task_stack_page(idle) + THREAD_SIZE; | 561 | task_stack_page(idle) + THREAD_SIZE; |
559 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); | 562 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); |
560 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack | 563 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack |
561 | - sizeof(struct pt_regs) | 564 | - sizeof(struct pt_regs) |
562 | - sizeof(struct stack_frame)); | 565 | - sizeof(struct stack_frame)); |
563 | memset(sf, 0, sizeof(struct stack_frame)); | 566 | memset(sf, 0, sizeof(struct stack_frame)); |
564 | sf->gprs[9] = (unsigned long) sf; | 567 | sf->gprs[9] = (unsigned long) sf; |
565 | cpu_lowcore->save_area[15] = (unsigned long) sf; | 568 | cpu_lowcore->save_area[15] = (unsigned long) sf; |
566 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); | 569 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); |
567 | asm volatile( | 570 | asm volatile( |
568 | " stam 0,15,0(%0)" | 571 | " stam 0,15,0(%0)" |
569 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); | 572 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); |
570 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; | 573 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; |
571 | cpu_lowcore->current_task = (unsigned long) idle; | 574 | cpu_lowcore->current_task = (unsigned long) idle; |
572 | cpu_lowcore->cpu_nr = cpu; | 575 | cpu_lowcore->cpu_nr = cpu; |
573 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; | 576 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; |
574 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; | 577 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; |
575 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; | 578 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; |
576 | eieio(); | 579 | eieio(); |
577 | 580 | ||
578 | while (signal_processor(cpu, sigp_restart) == sigp_busy) | 581 | while (signal_processor(cpu, sigp_restart) == sigp_busy) |
579 | udelay(10); | 582 | udelay(10); |
580 | 583 | ||
581 | while (!cpu_online(cpu)) | 584 | while (!cpu_online(cpu)) |
582 | cpu_relax(); | 585 | cpu_relax(); |
583 | return 0; | 586 | return 0; |
584 | 587 | ||
585 | err_out: | 588 | err_out: |
586 | smp_free_lowcore(cpu); | 589 | smp_free_lowcore(cpu); |
587 | return -EIO; | 590 | return -EIO; |
588 | } | 591 | } |
589 | 592 | ||
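The bring-up handshake in __cpu_up() above, condensed:

	/* 1. sigp initial_cpu_reset - force the target into a known state
	 * 2. sigp set_prefix        - point it at the freshly built lowcore
	 * 3. fill the lowcore       - kernel stack, thread_info, cregs, cpu_nr
	 * 4. sigp restart           - the cpu starts running and eventually
	 *                             reaches start_secondary()
	 * 5. spin until cpu_online(cpu) is set by the new cpu itself */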
590 | static int __init setup_possible_cpus(char *s) | 593 | static int __init setup_possible_cpus(char *s) |
591 | { | 594 | { |
592 | int pcpus, cpu; | 595 | int pcpus, cpu; |
593 | 596 | ||
594 | pcpus = simple_strtoul(s, NULL, 0); | 597 | pcpus = simple_strtoul(s, NULL, 0); |
595 | init_cpu_possible(cpumask_of(0)); | 598 | init_cpu_possible(cpumask_of(0)); |
596 | for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) | 599 | for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) |
597 | set_cpu_possible(cpu, true); | 600 | set_cpu_possible(cpu, true); |
598 | return 0; | 601 | return 0; |
599 | } | 602 | } |
600 | early_param("possible_cpus", setup_possible_cpus); | 603 | early_param("possible_cpus", setup_possible_cpus); |
601 | 604 | ||
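Usage sketch for the parameter registered above (value illustrative):

	/* Booting with "possible_cpus=2" on the kernel command line leaves
	 * only cpu 0 and cpu 1 in cpu_possible_map; cpu 0 is always kept. */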
602 | #ifdef CONFIG_HOTPLUG_CPU | 605 | #ifdef CONFIG_HOTPLUG_CPU |
603 | 606 | ||
604 | int __cpu_disable(void) | 607 | int __cpu_disable(void) |
605 | { | 608 | { |
606 | struct ec_creg_mask_parms cr_parms; | 609 | struct ec_creg_mask_parms cr_parms; |
607 | int cpu = smp_processor_id(); | 610 | int cpu = smp_processor_id(); |
608 | 611 | ||
609 | cpu_clear(cpu, cpu_online_map); | 612 | cpu_clear(cpu, cpu_online_map); |
610 | 613 | ||
611 | /* Disable pfault pseudo page faults on this cpu. */ | 614 | /* Disable pfault pseudo page faults on this cpu. */ |
612 | pfault_fini(); | 615 | pfault_fini(); |
613 | 616 | ||
614 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); | 617 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); |
615 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); | 618 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); |
616 | 619 | ||
617 | /* disable all external interrupts */ | 620 | /* disable all external interrupts */ |
618 | cr_parms.orvals[0] = 0; | 621 | cr_parms.orvals[0] = 0; |
619 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | | 622 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | |
620 | 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); | 623 | 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); |
621 | /* disable all I/O interrupts */ | 624 | /* disable all I/O interrupts */ |
622 | cr_parms.orvals[6] = 0; | 625 | cr_parms.orvals[6] = 0; |
623 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | | 626 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | |
624 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); | 627 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); |
625 | /* disable most machine checks */ | 628 | /* disable most machine checks */ |
626 | cr_parms.orvals[14] = 0; | 629 | cr_parms.orvals[14] = 0; |
627 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | | 630 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | |
628 | 1 << 25 | 1 << 24); | 631 | 1 << 25 | 1 << 24); |
629 | 632 | ||
630 | smp_ctl_bit_callback(&cr_parms); | 633 | smp_ctl_bit_callback(&cr_parms); |
631 | 634 | ||
632 | return 0; | 635 | return 0; |
633 | } | 636 | } |
634 | 637 | ||
635 | void __cpu_die(unsigned int cpu) | 638 | void __cpu_die(unsigned int cpu) |
636 | { | 639 | { |
637 | /* Wait until target cpu is down */ | 640 | /* Wait until target cpu is down */ |
638 | while (!cpu_stopped(cpu)) | 641 | while (!cpu_stopped(cpu)) |
639 | cpu_relax(); | 642 | cpu_relax(); |
640 | while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) | 643 | while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) |
641 | udelay(10); | 644 | udelay(10); |
642 | smp_free_lowcore(cpu); | 645 | smp_free_lowcore(cpu); |
643 | pr_info("Processor %d stopped\n", cpu); | 646 | pr_info("Processor %d stopped\n", cpu); |
644 | } | 647 | } |
645 | 648 | ||
646 | void cpu_die(void) | 649 | void cpu_die(void) |
647 | { | 650 | { |
648 | idle_task_exit(); | 651 | idle_task_exit(); |
649 | while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) | 652 | while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) |
650 | cpu_relax(); | 653 | cpu_relax(); |
651 | for (;;); | 654 | for (;;); |
652 | } | 655 | } |
653 | 656 | ||
654 | #endif /* CONFIG_HOTPLUG_CPU */ | 657 | #endif /* CONFIG_HOTPLUG_CPU */ |
655 | 658 | ||
656 | void __init smp_prepare_cpus(unsigned int max_cpus) | 659 | void __init smp_prepare_cpus(unsigned int max_cpus) |
657 | { | 660 | { |
658 | #ifndef CONFIG_64BIT | 661 | #ifndef CONFIG_64BIT |
659 | unsigned long save_area = 0; | 662 | unsigned long save_area = 0; |
660 | #endif | 663 | #endif |
661 | unsigned long async_stack, panic_stack; | 664 | unsigned long async_stack, panic_stack; |
662 | struct _lowcore *lowcore; | 665 | struct _lowcore *lowcore; |
663 | unsigned int cpu; | 666 | unsigned int cpu; |
664 | 667 | ||
665 | smp_detect_cpus(); | 668 | smp_detect_cpus(); |
666 | 669 | ||
667 | /* request the 0x1201 emergency signal external interrupt */ | 670 | /* request the 0x1201 emergency signal external interrupt */ |
668 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 671 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
669 | panic("Couldn't request external interrupt 0x1201"); | 672 | panic("Couldn't request external interrupt 0x1201"); |
670 | print_cpu_info(); | 673 | print_cpu_info(); |
671 | 674 | ||
672 | /* Reallocate current lowcore, but keep its contents. */ | 675 | /* Reallocate current lowcore, but keep its contents. */ |
673 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 676 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
674 | panic_stack = __get_free_page(GFP_KERNEL); | 677 | panic_stack = __get_free_page(GFP_KERNEL); |
675 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | 678 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); |
676 | BUG_ON(!lowcore || !panic_stack || !async_stack); | 679 | BUG_ON(!lowcore || !panic_stack || !async_stack); |
677 | #ifndef CONFIG_64BIT | 680 | #ifndef CONFIG_64BIT |
678 | if (MACHINE_HAS_IEEE) | 681 | if (MACHINE_HAS_IEEE) |
679 | save_area = get_zeroed_page(GFP_KERNEL); | 682 | save_area = get_zeroed_page(GFP_KERNEL); |
680 | #endif | 683 | #endif |
681 | local_irq_disable(); | 684 | local_irq_disable(); |
682 | local_mcck_disable(); | 685 | local_mcck_disable(); |
683 | lowcore_ptr[smp_processor_id()] = lowcore; | 686 | lowcore_ptr[smp_processor_id()] = lowcore; |
684 | *lowcore = S390_lowcore; | 687 | *lowcore = S390_lowcore; |
685 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 688 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
686 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 689 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
687 | #ifndef CONFIG_64BIT | 690 | #ifndef CONFIG_64BIT |
688 | if (MACHINE_HAS_IEEE) | 691 | if (MACHINE_HAS_IEEE) |
689 | lowcore->extended_save_area_addr = (u32) save_area; | 692 | lowcore->extended_save_area_addr = (u32) save_area; |
690 | #endif | 693 | #endif |
691 | set_prefix((u32)(unsigned long) lowcore); | 694 | set_prefix((u32)(unsigned long) lowcore); |
692 | local_mcck_enable(); | 695 | local_mcck_enable(); |
693 | local_irq_enable(); | 696 | local_irq_enable(); |
694 | #ifdef CONFIG_64BIT | 697 | #ifdef CONFIG_64BIT |
695 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) | 698 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) |
696 | BUG(); | 699 | BUG(); |
697 | #endif | 700 | #endif |
698 | for_each_possible_cpu(cpu) | 701 | for_each_possible_cpu(cpu) |
699 | if (cpu != smp_processor_id()) | 702 | if (cpu != smp_processor_id()) |
700 | smp_create_idle(cpu); | 703 | smp_create_idle(cpu); |
701 | } | 704 | } |
702 | 705 | ||
703 | void __init smp_prepare_boot_cpu(void) | 706 | void __init smp_prepare_boot_cpu(void) |
704 | { | 707 | { |
705 | BUG_ON(smp_processor_id() != 0); | 708 | BUG_ON(smp_processor_id() != 0); |
706 | 709 | ||
707 | current_thread_info()->cpu = 0; | 710 | current_thread_info()->cpu = 0; |
708 | cpu_set(0, cpu_present_map); | 711 | cpu_set(0, cpu_present_map); |
709 | cpu_set(0, cpu_online_map); | 712 | cpu_set(0, cpu_online_map); |
710 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 713 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
711 | current_set[0] = current; | 714 | current_set[0] = current; |
712 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | 715 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; |
713 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; | 716 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; |
714 | } | 717 | } |
715 | 718 | ||
716 | void __init smp_cpus_done(unsigned int max_cpus) | 719 | void __init smp_cpus_done(unsigned int max_cpus) |
717 | { | 720 | { |
718 | } | 721 | } |
719 | 722 | ||
720 | void __init smp_setup_processor_id(void) | 723 | void __init smp_setup_processor_id(void) |
721 | { | 724 | { |
722 | S390_lowcore.cpu_nr = 0; | 725 | S390_lowcore.cpu_nr = 0; |
723 | __cpu_logical_map[0] = stap(); | 726 | __cpu_logical_map[0] = stap(); |
724 | } | 727 | } |
725 | 728 | ||
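The stap() used above returns the physical address of the executing cpu; a minimal sketch of such a wrapper around the STAP instruction (the in-tree definition may differ):

	/* Sketch: store the 16-bit cpu address of the running processor. */
	static inline unsigned short sketch_stap(void)
	{
		unsigned short cpu_address;

		asm volatile("stap %0" : "=m" (cpu_address));
		return cpu_address;
	}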
726 | /* | 729 | /* |
727 | * the frequency of the profiling timer can be changed | 730 | * the frequency of the profiling timer can be changed |
728 | * by writing a multiplier value into /proc/profile. | 731 | * by writing a multiplier value into /proc/profile. |
729 | * | 732 | * |
730 | * usually you want to run this on all CPUs ;) | 733 | * usually you want to run this on all CPUs ;) |
731 | */ | 734 | */ |
732 | int setup_profiling_timer(unsigned int multiplier) | 735 | int setup_profiling_timer(unsigned int multiplier) |
733 | { | 736 | { |
734 | return 0; | 737 | return 0; |
735 | } | 738 | } |
736 | 739 | ||
737 | #ifdef CONFIG_HOTPLUG_CPU | 740 | #ifdef CONFIG_HOTPLUG_CPU |
738 | static ssize_t cpu_configure_show(struct sys_device *dev, | 741 | static ssize_t cpu_configure_show(struct sys_device *dev, |
739 | struct sysdev_attribute *attr, char *buf) | 742 | struct sysdev_attribute *attr, char *buf) |
740 | { | 743 | { |
741 | ssize_t count; | 744 | ssize_t count; |
742 | 745 | ||
743 | mutex_lock(&smp_cpu_state_mutex); | 746 | mutex_lock(&smp_cpu_state_mutex); |
744 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); | 747 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); |
745 | mutex_unlock(&smp_cpu_state_mutex); | 748 | mutex_unlock(&smp_cpu_state_mutex); |
746 | return count; | 749 | return count; |
747 | } | 750 | } |
748 | 751 | ||
749 | static ssize_t cpu_configure_store(struct sys_device *dev, | 752 | static ssize_t cpu_configure_store(struct sys_device *dev, |
750 | struct sysdev_attribute *attr, | 753 | struct sysdev_attribute *attr, |
751 | const char *buf, size_t count) | 754 | const char *buf, size_t count) |
752 | { | 755 | { |
753 | int cpu = dev->id; | 756 | int cpu = dev->id; |
754 | int val, rc; | 757 | int val, rc; |
755 | char delim; | 758 | char delim; |
756 | 759 | ||
757 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 760 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
758 | return -EINVAL; | 761 | return -EINVAL; |
759 | if (val != 0 && val != 1) | 762 | if (val != 0 && val != 1) |
760 | return -EINVAL; | 763 | return -EINVAL; |
761 | 764 | ||
762 | get_online_cpus(); | 765 | get_online_cpus(); |
763 | mutex_lock(&smp_cpu_state_mutex); | 766 | mutex_lock(&smp_cpu_state_mutex); |
764 | rc = -EBUSY; | 767 | rc = -EBUSY; |
765 | if (cpu_online(cpu)) | 768 | if (cpu_online(cpu)) |
766 | goto out; | 769 | goto out; |
767 | rc = 0; | 770 | rc = 0; |
768 | switch (val) { | 771 | switch (val) { |
769 | case 0: | 772 | case 0: |
770 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | 773 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { |
771 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | 774 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); |
772 | if (!rc) { | 775 | if (!rc) { |
773 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 776 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; |
774 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 777 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
775 | } | 778 | } |
776 | } | 779 | } |
777 | break; | 780 | break; |
778 | case 1: | 781 | case 1: |
779 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | 782 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { |
780 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | 783 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); |
781 | if (!rc) { | 784 | if (!rc) { |
782 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 785 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; |
783 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 786 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
784 | } | 787 | } |
785 | } | 788 | } |
786 | break; | 789 | break; |
787 | default: | 790 | default: |
788 | break; | 791 | break; |
789 | } | 792 | } |
790 | out: | 793 | out: |
791 | mutex_unlock(&smp_cpu_state_mutex); | 794 | mutex_unlock(&smp_cpu_state_mutex); |
792 | put_online_cpus(); | 795 | put_online_cpus(); |
793 | return rc ? rc : count; | 796 | return rc ? rc : count; |
794 | } | 797 | } |
795 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | 798 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); |
796 | #endif /* CONFIG_HOTPLUG_CPU */ | 799 | #endif /* CONFIG_HOTPLUG_CPU */ |
797 | 800 | ||
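cpu_configure_store() backs a per-cpu sysfs attribute: writing "0" deconfigures a configured cpu through SCLP, writing "1" configures a standby one, and either write is refused with -EBUSY while the cpu is online. A usage sketch from user space (cpu2 is a placeholder; the attribute lives under the sysdev cpu directory):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* The cpu must be offline first (echo 0 > .../cpu2/online). */
            int fd = open("/sys/devices/system/cpu/cpu2/configure", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) < 0)      /* "1": configure a standby cpu */
                    perror("write");        /* EBUSY while the cpu is online */
            close(fd);
            return 0;
    }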
798 | static ssize_t cpu_polarization_show(struct sys_device *dev, | 801 | static ssize_t cpu_polarization_show(struct sys_device *dev, |
799 | struct sysdev_attribute *attr, char *buf) | 802 | struct sysdev_attribute *attr, char *buf) |
800 | { | 803 | { |
801 | int cpu = dev->id; | 804 | int cpu = dev->id; |
802 | ssize_t count; | 805 | ssize_t count; |
803 | 806 | ||
804 | mutex_lock(&smp_cpu_state_mutex); | 807 | mutex_lock(&smp_cpu_state_mutex); |
805 | switch (smp_cpu_polarization[cpu]) { | 808 | switch (smp_cpu_polarization[cpu]) { |
806 | case POLARIZATION_HRZ: | 809 | case POLARIZATION_HRZ: |
807 | count = sprintf(buf, "horizontal\n"); | 810 | count = sprintf(buf, "horizontal\n"); |
808 | break; | 811 | break; |
809 | case POLARIZATION_VL: | 812 | case POLARIZATION_VL: |
810 | count = sprintf(buf, "vertical:low\n"); | 813 | count = sprintf(buf, "vertical:low\n"); |
811 | break; | 814 | break; |
812 | case POLARIZATION_VM: | 815 | case POLARIZATION_VM: |
813 | count = sprintf(buf, "vertical:medium\n"); | 816 | count = sprintf(buf, "vertical:medium\n"); |
814 | break; | 817 | break; |
815 | case POLARIZATION_VH: | 818 | case POLARIZATION_VH: |
816 | count = sprintf(buf, "vertical:high\n"); | 819 | count = sprintf(buf, "vertical:high\n"); |
817 | break; | 820 | break; |
818 | default: | 821 | default: |
819 | count = sprintf(buf, "unknown\n"); | 822 | count = sprintf(buf, "unknown\n"); |
820 | break; | 823 | break; |
821 | } | 824 | } |
822 | mutex_unlock(&smp_cpu_state_mutex); | 825 | mutex_unlock(&smp_cpu_state_mutex); |
823 | return count; | 826 | return count; |
824 | } | 827 | } |
825 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); | 828 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); |
826 | 829 | ||
827 | static ssize_t show_cpu_address(struct sys_device *dev, | 830 | static ssize_t show_cpu_address(struct sys_device *dev, |
828 | struct sysdev_attribute *attr, char *buf) | 831 | struct sysdev_attribute *attr, char *buf) |
829 | { | 832 | { |
830 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | 833 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); |
831 | } | 834 | } |
832 | static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); | 835 | static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); |
833 | 836 | ||
834 | 837 | ||
835 | static struct attribute *cpu_common_attrs[] = { | 838 | static struct attribute *cpu_common_attrs[] = { |
836 | #ifdef CONFIG_HOTPLUG_CPU | 839 | #ifdef CONFIG_HOTPLUG_CPU |
837 | &attr_configure.attr, | 840 | &attr_configure.attr, |
838 | #endif | 841 | #endif |
839 | &attr_address.attr, | 842 | &attr_address.attr, |
840 | &attr_polarization.attr, | 843 | &attr_polarization.attr, |
841 | NULL, | 844 | NULL, |
842 | }; | 845 | }; |
843 | 846 | ||
844 | static struct attribute_group cpu_common_attr_group = { | 847 | static struct attribute_group cpu_common_attr_group = { |
845 | .attrs = cpu_common_attrs, | 848 | .attrs = cpu_common_attrs, |
846 | }; | 849 | }; |
847 | 850 | ||
848 | static ssize_t show_capability(struct sys_device *dev, | 851 | static ssize_t show_capability(struct sys_device *dev, |
849 | struct sysdev_attribute *attr, char *buf) | 852 | struct sysdev_attribute *attr, char *buf) |
850 | { | 853 | { |
851 | unsigned int capability; | 854 | unsigned int capability; |
852 | int rc; | 855 | int rc; |
853 | 856 | ||
854 | rc = get_cpu_capability(&capability); | 857 | rc = get_cpu_capability(&capability); |
855 | if (rc) | 858 | if (rc) |
856 | return rc; | 859 | return rc; |
857 | return sprintf(buf, "%u\n", capability); | 860 | return sprintf(buf, "%u\n", capability); |
858 | } | 861 | } |
859 | static SYSDEV_ATTR(capability, 0444, show_capability, NULL); | 862 | static SYSDEV_ATTR(capability, 0444, show_capability, NULL); |
860 | 863 | ||
861 | static ssize_t show_idle_count(struct sys_device *dev, | 864 | static ssize_t show_idle_count(struct sys_device *dev, |
862 | struct sysdev_attribute *attr, char *buf) | 865 | struct sysdev_attribute *attr, char *buf) |
863 | { | 866 | { |
864 | struct s390_idle_data *idle; | 867 | struct s390_idle_data *idle; |
865 | unsigned long long idle_count; | 868 | unsigned long long idle_count; |
866 | unsigned int sequence; | 869 | unsigned int sequence; |
867 | 870 | ||
868 | idle = &per_cpu(s390_idle, dev->id); | 871 | idle = &per_cpu(s390_idle, dev->id); |
869 | repeat: | 872 | repeat: |
870 | sequence = idle->sequence; | 873 | sequence = idle->sequence; |
871 | smp_rmb(); | 874 | smp_rmb(); |
872 | if (sequence & 1) | 875 | if (sequence & 1) |
873 | goto repeat; | 876 | goto repeat; |
874 | idle_count = idle->idle_count; | 877 | idle_count = idle->idle_count; |
875 | if (idle->idle_enter) | 878 | if (idle->idle_enter) |
876 | idle_count++; | 879 | idle_count++; |
877 | smp_rmb(); | 880 | smp_rmb(); |
878 | if (idle->sequence != sequence) | 881 | if (idle->sequence != sequence) |
879 | goto repeat; | 882 | goto repeat; |
880 | return sprintf(buf, "%llu\n", idle_count); | 883 | return sprintf(buf, "%llu\n", idle_count); |
881 | } | 884 | } |
882 | static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); | 885 | static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); |
883 | 886 | ||
884 | static ssize_t show_idle_time(struct sys_device *dev, | 887 | static ssize_t show_idle_time(struct sys_device *dev, |
885 | struct sysdev_attribute *attr, char *buf) | 888 | struct sysdev_attribute *attr, char *buf) |
886 | { | 889 | { |
887 | struct s390_idle_data *idle; | 890 | struct s390_idle_data *idle; |
888 | unsigned long long now, idle_time, idle_enter; | 891 | unsigned long long now, idle_time, idle_enter; |
889 | unsigned int sequence; | 892 | unsigned int sequence; |
890 | 893 | ||
891 | idle = &per_cpu(s390_idle, dev->id); | 894 | idle = &per_cpu(s390_idle, dev->id); |
892 | now = get_clock(); | 895 | now = get_clock(); |
893 | repeat: | 896 | repeat: |
894 | sequence = idle->sequence; | 897 | sequence = idle->sequence; |
895 | smp_rmb(); | 898 | smp_rmb(); |
896 | if (sequence & 1) | 899 | if (sequence & 1) |
897 | goto repeat; | 900 | goto repeat; |
898 | idle_time = idle->idle_time; | 901 | idle_time = idle->idle_time; |
899 | idle_enter = idle->idle_enter; | 902 | idle_enter = idle->idle_enter; |
900 | if (idle_enter != 0ULL && idle_enter < now) | 903 | if (idle_enter != 0ULL && idle_enter < now) |
901 | idle_time += now - idle_enter; | 904 | idle_time += now - idle_enter; |
902 | smp_rmb(); | 905 | smp_rmb(); |
903 | if (idle->sequence != sequence) | 906 | if (idle->sequence != sequence) |
904 | goto repeat; | 907 | goto repeat; |
905 | return sprintf(buf, "%llu\n", idle_time >> 12); | 908 | return sprintf(buf, "%llu\n", idle_time >> 12); |
906 | } | 909 | } |
907 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); | 910 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); |
908 | 911 | ||
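show_idle_count() and show_idle_time() are lockless readers: they snapshot idle->sequence, read the per-cpu fields, and retry whenever the counter is odd or has changed underneath them, i.e. a hand-rolled sequence counter. A sketch of the matching writer side, assuming the usual seqcount protocol (this is not the kernel's actual idle-accounting path; only the field names are taken from the struct used above):

    static void s390_idle_update(struct s390_idle_data *idle,
                                 unsigned long long enter,
                                 unsigned long long exit)
    {
            idle->sequence++;       /* odd: readers loop at 'repeat' */
            smp_wmb();
            idle->idle_time += exit - enter;
            idle->idle_count++;
            idle->idle_enter = 0ULL;
            smp_wmb();
            idle->sequence++;       /* even again: snapshot is consistent */
    }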
909 | static struct attribute *cpu_online_attrs[] = { | 912 | static struct attribute *cpu_online_attrs[] = { |
910 | &attr_capability.attr, | 913 | &attr_capability.attr, |
911 | &attr_idle_count.attr, | 914 | &attr_idle_count.attr, |
912 | &attr_idle_time_us.attr, | 915 | &attr_idle_time_us.attr, |
913 | NULL, | 916 | NULL, |
914 | }; | 917 | }; |
915 | 918 | ||
916 | static struct attribute_group cpu_online_attr_group = { | 919 | static struct attribute_group cpu_online_attr_group = { |
917 | .attrs = cpu_online_attrs, | 920 | .attrs = cpu_online_attrs, |
918 | }; | 921 | }; |
919 | 922 | ||
920 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, | 923 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, |
921 | unsigned long action, void *hcpu) | 924 | unsigned long action, void *hcpu) |
922 | { | 925 | { |
923 | unsigned int cpu = (unsigned int)(long)hcpu; | 926 | unsigned int cpu = (unsigned int)(long)hcpu; |
924 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 927 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
925 | struct sys_device *s = &c->sysdev; | 928 | struct sys_device *s = &c->sysdev; |
926 | struct s390_idle_data *idle; | 929 | struct s390_idle_data *idle; |
927 | 930 | ||
928 | switch (action) { | 931 | switch (action) { |
929 | case CPU_ONLINE: | 932 | case CPU_ONLINE: |
930 | case CPU_ONLINE_FROZEN: | 933 | case CPU_ONLINE_FROZEN: |
931 | idle = &per_cpu(s390_idle, cpu); | 934 | idle = &per_cpu(s390_idle, cpu); |
932 | memset(idle, 0, sizeof(struct s390_idle_data)); | 935 | memset(idle, 0, sizeof(struct s390_idle_data)); |
933 | if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) | 936 | if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) |
934 | return NOTIFY_BAD; | 937 | return NOTIFY_BAD; |
935 | break; | 938 | break; |
936 | case CPU_DEAD: | 939 | case CPU_DEAD: |
937 | case CPU_DEAD_FROZEN: | 940 | case CPU_DEAD_FROZEN: |
938 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); | 941 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); |
939 | break; | 942 | break; |
940 | } | 943 | } |
941 | return NOTIFY_OK; | 944 | return NOTIFY_OK; |
942 | } | 945 | } |
943 | 946 | ||
944 | static struct notifier_block __cpuinitdata smp_cpu_nb = { | 947 | static struct notifier_block __cpuinitdata smp_cpu_nb = { |
945 | .notifier_call = smp_cpu_notify, | 948 | .notifier_call = smp_cpu_notify, |
946 | }; | 949 | }; |
947 | 950 | ||
948 | static int __devinit smp_add_present_cpu(int cpu) | 951 | static int __devinit smp_add_present_cpu(int cpu) |
949 | { | 952 | { |
950 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 953 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
951 | struct sys_device *s = &c->sysdev; | 954 | struct sys_device *s = &c->sysdev; |
952 | int rc; | 955 | int rc; |
953 | 956 | ||
954 | c->hotpluggable = 1; | 957 | c->hotpluggable = 1; |
955 | rc = register_cpu(c, cpu); | 958 | rc = register_cpu(c, cpu); |
956 | if (rc) | 959 | if (rc) |
957 | goto out; | 960 | goto out; |
958 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); | 961 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); |
959 | if (rc) | 962 | if (rc) |
960 | goto out_cpu; | 963 | goto out_cpu; |
961 | if (!cpu_online(cpu)) | 964 | if (!cpu_online(cpu)) |
962 | goto out; | 965 | goto out; |
963 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | 966 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); |
964 | if (!rc) | 967 | if (!rc) |
965 | return 0; | 968 | return 0; |
966 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); | 969 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); |
967 | out_cpu: | 970 | out_cpu: |
968 | #ifdef CONFIG_HOTPLUG_CPU | 971 | #ifdef CONFIG_HOTPLUG_CPU |
969 | unregister_cpu(c); | 972 | unregister_cpu(c); |
970 | #endif | 973 | #endif |
971 | out: | 974 | out: |
972 | return rc; | 975 | return rc; |
973 | } | 976 | } |
974 | 977 | ||
975 | #ifdef CONFIG_HOTPLUG_CPU | 978 | #ifdef CONFIG_HOTPLUG_CPU |
976 | 979 | ||
977 | int __ref smp_rescan_cpus(void) | 980 | int __ref smp_rescan_cpus(void) |
978 | { | 981 | { |
979 | cpumask_t newcpus; | 982 | cpumask_t newcpus; |
980 | int cpu; | 983 | int cpu; |
981 | int rc; | 984 | int rc; |
982 | 985 | ||
983 | get_online_cpus(); | 986 | get_online_cpus(); |
984 | mutex_lock(&smp_cpu_state_mutex); | 987 | mutex_lock(&smp_cpu_state_mutex); |
985 | newcpus = cpu_present_map; | 988 | newcpus = cpu_present_map; |
986 | rc = __smp_rescan_cpus(); | 989 | rc = __smp_rescan_cpus(); |
987 | if (rc) | 990 | if (rc) |
988 | goto out; | 991 | goto out; |
989 | cpus_andnot(newcpus, cpu_present_map, newcpus); | 992 | cpus_andnot(newcpus, cpu_present_map, newcpus); |
990 | for_each_cpu_mask(cpu, newcpus) { | 993 | for_each_cpu_mask(cpu, newcpus) { |
991 | rc = smp_add_present_cpu(cpu); | 994 | rc = smp_add_present_cpu(cpu); |
992 | if (rc) | 995 | if (rc) |
993 | cpu_clear(cpu, cpu_present_map); | 996 | cpu_clear(cpu, cpu_present_map); |
994 | } | 997 | } |
995 | rc = 0; | 998 | rc = 0; |
996 | out: | 999 | out: |
997 | mutex_unlock(&smp_cpu_state_mutex); | 1000 | mutex_unlock(&smp_cpu_state_mutex); |
998 | put_online_cpus(); | 1001 | put_online_cpus(); |
999 | if (!cpus_empty(newcpus)) | 1002 | if (!cpus_empty(newcpus)) |
1000 | topology_schedule_update(); | 1003 | topology_schedule_update(); |
1001 | return rc; | 1004 | return rc; |
1002 | } | 1005 | } |
1003 | 1006 | ||
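The heart of smp_rescan_cpus() is one line of mask arithmetic; worked through with hypothetical cpu numbers:

    /*
     * Snapshot: newcpus = cpu_present_map = {0,1,2,3}.
     * __smp_rescan_cpus() detects two more cpus, growing
     * cpu_present_map to {0,1,2,3,4,5}; then
     *     cpus_andnot(newcpus, cpu_present_map, newcpus);
     * leaves newcpus = {4,5} -- exactly the cpus that still need a
     * sysfs node from smp_add_present_cpu().
     */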
1004 | static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, | 1007 | static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, |
1005 | size_t count) | 1008 | size_t count) |
1006 | { | 1009 | { |
1007 | int rc; | 1010 | int rc; |
1008 | 1011 | ||
1009 | rc = smp_rescan_cpus(); | 1012 | rc = smp_rescan_cpus(); |
1010 | return rc ? rc : count; | 1013 | return rc ? rc : count; |
1011 | } | 1014 | } |
1012 | static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); | 1015 | static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); |
1013 | #endif /* CONFIG_HOTPLUG_CPU */ | 1016 | #endif /* CONFIG_HOTPLUG_CPU */ |
1014 | 1017 | ||
1015 | static ssize_t dispatching_show(struct sysdev_class *class, char *buf) | 1018 | static ssize_t dispatching_show(struct sysdev_class *class, char *buf) |
1016 | { | 1019 | { |
1017 | ssize_t count; | 1020 | ssize_t count; |
1018 | 1021 | ||
1019 | mutex_lock(&smp_cpu_state_mutex); | 1022 | mutex_lock(&smp_cpu_state_mutex); |
1020 | count = sprintf(buf, "%d\n", cpu_management); | 1023 | count = sprintf(buf, "%d\n", cpu_management); |
1021 | mutex_unlock(&smp_cpu_state_mutex); | 1024 | mutex_unlock(&smp_cpu_state_mutex); |
1022 | return count; | 1025 | return count; |
1023 | } | 1026 | } |
1024 | 1027 | ||
1025 | static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, | 1028 | static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, |
1026 | size_t count) | 1029 | size_t count) |
1027 | { | 1030 | { |
1028 | int val, rc; | 1031 | int val, rc; |
1029 | char delim; | 1032 | char delim; |
1030 | 1033 | ||
1031 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 1034 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
1032 | return -EINVAL; | 1035 | return -EINVAL; |
1033 | if (val != 0 && val != 1) | 1036 | if (val != 0 && val != 1) |
1034 | return -EINVAL; | 1037 | return -EINVAL; |
1035 | rc = 0; | 1038 | rc = 0; |
1036 | get_online_cpus(); | 1039 | get_online_cpus(); |
1037 | mutex_lock(&smp_cpu_state_mutex); | 1040 | mutex_lock(&smp_cpu_state_mutex); |
1038 | if (cpu_management == val) | 1041 | if (cpu_management == val) |
1039 | goto out; | 1042 | goto out; |
1040 | rc = topology_set_cpu_management(val); | 1043 | rc = topology_set_cpu_management(val); |
1041 | if (!rc) | 1044 | if (!rc) |
1042 | cpu_management = val; | 1045 | cpu_management = val; |
1043 | out: | 1046 | out: |
1044 | mutex_unlock(&smp_cpu_state_mutex); | 1047 | mutex_unlock(&smp_cpu_state_mutex); |
1045 | put_online_cpus(); | 1048 | put_online_cpus(); |
1046 | return rc ? rc : count; | 1049 | return rc ? rc : count; |
1047 | } | 1050 | } |
1048 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, | 1051 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, |
1049 | dispatching_store); | 1052 | dispatching_store); |
1050 | 1053 | ||
1051 | static int __init topology_init(void) | 1054 | static int __init topology_init(void) |
1052 | { | 1055 | { |
1053 | int cpu; | 1056 | int cpu; |
1054 | int rc; | 1057 | int rc; |
1055 | 1058 | ||
1056 | register_cpu_notifier(&smp_cpu_nb); | 1059 | register_cpu_notifier(&smp_cpu_nb); |
1057 | 1060 | ||
1058 | #ifdef CONFIG_HOTPLUG_CPU | 1061 | #ifdef CONFIG_HOTPLUG_CPU |
1059 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); | 1062 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); |
1060 | if (rc) | 1063 | if (rc) |
1061 | return rc; | 1064 | return rc; |
1062 | #endif | 1065 | #endif |
1063 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); | 1066 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); |
1064 | if (rc) | 1067 | if (rc) |
1065 | return rc; | 1068 | return rc; |
1066 | for_each_present_cpu(cpu) { | 1069 | for_each_present_cpu(cpu) { |
1067 | rc = smp_add_present_cpu(cpu); | 1070 | rc = smp_add_present_cpu(cpu); |
1068 | if (rc) | 1071 | if (rc) |
1069 | return rc; | 1072 | return rc; |
1070 | } | 1073 | } |
1071 | return 0; | 1074 | return 0; |
1072 | } | 1075 | } |
1073 | subsys_initcall(topology_init); | 1076 | subsys_initcall(topology_init); |
1074 | 1077 ||
arch/s390/kernel/topology.c
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2007 | 2 | * Copyright IBM Corp. 2007 |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define KMSG_COMPONENT "cpu" | 6 | #define KMSG_COMPONENT "cpu" |
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/cpuset.h> | 18 | #include <linux/cpuset.h> |
19 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
20 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
21 | #include <asm/sysinfo.h> | 21 | #include <asm/sysinfo.h> |
22 | 22 | ||
23 | #define CPU_BITS 64 | 23 | #define CPU_BITS 64 |
24 | #define NR_MAG 6 | 24 | #define NR_MAG 6 |
25 | 25 | ||
26 | #define PTF_HORIZONTAL (0UL) | 26 | #define PTF_HORIZONTAL (0UL) |
27 | #define PTF_VERTICAL (1UL) | 27 | #define PTF_VERTICAL (1UL) |
28 | #define PTF_CHECK (2UL) | 28 | #define PTF_CHECK (2UL) |
29 | 29 | ||
30 | struct tl_cpu { | 30 | struct tl_cpu { |
31 | unsigned char reserved0[4]; | 31 | unsigned char reserved0[4]; |
32 | unsigned char :6; | 32 | unsigned char :6; |
33 | unsigned char pp:2; | 33 | unsigned char pp:2; |
34 | unsigned char reserved1; | 34 | unsigned char reserved1; |
35 | unsigned short origin; | 35 | unsigned short origin; |
36 | unsigned long mask[CPU_BITS / BITS_PER_LONG]; | 36 | unsigned long mask[CPU_BITS / BITS_PER_LONG]; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | struct tl_container { | 39 | struct tl_container { |
40 | unsigned char reserved[8]; | 40 | unsigned char reserved[8]; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | union tl_entry { | 43 | union tl_entry { |
44 | unsigned char nl; | 44 | unsigned char nl; |
45 | struct tl_cpu cpu; | 45 | struct tl_cpu cpu; |
46 | struct tl_container container; | 46 | struct tl_container container; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct tl_info { | 49 | struct tl_info { |
50 | unsigned char reserved0[2]; | 50 | unsigned char reserved0[2]; |
51 | unsigned short length; | 51 | unsigned short length; |
52 | unsigned char mag[NR_MAG]; | 52 | unsigned char mag[NR_MAG]; |
53 | unsigned char reserved1; | 53 | unsigned char reserved1; |
54 | unsigned char mnest; | 54 | unsigned char mnest; |
55 | unsigned char reserved2[4]; | 55 | unsigned char reserved2[4]; |
56 | union tl_entry tle[0]; | 56 | union tl_entry tle[0]; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct core_info { | 59 | struct core_info { |
60 | struct core_info *next; | 60 | struct core_info *next; |
61 | cpumask_t mask; | 61 | cpumask_t mask; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static int topology_enabled; | 64 | static int topology_enabled; |
65 | static void topology_work_fn(struct work_struct *work); | 65 | static void topology_work_fn(struct work_struct *work); |
66 | static struct tl_info *tl_info; | 66 | static struct tl_info *tl_info; |
67 | static struct core_info core_info; | 67 | static struct core_info core_info; |
68 | static int machine_has_topology; | 68 | static int machine_has_topology; |
69 | static struct timer_list topology_timer; | 69 | static struct timer_list topology_timer; |
70 | static void set_topology_timer(void); | 70 | static void set_topology_timer(void); |
71 | static DECLARE_WORK(topology_work, topology_work_fn); | 71 | static DECLARE_WORK(topology_work, topology_work_fn); |
72 | /* topology_lock protects the core linked list */ | 72 | /* topology_lock protects the core linked list */ |
73 | static DEFINE_SPINLOCK(topology_lock); | 73 | static DEFINE_SPINLOCK(topology_lock); |
74 | 74 | ||
75 | cpumask_t cpu_core_map[NR_CPUS]; | 75 | cpumask_t cpu_core_map[NR_CPUS]; |
76 | 76 | ||
77 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | 77 | static cpumask_t cpu_coregroup_map(unsigned int cpu) |
78 | { | 78 | { |
79 | struct core_info *core = &core_info; | 79 | struct core_info *core = &core_info; |
80 | unsigned long flags; | 80 | unsigned long flags; |
81 | cpumask_t mask; | 81 | cpumask_t mask; |
82 | 82 | ||
83 | cpus_clear(mask); | 83 | cpus_clear(mask); |
84 | if (!topology_enabled || !machine_has_topology) | 84 | if (!topology_enabled || !machine_has_topology) |
85 | return cpu_possible_map; | 85 | return cpu_possible_map; |
86 | spin_lock_irqsave(&topology_lock, flags); | 86 | spin_lock_irqsave(&topology_lock, flags); |
87 | while (core) { | 87 | while (core) { |
88 | if (cpu_isset(cpu, core->mask)) { | 88 | if (cpu_isset(cpu, core->mask)) { |
89 | mask = core->mask; | 89 | mask = core->mask; |
90 | break; | 90 | break; |
91 | } | 91 | } |
92 | core = core->next; | 92 | core = core->next; |
93 | } | 93 | } |
94 | spin_unlock_irqrestore(&topology_lock, flags); | 94 | spin_unlock_irqrestore(&topology_lock, flags); |
95 | if (cpus_empty(mask)) | 95 | if (cpus_empty(mask)) |
96 | mask = cpumask_of_cpu(cpu); | 96 | mask = cpumask_of_cpu(cpu); |
97 | return mask; | 97 | return mask; |
98 | } | 98 | } |
99 | 99 | ||
100 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | 100 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) |
101 | { | 101 | { |
102 | return &cpu_core_map[cpu]; | 102 | return &cpu_core_map[cpu]; |
103 | } | 103 | } |
104 | 104 | ||
105 | static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | 105 | static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) |
106 | { | 106 | { |
107 | unsigned int cpu; | 107 | unsigned int cpu; |
108 | 108 | ||
109 | for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); | 109 | for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); |
110 | cpu < CPU_BITS; | 110 | cpu < CPU_BITS; |
111 | cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) | 111 | cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) |
112 | { | 112 | { |
113 | unsigned int rcpu, lcpu; | 113 | unsigned int rcpu, lcpu; |
114 | 114 | ||
115 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; | 115 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; |
116 | for_each_present_cpu(lcpu) { | 116 | for_each_present_cpu(lcpu) { |
117 | if (__cpu_logical_map[lcpu] == rcpu) { | 117 | if (cpu_logical_map(lcpu) == rcpu) { |
118 | cpu_set(lcpu, core->mask); | 118 | cpu_set(lcpu, core->mask); |
119 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | 119 | smp_cpu_polarization[lcpu] = tl_cpu->pp; |
120 | } | 120 | } |
121 | } | 121 | } |
122 | } | 122 | } |
123 | } | 123 | } |
124 | 124 | ||
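The index arithmetic in add_cpus_to_core() bridges two bit-numbering conventions: the STSI topology mask counts cpu addresses from the most significant bit, while find_{first,next}_bit() count from bit 0. A worked example with hypothetical values:

    /*
     * origin = 16, CPU_BITS = 64:
     *   Linux bit 63 set  ->  rcpu = 64 - 1 - 63 + 16 = 16
     *   Linux bit 61 set  ->  rcpu = 64 - 1 - 61 + 16 = 18
     * The leftmost mask bit is thus the first cpu address of the
     * block, and cpu_logical_map(lcpu) is compared against rcpu to
     * find the matching logical cpu.
     */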
125 | static void clear_cores(void) | 125 | static void clear_cores(void) |
126 | { | 126 | { |
127 | struct core_info *core = &core_info; | 127 | struct core_info *core = &core_info; |
128 | 128 | ||
129 | while (core) { | 129 | while (core) { |
130 | cpus_clear(core->mask); | 130 | cpus_clear(core->mask); |
131 | core = core->next; | 131 | core = core->next; |
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | static union tl_entry *next_tle(union tl_entry *tle) | 135 | static union tl_entry *next_tle(union tl_entry *tle) |
136 | { | 136 | { |
137 | if (tle->nl) | 137 | if (tle->nl) |
138 | return (union tl_entry *)((struct tl_container *)tle + 1); | 138 | return (union tl_entry *)((struct tl_container *)tle + 1); |
139 | else | 139 | else |
140 | return (union tl_entry *)((struct tl_cpu *)tle + 1); | 140 | return (union tl_entry *)((struct tl_cpu *)tle + 1); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void tl_to_cores(struct tl_info *info) | 143 | static void tl_to_cores(struct tl_info *info) |
144 | { | 144 | { |
145 | union tl_entry *tle, *end; | 145 | union tl_entry *tle, *end; |
146 | struct core_info *core = &core_info; | 146 | struct core_info *core = &core_info; |
147 | 147 | ||
148 | spin_lock_irq(&topology_lock); | 148 | spin_lock_irq(&topology_lock); |
149 | clear_cores(); | 149 | clear_cores(); |
150 | tle = info->tle; | 150 | tle = info->tle; |
151 | end = (union tl_entry *)((unsigned long)info + info->length); | 151 | end = (union tl_entry *)((unsigned long)info + info->length); |
152 | while (tle < end) { | 152 | while (tle < end) { |
153 | switch (tle->nl) { | 153 | switch (tle->nl) { |
154 | case 5: | 154 | case 5: |
155 | case 4: | 155 | case 4: |
156 | case 3: | 156 | case 3: |
157 | case 2: | 157 | case 2: |
158 | break; | 158 | break; |
159 | case 1: | 159 | case 1: |
160 | core = core->next; | 160 | core = core->next; |
161 | break; | 161 | break; |
162 | case 0: | 162 | case 0: |
163 | add_cpus_to_core(&tle->cpu, core); | 163 | add_cpus_to_core(&tle->cpu, core); |
164 | break; | 164 | break; |
165 | default: | 165 | default: |
166 | clear_cores(); | 166 | clear_cores(); |
167 | machine_has_topology = 0; | 167 | machine_has_topology = 0; |
168 | return; | 168 | return; |
169 | } | 169 | } |
170 | tle = next_tle(tle); | 170 | tle = next_tle(tle); |
171 | } | 171 | } |
172 | spin_unlock_irq(&topology_lock); | 172 | spin_unlock_irq(&topology_lock); |
173 | } | 173 | } |
174 | 174 | ||
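next_tle() steps through a variable-length entry stream: an entry with nl != 0 is an 8-byte container, nl == 0 a 16-byte cpu entry (the sizes of struct tl_container and struct tl_cpu above). tl_to_cores() interprets the stream roughly like this hypothetical layout; note, as an aside, that the default: error path returns with topology_lock still held, a quirk reworked in later kernels:

    /*
     * nl=1 container ( 8 bytes)  -> advance to the next core_info
     * nl=0 cpu       (16 bytes)  -> add_cpus_to_core() fills its mask
     * nl=0 cpu       (16 bytes)
     * nl=1 container ( 8 bytes)  -> next core
     * nl=0 cpu       (16 bytes)
     * nl=2..5        ( 8 bytes)  -> higher nesting levels, skipped
     * anything else             -> malformed: clear_cores(), disable
     */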
175 | static void topology_update_polarization_simple(void) | 175 | static void topology_update_polarization_simple(void) |
176 | { | 176 | { |
177 | int cpu; | 177 | int cpu; |
178 | 178 | ||
179 | mutex_lock(&smp_cpu_state_mutex); | 179 | mutex_lock(&smp_cpu_state_mutex); |
180 | for_each_possible_cpu(cpu) | 180 | for_each_possible_cpu(cpu) |
181 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | 181 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; |
182 | mutex_unlock(&smp_cpu_state_mutex); | 182 | mutex_unlock(&smp_cpu_state_mutex); |
183 | } | 183 | } |
184 | 184 | ||
185 | static int ptf(unsigned long fc) | 185 | static int ptf(unsigned long fc) |
186 | { | 186 | { |
187 | int rc; | 187 | int rc; |
188 | 188 | ||
189 | asm volatile( | 189 | asm volatile( |
190 | " .insn rre,0xb9a20000,%1,%1\n" | 190 | " .insn rre,0xb9a20000,%1,%1\n" |
191 | " ipm %0\n" | 191 | " ipm %0\n" |
192 | " srl %0,28\n" | 192 | " srl %0,28\n" |
193 | : "=d" (rc) | 193 | : "=d" (rc) |
194 | : "d" (fc) : "cc"); | 194 | : "d" (fc) : "cc"); |
195 | return rc; | 195 | return rc; |
196 | } | 196 | } |
197 | 197 | ||
198 | int topology_set_cpu_management(int fc) | 198 | int topology_set_cpu_management(int fc) |
199 | { | 199 | { |
200 | int cpu; | 200 | int cpu; |
201 | int rc; | 201 | int rc; |
202 | 202 | ||
203 | if (!machine_has_topology) | 203 | if (!machine_has_topology) |
204 | return -EOPNOTSUPP; | 204 | return -EOPNOTSUPP; |
205 | if (fc) | 205 | if (fc) |
206 | rc = ptf(PTF_VERTICAL); | 206 | rc = ptf(PTF_VERTICAL); |
207 | else | 207 | else |
208 | rc = ptf(PTF_HORIZONTAL); | 208 | rc = ptf(PTF_HORIZONTAL); |
209 | if (rc) | 209 | if (rc) |
210 | return -EBUSY; | 210 | return -EBUSY; |
211 | for_each_possible_cpu(cpu) | 211 | for_each_possible_cpu(cpu) |
212 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 212 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
213 | return rc; | 213 | return rc; |
214 | } | 214 | } |
215 | 215 | ||
216 | static void update_cpu_core_map(void) | 216 | static void update_cpu_core_map(void) |
217 | { | 217 | { |
218 | int cpu; | 218 | int cpu; |
219 | 219 | ||
220 | for_each_possible_cpu(cpu) | 220 | for_each_possible_cpu(cpu) |
221 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | 221 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); |
222 | } | 222 | } |
223 | 223 | ||
224 | int arch_update_cpu_topology(void) | 224 | int arch_update_cpu_topology(void) |
225 | { | 225 | { |
226 | struct tl_info *info = tl_info; | 226 | struct tl_info *info = tl_info; |
227 | struct sys_device *sysdev; | 227 | struct sys_device *sysdev; |
228 | int cpu; | 228 | int cpu; |
229 | 229 | ||
230 | if (!machine_has_topology) { | 230 | if (!machine_has_topology) { |
231 | update_cpu_core_map(); | 231 | update_cpu_core_map(); |
232 | topology_update_polarization_simple(); | 232 | topology_update_polarization_simple(); |
233 | return 0; | 233 | return 0; |
234 | } | 234 | } |
235 | stsi(info, 15, 1, 2); | 235 | stsi(info, 15, 1, 2); |
236 | tl_to_cores(info); | 236 | tl_to_cores(info); |
237 | update_cpu_core_map(); | 237 | update_cpu_core_map(); |
238 | for_each_online_cpu(cpu) { | 238 | for_each_online_cpu(cpu) { |
239 | sysdev = get_cpu_sysdev(cpu); | 239 | sysdev = get_cpu_sysdev(cpu); |
240 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | 240 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); |
241 | } | 241 | } |
242 | return 1; | 242 | return 1; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void topology_work_fn(struct work_struct *work) | 245 | static void topology_work_fn(struct work_struct *work) |
246 | { | 246 | { |
247 | rebuild_sched_domains(); | 247 | rebuild_sched_domains(); |
248 | } | 248 | } |
249 | 249 | ||
250 | void topology_schedule_update(void) | 250 | void topology_schedule_update(void) |
251 | { | 251 | { |
252 | schedule_work(&topology_work); | 252 | schedule_work(&topology_work); |
253 | } | 253 | } |
254 | 254 | ||
255 | static void topology_timer_fn(unsigned long ignored) | 255 | static void topology_timer_fn(unsigned long ignored) |
256 | { | 256 | { |
257 | if (ptf(PTF_CHECK)) | 257 | if (ptf(PTF_CHECK)) |
258 | topology_schedule_update(); | 258 | topology_schedule_update(); |
259 | set_topology_timer(); | 259 | set_topology_timer(); |
260 | } | 260 | } |
261 | 261 | ||
262 | static void set_topology_timer(void) | 262 | static void set_topology_timer(void) |
263 | { | 263 | { |
264 | topology_timer.function = topology_timer_fn; | 264 | topology_timer.function = topology_timer_fn; |
265 | topology_timer.data = 0; | 265 | topology_timer.data = 0; |
266 | topology_timer.expires = jiffies + 60 * HZ; | 266 | topology_timer.expires = jiffies + 60 * HZ; |
267 | add_timer(&topology_timer); | 267 | add_timer(&topology_timer); |
268 | } | 268 | } |
269 | 269 | ||
270 | static int __init early_parse_topology(char *p) | 270 | static int __init early_parse_topology(char *p) |
271 | { | 271 | { |
272 | if (strncmp(p, "on", 2)) | 272 | if (strncmp(p, "on", 2)) |
273 | return 0; | 273 | return 0; |
274 | topology_enabled = 1; | 274 | topology_enabled = 1; |
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
277 | early_param("topology", early_parse_topology); | 277 | early_param("topology", early_parse_topology); |
278 | 278 | ||
279 | static int __init init_topology_update(void) | 279 | static int __init init_topology_update(void) |
280 | { | 280 | { |
281 | int rc; | 281 | int rc; |
282 | 282 | ||
283 | rc = 0; | 283 | rc = 0; |
284 | if (!machine_has_topology) { | 284 | if (!machine_has_topology) { |
285 | topology_update_polarization_simple(); | 285 | topology_update_polarization_simple(); |
286 | goto out; | 286 | goto out; |
287 | } | 287 | } |
288 | init_timer_deferrable(&topology_timer); | 288 | init_timer_deferrable(&topology_timer); |
289 | set_topology_timer(); | 289 | set_topology_timer(); |
290 | out: | 290 | out: |
291 | update_cpu_core_map(); | 291 | update_cpu_core_map(); |
292 | return rc; | 292 | return rc; |
293 | } | 293 | } |
294 | __initcall(init_topology_update); | 294 | __initcall(init_topology_update); |
295 | 295 | ||
296 | void __init s390_init_cpu_topology(void) | 296 | void __init s390_init_cpu_topology(void) |
297 | { | 297 | { |
298 | unsigned long long facility_bits; | 298 | unsigned long long facility_bits; |
299 | struct tl_info *info; | 299 | struct tl_info *info; |
300 | struct core_info *core; | 300 | struct core_info *core; |
301 | int nr_cores; | 301 | int nr_cores; |
302 | int i; | 302 | int i; |
303 | 303 | ||
304 | if (stfle(&facility_bits, 1) <= 0) | 304 | if (stfle(&facility_bits, 1) <= 0) |
305 | return; | 305 | return; |
306 | if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61))) | 306 | if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61))) |
307 | return; | 307 | return; |
308 | machine_has_topology = 1; | 308 | machine_has_topology = 1; |
309 | 309 | ||
310 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 310 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
311 | info = tl_info; | 311 | info = tl_info; |
312 | stsi(info, 15, 1, 2); | 312 | stsi(info, 15, 1, 2); |
313 | 313 | ||
314 | nr_cores = info->mag[NR_MAG - 2]; | 314 | nr_cores = info->mag[NR_MAG - 2]; |
315 | for (i = 0; i < info->mnest - 2; i++) | 315 | for (i = 0; i < info->mnest - 2; i++) |
316 | nr_cores *= info->mag[NR_MAG - 3 - i]; | 316 | nr_cores *= info->mag[NR_MAG - 3 - i]; |
317 | 317 | ||
318 | pr_info("The CPU configuration topology of the machine is:"); | 318 | pr_info("The CPU configuration topology of the machine is:"); |
319 | for (i = 0; i < NR_MAG; i++) | 319 | for (i = 0; i < NR_MAG; i++) |
320 | printk(" %d", info->mag[i]); | 320 | printk(" %d", info->mag[i]); |
321 | printk(" / %d\n", info->mnest); | 321 | printk(" / %d\n", info->mnest); |
322 | 322 | ||
323 | core = &core_info; | 323 | core = &core_info; |
324 | for (i = 0; i < nr_cores; i++) { | 324 | for (i = 0; i < nr_cores; i++) { |
325 | core->next = alloc_bootmem(sizeof(struct core_info)); | 325 | core->next = alloc_bootmem(sizeof(struct core_info)); |
326 | core = core->next; | 326 | core = core->next; |
327 | if (!core) | 327 | if (!core) |
328 | goto error; | 328 | goto error; |
329 | } | 329 | } |
330 | return; | 330 | return; |
331 | error: | 331 | error: |
332 | machine_has_topology = 0; | 332 | machine_has_topology = 0; |
333 | } | 333 | } |
334 | 334 ||
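The bootmem preallocation in s390_init_cpu_topology() sizes the core_info list from the SYSIB 15.1.2 magnitude vector; a worked example with hypothetical values:

    /*
     * NR_MAG = 6, mnest = 2, mag = { 0, 0, 0, 0, 6, 8 }:
     *   nr_cores = mag[4] = 6           (the mnest-2 loop runs zero times)
     * mnest = 3, mag = { 0, 0, 0, 4, 6, 8 }:
     *   nr_cores = mag[4] * mag[3] = 6 * 4 = 24
     * i.e. an upper bound on core-level containers, so the
     * alloc_bootmem() loop never runs short at runtime.
     */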
arch/s390/lib/spinlock.c
1 | /* | 1 | /* |
2 | * arch/s390/lib/spinlock.c | 2 | * arch/s390/lib/spinlock.c |
3 | * Out of line spinlock code. | 3 | * Out of line spinlock code. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2004, 2006 | 5 | * Copyright (C) IBM Corp. 2004, 2006 |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | 14 | ||
15 | int spin_retry = 1000; | 15 | int spin_retry = 1000; |
16 | 16 | ||
17 | /** | 17 | /** |
18 | * spin_retry= parameter | 18 | * spin_retry= parameter |
19 | */ | 19 | */ |
20 | static int __init spin_retry_setup(char *str) | 20 | static int __init spin_retry_setup(char *str) |
21 | { | 21 | { |
22 | spin_retry = simple_strtoul(str, &str, 0); | 22 | spin_retry = simple_strtoul(str, &str, 0); |
23 | return 1; | 23 | return 1; |
24 | } | 24 | } |
25 | __setup("spin_retry=", spin_retry_setup); | 25 | __setup("spin_retry=", spin_retry_setup); |
26 | 26 | ||
27 | static inline void _raw_yield(void) | 27 | static inline void _raw_yield(void) |
28 | { | 28 | { |
29 | if (MACHINE_HAS_DIAG44) | 29 | if (MACHINE_HAS_DIAG44) |
30 | asm volatile("diag 0,0,0x44"); | 30 | asm volatile("diag 0,0,0x44"); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void _raw_yield_cpu(int cpu) | 33 | static inline void _raw_yield_cpu(int cpu) |
34 | { | 34 | { |
35 | if (MACHINE_HAS_DIAG9C) | 35 | if (MACHINE_HAS_DIAG9C) |
36 | asm volatile("diag %0,0,0x9c" | 36 | asm volatile("diag %0,0,0x9c" |
37 | : : "d" (__cpu_logical_map[cpu])); | 37 | : : "d" (cpu_logical_map(cpu))); |
38 | else | 38 | else |
39 | _raw_yield(); | 39 | _raw_yield(); |
40 | } | 40 | } |
41 | 41 | ||
42 | void arch_spin_lock_wait(arch_spinlock_t *lp) | 42 | void arch_spin_lock_wait(arch_spinlock_t *lp) |
43 | { | 43 | { |
44 | int count = spin_retry; | 44 | int count = spin_retry; |
45 | unsigned int cpu = ~smp_processor_id(); | 45 | unsigned int cpu = ~smp_processor_id(); |
46 | 46 | ||
47 | while (1) { | 47 | while (1) { |
48 | if (count-- <= 0) { | 48 | if (count-- <= 0) { |
49 | unsigned int owner = lp->owner_cpu; | 49 | unsigned int owner = lp->owner_cpu; |
50 | if (owner != 0) | 50 | if (owner != 0) |
51 | _raw_yield_cpu(~owner); | 51 | _raw_yield_cpu(~owner); |
52 | count = spin_retry; | 52 | count = spin_retry; |
53 | } | 53 | } |
54 | if (arch_spin_is_locked(lp)) | 54 | if (arch_spin_is_locked(lp)) |
55 | continue; | 55 | continue; |
56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
57 | return; | 57 | return; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | EXPORT_SYMBOL(arch_spin_lock_wait); | 60 | EXPORT_SYMBOL(arch_spin_lock_wait); |
61 | 61 | ||
62 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) | 62 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) |
63 | { | 63 | { |
64 | int count = spin_retry; | 64 | int count = spin_retry; |
65 | unsigned int cpu = ~smp_processor_id(); | 65 | unsigned int cpu = ~smp_processor_id(); |
66 | 66 | ||
67 | local_irq_restore(flags); | 67 | local_irq_restore(flags); |
68 | while (1) { | 68 | while (1) { |
69 | if (count-- <= 0) { | 69 | if (count-- <= 0) { |
70 | unsigned int owner = lp->owner_cpu; | 70 | unsigned int owner = lp->owner_cpu; |
71 | if (owner != 0) | 71 | if (owner != 0) |
72 | _raw_yield_cpu(~owner); | 72 | _raw_yield_cpu(~owner); |
73 | count = spin_retry; | 73 | count = spin_retry; |
74 | } | 74 | } |
75 | if (arch_spin_is_locked(lp)) | 75 | if (arch_spin_is_locked(lp)) |
76 | continue; | 76 | continue; |
77 | local_irq_disable(); | 77 | local_irq_disable(); |
78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
79 | return; | 79 | return; |
80 | local_irq_restore(flags); | 80 | local_irq_restore(flags); |
81 | } | 81 | } |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); | 83 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); |
84 | 84 | ||
85 | int arch_spin_trylock_retry(arch_spinlock_t *lp) | 85 | int arch_spin_trylock_retry(arch_spinlock_t *lp) |
86 | { | 86 | { |
87 | unsigned int cpu = ~smp_processor_id(); | 87 | unsigned int cpu = ~smp_processor_id(); |
88 | int count; | 88 | int count; |
89 | 89 | ||
90 | for (count = spin_retry; count > 0; count--) { | 90 | for (count = spin_retry; count > 0; count--) { |
91 | if (arch_spin_is_locked(lp)) | 91 | if (arch_spin_is_locked(lp)) |
92 | continue; | 92 | continue; |
93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
94 | return 1; | 94 | return 1; |
95 | } | 95 | } |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(arch_spin_trylock_retry); | 98 | EXPORT_SYMBOL(arch_spin_trylock_retry); |
99 | 99 | ||
100 | void arch_spin_relax(arch_spinlock_t *lock) | 100 | void arch_spin_relax(arch_spinlock_t *lock) |
101 | { | 101 | { |
102 | unsigned int cpu = lock->owner_cpu; | 102 | unsigned int cpu = lock->owner_cpu; |
103 | if (cpu != 0) | 103 | if (cpu != 0) |
104 | _raw_yield_cpu(~cpu); | 104 | _raw_yield_cpu(~cpu); |
105 | } | 105 | } |
106 | EXPORT_SYMBOL(arch_spin_relax); | 106 | EXPORT_SYMBOL(arch_spin_relax); |
107 | 107 | ||
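All the spinlock slow paths above rely on one encoding trick: lp->owner_cpu == 0 means unlocked, and a holder stores the bitwise NOT of its logical cpu number so that cpu 0 cannot be mistaken for "free". With hypothetical ids:

    /*
     * smp_processor_id() == 0  ->  cpu = ~0 = 0xffffffff
     * smp_processor_id() == 5  ->  cpu = ~5 = 0xfffffffa
     *
     * _raw_yield_cpu(~owner) undoes the NOT before diag 0x9c asks
     * the hypervisor to run the holder, found via cpu_logical_map().
     */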
108 | void _raw_read_lock_wait(arch_rwlock_t *rw) | 108 | void _raw_read_lock_wait(arch_rwlock_t *rw) |
109 | { | 109 | { |
110 | unsigned int old; | 110 | unsigned int old; |
111 | int count = spin_retry; | 111 | int count = spin_retry; |
112 | 112 | ||
113 | while (1) { | 113 | while (1) { |
114 | if (count-- <= 0) { | 114 | if (count-- <= 0) { |
115 | _raw_yield(); | 115 | _raw_yield(); |
116 | count = spin_retry; | 116 | count = spin_retry; |
117 | } | 117 | } |
118 | if (!arch_read_can_lock(rw)) | 118 | if (!arch_read_can_lock(rw)) |
119 | continue; | 119 | continue; |
120 | old = rw->lock & 0x7fffffffU; | 120 | old = rw->lock & 0x7fffffffU; |
121 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 121 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) |
122 | return; | 122 | return; |
123 | } | 123 | } |
124 | } | 124 | } |
125 | EXPORT_SYMBOL(_raw_read_lock_wait); | 125 | EXPORT_SYMBOL(_raw_read_lock_wait); |
126 | 126 | ||
127 | void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) | 127 | void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) |
128 | { | 128 | { |
129 | unsigned int old; | 129 | unsigned int old; |
130 | int count = spin_retry; | 130 | int count = spin_retry; |
131 | 131 | ||
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
133 | while (1) { | 133 | while (1) { |
134 | if (count-- <= 0) { | 134 | if (count-- <= 0) { |
135 | _raw_yield(); | 135 | _raw_yield(); |
136 | count = spin_retry; | 136 | count = spin_retry; |
137 | } | 137 | } |
138 | if (!arch_read_can_lock(rw)) | 138 | if (!arch_read_can_lock(rw)) |
139 | continue; | 139 | continue; |
140 | old = rw->lock & 0x7fffffffU; | 140 | old = rw->lock & 0x7fffffffU; |
141 | local_irq_disable(); | 141 | local_irq_disable(); |
142 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 142 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) |
143 | return; | 143 | return; |
144 | } | 144 | } |
145 | } | 145 | } |
146 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); | 146 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); |
147 | 147 | ||
148 | int _raw_read_trylock_retry(arch_rwlock_t *rw) | 148 | int _raw_read_trylock_retry(arch_rwlock_t *rw) |
149 | { | 149 | { |
150 | unsigned int old; | 150 | unsigned int old; |
151 | int count = spin_retry; | 151 | int count = spin_retry; |
152 | 152 | ||
153 | while (count-- > 0) { | 153 | while (count-- > 0) { |
154 | if (!arch_read_can_lock(rw)) | 154 | if (!arch_read_can_lock(rw)) |
155 | continue; | 155 | continue; |
156 | old = rw->lock & 0x7fffffffU; | 156 | old = rw->lock & 0x7fffffffU; |
157 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 157 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) |
158 | return 1; | 158 | return 1; |
159 | } | 159 | } |
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
162 | EXPORT_SYMBOL(_raw_read_trylock_retry); | 162 | EXPORT_SYMBOL(_raw_read_trylock_retry); |
163 | 163 | ||
164 | void _raw_write_lock_wait(arch_rwlock_t *rw) | 164 | void _raw_write_lock_wait(arch_rwlock_t *rw) |
165 | { | 165 | { |
166 | int count = spin_retry; | 166 | int count = spin_retry; |
167 | 167 | ||
168 | while (1) { | 168 | while (1) { |
169 | if (count-- <= 0) { | 169 | if (count-- <= 0) { |
170 | _raw_yield(); | 170 | _raw_yield(); |
171 | count = spin_retry; | 171 | count = spin_retry; |
172 | } | 172 | } |
173 | if (!arch_write_can_lock(rw)) | 173 | if (!arch_write_can_lock(rw)) |
174 | continue; | 174 | continue; |
175 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 175 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
176 | return; | 176 | return; |
177 | } | 177 | } |
178 | } | 178 | } |
179 | EXPORT_SYMBOL(_raw_write_lock_wait); | 179 | EXPORT_SYMBOL(_raw_write_lock_wait); |
180 | 180 | ||
181 | void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) | 181 | void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) |
182 | { | 182 | { |
183 | int count = spin_retry; | 183 | int count = spin_retry; |
184 | 184 | ||
185 | local_irq_restore(flags); | 185 | local_irq_restore(flags); |
186 | while (1) { | 186 | while (1) { |
187 | if (count-- <= 0) { | 187 | if (count-- <= 0) { |
188 | _raw_yield(); | 188 | _raw_yield(); |
189 | count = spin_retry; | 189 | count = spin_retry; |
190 | } | 190 | } |
191 | if (!arch_write_can_lock(rw)) | 191 | if (!arch_write_can_lock(rw)) |
192 | continue; | 192 | continue; |
193 | local_irq_disable(); | 193 | local_irq_disable(); |
194 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 194 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
195 | return; | 195 | return; |
196 | } | 196 | } |
197 | } | 197 | } |
198 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); | 198 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); |
199 | 199 | ||
200 | int _raw_write_trylock_retry(arch_rwlock_t *rw) | 200 | int _raw_write_trylock_retry(arch_rwlock_t *rw) |
201 | { | 201 | { |
202 | int count = spin_retry; | 202 | int count = spin_retry; |
203 | 203 | ||
204 | while (count-- > 0) { | 204 | while (count-- > 0) { |
205 | if (!arch_write_can_lock(rw)) | 205 | if (!arch_write_can_lock(rw)) |
206 | continue; | 206 | continue; |
207 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 207 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
208 | return 1; | 208 | return 1; |
209 | } | 209 | } |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
212 | EXPORT_SYMBOL(_raw_write_trylock_retry); | 212 | EXPORT_SYMBOL(_raw_write_trylock_retry); |
213 | 213 ||
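For reference, the rwlock slow paths above all manipulate a single 32-bit word: bit 31 marks a writer, bits 0-30 count readers. A summary sketch of the compare-and-swap transitions, using the constants from the code:

    /*
     * reader:  old = lock & 0x7fffffff;   (masks out the writer bit, so
     *          CAS(lock, old, old + 1)     the CAS fails while a writer
     *                                      holds the lock)
     * writer:  CAS(lock, 0, 0x80000000)   (succeeds only with no readers
     *                                      and no writer present)
     */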