Commit 41459d36cf0d57813017dae6080a879cc038e5fe
Committed by: Martin Schwidefsky
1 parent: 6b563d8c26
Exists in: smarc-l5.0.0_1.0.0-ga and in 5 other branches
s390: add uninitialized_var() to suppress false positive compiler warnings
Get rid of these:

arch/s390/kernel/smp.c:134:19: warning: ‘status’ may be used uninitialized in this function [-Wuninitialized]
arch/s390/mm/pgtable.c:641:10: warning: ‘table’ may be used uninitialized in this function [-Wuninitialized]
arch/s390/mm/pgtable.c:644:12: warning: ‘page’ may be used uninitialized in this function [-Wuninitialized]
drivers/s390/cio/cio.c:1037:14: warning: ‘schid’ may be used uninitialized in this function [-Wuninitialized]

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
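For context: uninitialized_var() generates no code; in the kernel headers of this era (include/linux/compiler-gcc.h) it was a plain self-assignment, which is enough to stop GCC's flow analysis from flagging the variable. The warning is a false positive because __pcpu_sigp() stores to *status only when it returns SIGP_CC_STATUS_STORED, and the caller reads status only on that same return value, a correlation GCC cannot track across the call. A minimal standalone sketch of the pattern (the sense() helper is hypothetical, not kernel code):

#include <stdio.h>

/* as defined in include/linux/compiler-gcc.h at the time:
 * the self-assignment emits no code but marks 'x' as initialized */
#define uninitialized_var(x) x = x

/* writes *status only when it returns 1, analogous to __pcpu_sigp() */
static int sense(int stopped, int *status)
{
	if (stopped) {
		*status = 0x40;	/* e.g. a "stopped" status bit */
		return 1;	/* status stored */
	}
	return 0;		/* status left untouched */
}

int main(void)
{
	int uninitialized_var(status);	/* expands to: int status = status; */

	/* status is read only when sense() reports it was stored, but
	 * without the macro GCC warns that it may be used uninitialized */
	if (sense(1, &status) == 1)
		printf("status = %#x\n", status);
	return 0;
}

Note that the macro deliberately exploits GCC's behavior of not warning about self-initialization unless -Winit-self is given, so it silences -Wuninitialized without initializing the variable for real.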
Showing 3 changed files with 4 additions and 4 deletions (inline diff)
arch/s390/kernel/smp.c
1 | /* | 1 | /* |
2 | * SMP related functions | 2 | * SMP related functions |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2012 | 4 | * Copyright IBM Corp. 1999, 2012 |
5 | * Author(s): Denis Joseph Barrow, | 5 | * Author(s): Denis Joseph Barrow, |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Heiko Carstens <heiko.carstens@de.ibm.com>, | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * | 8 | * |
9 | * based on other smp stuff by | 9 | * based on other smp stuff by |
10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> | 10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> |
11 | * (c) 1998 Ingo Molnar | 11 | * (c) 1998 Ingo Molnar |
12 | * | 12 | * |
13 | * The code outside of smp.c uses logical cpu numbers, only smp.c does | 13 | * The code outside of smp.c uses logical cpu numbers, only smp.c does |
14 | * the translation of logical to physical cpu ids. All new code that | 14 | * the translation of logical to physical cpu ids. All new code that |
15 | * operates on physical cpu numbers needs to go into smp.c. | 15 | * operates on physical cpu numbers needs to go into smp.c. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define KMSG_COMPONENT "cpu" | 18 | #define KMSG_COMPONENT "cpu" |
19 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 19 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
20 | 20 | ||
21 | #include <linux/workqueue.h> | 21 | #include <linux/workqueue.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/kernel_stat.h> | 27 | #include <linux/kernel_stat.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/irqflags.h> | 30 | #include <linux/irqflags.h> |
31 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/crash_dump.h> | 33 | #include <linux/crash_dump.h> |
34 | #include <asm/asm-offsets.h> | 34 | #include <asm/asm-offsets.h> |
35 | #include <asm/switch_to.h> | 35 | #include <asm/switch_to.h> |
36 | #include <asm/facility.h> | 36 | #include <asm/facility.h> |
37 | #include <asm/ipl.h> | 37 | #include <asm/ipl.h> |
38 | #include <asm/setup.h> | 38 | #include <asm/setup.h> |
39 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
40 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/vtimer.h> | 41 | #include <asm/vtimer.h> |
42 | #include <asm/lowcore.h> | 42 | #include <asm/lowcore.h> |
43 | #include <asm/sclp.h> | 43 | #include <asm/sclp.h> |
44 | #include <asm/vdso.h> | 44 | #include <asm/vdso.h> |
45 | #include <asm/debug.h> | 45 | #include <asm/debug.h> |
46 | #include <asm/os_info.h> | 46 | #include <asm/os_info.h> |
47 | #include <asm/sigp.h> | 47 | #include <asm/sigp.h> |
48 | #include "entry.h" | 48 | #include "entry.h" |
49 | 49 | ||
50 | enum { | 50 | enum { |
51 | ec_schedule = 0, | 51 | ec_schedule = 0, |
52 | ec_call_function, | 52 | ec_call_function, |
53 | ec_call_function_single, | 53 | ec_call_function_single, |
54 | ec_stop_cpu, | 54 | ec_stop_cpu, |
55 | }; | 55 | }; |
56 | 56 | ||
57 | enum { | 57 | enum { |
58 | CPU_STATE_STANDBY, | 58 | CPU_STATE_STANDBY, |
59 | CPU_STATE_CONFIGURED, | 59 | CPU_STATE_CONFIGURED, |
60 | }; | 60 | }; |
61 | 61 | ||
62 | struct pcpu { | 62 | struct pcpu { |
63 | struct cpu cpu; | 63 | struct cpu cpu; |
64 | struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ | 64 | struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ |
65 | unsigned long async_stack; /* async stack for the cpu */ | 65 | unsigned long async_stack; /* async stack for the cpu */ |
66 | unsigned long panic_stack; /* panic stack for the cpu */ | 66 | unsigned long panic_stack; /* panic stack for the cpu */ |
67 | unsigned long ec_mask; /* bit mask for ec_xxx functions */ | 67 | unsigned long ec_mask; /* bit mask for ec_xxx functions */ |
68 | int state; /* physical cpu state */ | 68 | int state; /* physical cpu state */ |
69 | int polarization; /* physical polarization */ | 69 | int polarization; /* physical polarization */ |
70 | u16 address; /* physical cpu address */ | 70 | u16 address; /* physical cpu address */ |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static u8 boot_cpu_type; | 73 | static u8 boot_cpu_type; |
74 | static u16 boot_cpu_address; | 74 | static u16 boot_cpu_address; |
75 | static struct pcpu pcpu_devices[NR_CPUS]; | 75 | static struct pcpu pcpu_devices[NR_CPUS]; |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * The smp_cpu_state_mutex must be held when changing the state or polarization | 78 | * The smp_cpu_state_mutex must be held when changing the state or polarization |
79 | * member of a pcpu data structure within the pcpu_devices array. | 79 | * member of a pcpu data structure within the pcpu_devices array. |
80 | */ | 80 | */ |
81 | DEFINE_MUTEX(smp_cpu_state_mutex); | 81 | DEFINE_MUTEX(smp_cpu_state_mutex); |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * Signal processor helper functions. | 84 | * Signal processor helper functions. |
85 | */ | 85 | */ |
86 | static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) | 86 | static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) |
87 | { | 87 | { |
88 | register unsigned int reg1 asm ("1") = parm; | 88 | register unsigned int reg1 asm ("1") = parm; |
89 | int cc; | 89 | int cc; |
90 | 90 | ||
91 | asm volatile( | 91 | asm volatile( |
92 | " sigp %1,%2,0(%3)\n" | 92 | " sigp %1,%2,0(%3)\n" |
93 | " ipm %0\n" | 93 | " ipm %0\n" |
94 | " srl %0,28\n" | 94 | " srl %0,28\n" |
95 | : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); | 95 | : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); |
96 | if (status && cc == 1) | 96 | if (status && cc == 1) |
97 | *status = reg1; | 97 | *status = reg1; |
98 | return cc; | 98 | return cc; |
99 | } | 99 | } |
100 | 100 | ||
101 | static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) | 101 | static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) |
102 | { | 102 | { |
103 | int cc; | 103 | int cc; |
104 | 104 | ||
105 | while (1) { | 105 | while (1) { |
106 | cc = __pcpu_sigp(addr, order, parm, NULL); | 106 | cc = __pcpu_sigp(addr, order, parm, NULL); |
107 | if (cc != SIGP_CC_BUSY) | 107 | if (cc != SIGP_CC_BUSY) |
108 | return cc; | 108 | return cc; |
109 | cpu_relax(); | 109 | cpu_relax(); |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) | 113 | static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) |
114 | { | 114 | { |
115 | int cc, retry; | 115 | int cc, retry; |
116 | 116 | ||
117 | for (retry = 0; ; retry++) { | 117 | for (retry = 0; ; retry++) { |
118 | cc = __pcpu_sigp(pcpu->address, order, parm, NULL); | 118 | cc = __pcpu_sigp(pcpu->address, order, parm, NULL); |
119 | if (cc != SIGP_CC_BUSY) | 119 | if (cc != SIGP_CC_BUSY) |
120 | break; | 120 | break; |
121 | if (retry >= 3) | 121 | if (retry >= 3) |
122 | udelay(10); | 122 | udelay(10); |
123 | } | 123 | } |
124 | return cc; | 124 | return cc; |
125 | } | 125 | } |
126 | 126 | ||
127 | static inline int pcpu_stopped(struct pcpu *pcpu) | 127 | static inline int pcpu_stopped(struct pcpu *pcpu) |
128 | { | 128 | { |
129 | u32 status; | 129 | u32 uninitialized_var(status); |
130 | 130 | ||
131 | if (__pcpu_sigp(pcpu->address, SIGP_SENSE, | 131 | if (__pcpu_sigp(pcpu->address, SIGP_SENSE, |
132 | 0, &status) != SIGP_CC_STATUS_STORED) | 132 | 0, &status) != SIGP_CC_STATUS_STORED) |
133 | return 0; | 133 | return 0; |
134 | return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED)); | 134 | return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED)); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline int pcpu_running(struct pcpu *pcpu) | 137 | static inline int pcpu_running(struct pcpu *pcpu) |
138 | { | 138 | { |
139 | if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, | 139 | if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, |
140 | 0, NULL) != SIGP_CC_STATUS_STORED) | 140 | 0, NULL) != SIGP_CC_STATUS_STORED) |
141 | return 1; | 141 | return 1; |
142 | /* Status stored condition code is equivalent to cpu not running. */ | 142 | /* Status stored condition code is equivalent to cpu not running. */ |
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Find struct pcpu by cpu address. | 147 | * Find struct pcpu by cpu address. |
148 | */ | 148 | */ |
149 | static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) | 149 | static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) |
150 | { | 150 | { |
151 | int cpu; | 151 | int cpu; |
152 | 152 | ||
153 | for_each_cpu(cpu, mask) | 153 | for_each_cpu(cpu, mask) |
154 | if (pcpu_devices[cpu].address == address) | 154 | if (pcpu_devices[cpu].address == address) |
155 | return pcpu_devices + cpu; | 155 | return pcpu_devices + cpu; |
156 | return NULL; | 156 | return NULL; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) | 159 | static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) |
160 | { | 160 | { |
161 | int order; | 161 | int order; |
162 | 162 | ||
163 | set_bit(ec_bit, &pcpu->ec_mask); | 163 | set_bit(ec_bit, &pcpu->ec_mask); |
164 | order = pcpu_running(pcpu) ? | 164 | order = pcpu_running(pcpu) ? |
165 | SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; | 165 | SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; |
166 | pcpu_sigp_retry(pcpu, order, 0); | 166 | pcpu_sigp_retry(pcpu, order, 0); |
167 | } | 167 | } |
168 | 168 | ||
169 | static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | 169 | static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) |
170 | { | 170 | { |
171 | struct _lowcore *lc; | 171 | struct _lowcore *lc; |
172 | 172 | ||
173 | if (pcpu != &pcpu_devices[0]) { | 173 | if (pcpu != &pcpu_devices[0]) { |
174 | pcpu->lowcore = (struct _lowcore *) | 174 | pcpu->lowcore = (struct _lowcore *) |
175 | __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 175 | __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
176 | pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | 176 | pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); |
177 | pcpu->panic_stack = __get_free_page(GFP_KERNEL); | 177 | pcpu->panic_stack = __get_free_page(GFP_KERNEL); |
178 | if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) | 178 | if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) |
179 | goto out; | 179 | goto out; |
180 | } | 180 | } |
181 | lc = pcpu->lowcore; | 181 | lc = pcpu->lowcore; |
182 | memcpy(lc, &S390_lowcore, 512); | 182 | memcpy(lc, &S390_lowcore, 512); |
183 | memset((char *) lc + 512, 0, sizeof(*lc) - 512); | 183 | memset((char *) lc + 512, 0, sizeof(*lc) - 512); |
184 | lc->async_stack = pcpu->async_stack + ASYNC_SIZE; | 184 | lc->async_stack = pcpu->async_stack + ASYNC_SIZE; |
185 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; | 185 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; |
186 | lc->cpu_nr = cpu; | 186 | lc->cpu_nr = cpu; |
187 | #ifndef CONFIG_64BIT | 187 | #ifndef CONFIG_64BIT |
188 | if (MACHINE_HAS_IEEE) { | 188 | if (MACHINE_HAS_IEEE) { |
189 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); | 189 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); |
190 | if (!lc->extended_save_area_addr) | 190 | if (!lc->extended_save_area_addr) |
191 | goto out; | 191 | goto out; |
192 | } | 192 | } |
193 | #else | 193 | #else |
194 | if (vdso_alloc_per_cpu(lc)) | 194 | if (vdso_alloc_per_cpu(lc)) |
195 | goto out; | 195 | goto out; |
196 | #endif | 196 | #endif |
197 | lowcore_ptr[cpu] = lc; | 197 | lowcore_ptr[cpu] = lc; |
198 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); | 198 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); |
199 | return 0; | 199 | return 0; |
200 | out: | 200 | out: |
201 | if (pcpu != &pcpu_devices[0]) { | 201 | if (pcpu != &pcpu_devices[0]) { |
202 | free_page(pcpu->panic_stack); | 202 | free_page(pcpu->panic_stack); |
203 | free_pages(pcpu->async_stack, ASYNC_ORDER); | 203 | free_pages(pcpu->async_stack, ASYNC_ORDER); |
204 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); | 204 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); |
205 | } | 205 | } |
206 | return -ENOMEM; | 206 | return -ENOMEM; |
207 | } | 207 | } |
208 | 208 | ||
209 | #ifdef CONFIG_HOTPLUG_CPU | 209 | #ifdef CONFIG_HOTPLUG_CPU |
210 | 210 | ||
211 | static void pcpu_free_lowcore(struct pcpu *pcpu) | 211 | static void pcpu_free_lowcore(struct pcpu *pcpu) |
212 | { | 212 | { |
213 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); | 213 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); |
214 | lowcore_ptr[pcpu - pcpu_devices] = NULL; | 214 | lowcore_ptr[pcpu - pcpu_devices] = NULL; |
215 | #ifndef CONFIG_64BIT | 215 | #ifndef CONFIG_64BIT |
216 | if (MACHINE_HAS_IEEE) { | 216 | if (MACHINE_HAS_IEEE) { |
217 | struct _lowcore *lc = pcpu->lowcore; | 217 | struct _lowcore *lc = pcpu->lowcore; |
218 | 218 | ||
219 | free_page((unsigned long) lc->extended_save_area_addr); | 219 | free_page((unsigned long) lc->extended_save_area_addr); |
220 | lc->extended_save_area_addr = 0; | 220 | lc->extended_save_area_addr = 0; |
221 | } | 221 | } |
222 | #else | 222 | #else |
223 | vdso_free_per_cpu(pcpu->lowcore); | 223 | vdso_free_per_cpu(pcpu->lowcore); |
224 | #endif | 224 | #endif |
225 | if (pcpu != &pcpu_devices[0]) { | 225 | if (pcpu != &pcpu_devices[0]) { |
226 | free_page(pcpu->panic_stack); | 226 | free_page(pcpu->panic_stack); |
227 | free_pages(pcpu->async_stack, ASYNC_ORDER); | 227 | free_pages(pcpu->async_stack, ASYNC_ORDER); |
228 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); | 228 | free_pages((unsigned long) pcpu->lowcore, LC_ORDER); |
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | #endif /* CONFIG_HOTPLUG_CPU */ | 232 | #endif /* CONFIG_HOTPLUG_CPU */ |
233 | 233 | ||
234 | static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) | 234 | static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) |
235 | { | 235 | { |
236 | struct _lowcore *lc = pcpu->lowcore; | 236 | struct _lowcore *lc = pcpu->lowcore; |
237 | 237 | ||
238 | atomic_inc(&init_mm.context.attach_count); | 238 | atomic_inc(&init_mm.context.attach_count); |
239 | lc->cpu_nr = cpu; | 239 | lc->cpu_nr = cpu; |
240 | lc->percpu_offset = __per_cpu_offset[cpu]; | 240 | lc->percpu_offset = __per_cpu_offset[cpu]; |
241 | lc->kernel_asce = S390_lowcore.kernel_asce; | 241 | lc->kernel_asce = S390_lowcore.kernel_asce; |
242 | lc->machine_flags = S390_lowcore.machine_flags; | 242 | lc->machine_flags = S390_lowcore.machine_flags; |
243 | lc->ftrace_func = S390_lowcore.ftrace_func; | 243 | lc->ftrace_func = S390_lowcore.ftrace_func; |
244 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; | 244 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; |
245 | __ctl_store(lc->cregs_save_area, 0, 15); | 245 | __ctl_store(lc->cregs_save_area, 0, 15); |
246 | save_access_regs((unsigned int *) lc->access_regs_save_area); | 246 | save_access_regs((unsigned int *) lc->access_regs_save_area); |
247 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | 247 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, |
248 | MAX_FACILITY_BIT/8); | 248 | MAX_FACILITY_BIT/8); |
249 | } | 249 | } |
250 | 250 | ||
251 | static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) | 251 | static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) |
252 | { | 252 | { |
253 | struct _lowcore *lc = pcpu->lowcore; | 253 | struct _lowcore *lc = pcpu->lowcore; |
254 | struct thread_info *ti = task_thread_info(tsk); | 254 | struct thread_info *ti = task_thread_info(tsk); |
255 | 255 | ||
256 | lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; | 256 | lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; |
257 | lc->thread_info = (unsigned long) task_thread_info(tsk); | 257 | lc->thread_info = (unsigned long) task_thread_info(tsk); |
258 | lc->current_task = (unsigned long) tsk; | 258 | lc->current_task = (unsigned long) tsk; |
259 | lc->user_timer = ti->user_timer; | 259 | lc->user_timer = ti->user_timer; |
260 | lc->system_timer = ti->system_timer; | 260 | lc->system_timer = ti->system_timer; |
261 | lc->steal_timer = 0; | 261 | lc->steal_timer = 0; |
262 | } | 262 | } |
263 | 263 | ||
264 | static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) | 264 | static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) |
265 | { | 265 | { |
266 | struct _lowcore *lc = pcpu->lowcore; | 266 | struct _lowcore *lc = pcpu->lowcore; |
267 | 267 | ||
268 | lc->restart_stack = lc->kernel_stack; | 268 | lc->restart_stack = lc->kernel_stack; |
269 | lc->restart_fn = (unsigned long) func; | 269 | lc->restart_fn = (unsigned long) func; |
270 | lc->restart_data = (unsigned long) data; | 270 | lc->restart_data = (unsigned long) data; |
271 | lc->restart_source = -1UL; | 271 | lc->restart_source = -1UL; |
272 | pcpu_sigp_retry(pcpu, SIGP_RESTART, 0); | 272 | pcpu_sigp_retry(pcpu, SIGP_RESTART, 0); |
273 | } | 273 | } |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * Call function via PSW restart on pcpu and stop the current cpu. | 276 | * Call function via PSW restart on pcpu and stop the current cpu. |
277 | */ | 277 | */ |
278 | static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), | 278 | static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), |
279 | void *data, unsigned long stack) | 279 | void *data, unsigned long stack) |
280 | { | 280 | { |
281 | struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; | 281 | struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; |
282 | unsigned long source_cpu = stap(); | 282 | unsigned long source_cpu = stap(); |
283 | 283 | ||
284 | __load_psw_mask(psw_kernel_bits); | 284 | __load_psw_mask(psw_kernel_bits); |
285 | if (pcpu->address == source_cpu) | 285 | if (pcpu->address == source_cpu) |
286 | func(data); /* should not return */ | 286 | func(data); /* should not return */ |
287 | /* Stop target cpu (if func returns this stops the current cpu). */ | 287 | /* Stop target cpu (if func returns this stops the current cpu). */ |
288 | pcpu_sigp_retry(pcpu, SIGP_STOP, 0); | 288 | pcpu_sigp_retry(pcpu, SIGP_STOP, 0); |
289 | /* Restart func on the target cpu and stop the current cpu. */ | 289 | /* Restart func on the target cpu and stop the current cpu. */ |
290 | mem_assign_absolute(lc->restart_stack, stack); | 290 | mem_assign_absolute(lc->restart_stack, stack); |
291 | mem_assign_absolute(lc->restart_fn, (unsigned long) func); | 291 | mem_assign_absolute(lc->restart_fn, (unsigned long) func); |
292 | mem_assign_absolute(lc->restart_data, (unsigned long) data); | 292 | mem_assign_absolute(lc->restart_data, (unsigned long) data); |
293 | mem_assign_absolute(lc->restart_source, source_cpu); | 293 | mem_assign_absolute(lc->restart_source, source_cpu); |
294 | asm volatile( | 294 | asm volatile( |
295 | "0: sigp 0,%0,%2 # sigp restart to target cpu\n" | 295 | "0: sigp 0,%0,%2 # sigp restart to target cpu\n" |
296 | " brc 2,0b # busy, try again\n" | 296 | " brc 2,0b # busy, try again\n" |
297 | "1: sigp 0,%1,%3 # sigp stop to current cpu\n" | 297 | "1: sigp 0,%1,%3 # sigp stop to current cpu\n" |
298 | " brc 2,1b # busy, try again\n" | 298 | " brc 2,1b # busy, try again\n" |
299 | : : "d" (pcpu->address), "d" (source_cpu), | 299 | : : "d" (pcpu->address), "d" (source_cpu), |
300 | "K" (SIGP_RESTART), "K" (SIGP_STOP) | 300 | "K" (SIGP_RESTART), "K" (SIGP_STOP) |
301 | : "0", "1", "cc"); | 301 | : "0", "1", "cc"); |
302 | for (;;) ; | 302 | for (;;) ; |
303 | } | 303 | } |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * Call function on an online CPU. | 306 | * Call function on an online CPU. |
307 | */ | 307 | */ |
308 | void smp_call_online_cpu(void (*func)(void *), void *data) | 308 | void smp_call_online_cpu(void (*func)(void *), void *data) |
309 | { | 309 | { |
310 | struct pcpu *pcpu; | 310 | struct pcpu *pcpu; |
311 | 311 | ||
312 | /* Use the current cpu if it is online. */ | 312 | /* Use the current cpu if it is online. */ |
313 | pcpu = pcpu_find_address(cpu_online_mask, stap()); | 313 | pcpu = pcpu_find_address(cpu_online_mask, stap()); |
314 | if (!pcpu) | 314 | if (!pcpu) |
315 | /* Use the first online cpu. */ | 315 | /* Use the first online cpu. */ |
316 | pcpu = pcpu_devices + cpumask_first(cpu_online_mask); | 316 | pcpu = pcpu_devices + cpumask_first(cpu_online_mask); |
317 | pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); | 317 | pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); |
318 | } | 318 | } |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * Call function on the ipl CPU. | 321 | * Call function on the ipl CPU. |
322 | */ | 322 | */ |
323 | void smp_call_ipl_cpu(void (*func)(void *), void *data) | 323 | void smp_call_ipl_cpu(void (*func)(void *), void *data) |
324 | { | 324 | { |
325 | pcpu_delegate(&pcpu_devices[0], func, data, | 325 | pcpu_delegate(&pcpu_devices[0], func, data, |
326 | pcpu_devices->panic_stack + PAGE_SIZE); | 326 | pcpu_devices->panic_stack + PAGE_SIZE); |
327 | } | 327 | } |
328 | 328 | ||
329 | int smp_find_processor_id(u16 address) | 329 | int smp_find_processor_id(u16 address) |
330 | { | 330 | { |
331 | int cpu; | 331 | int cpu; |
332 | 332 | ||
333 | for_each_present_cpu(cpu) | 333 | for_each_present_cpu(cpu) |
334 | if (pcpu_devices[cpu].address == address) | 334 | if (pcpu_devices[cpu].address == address) |
335 | return cpu; | 335 | return cpu; |
336 | return -1; | 336 | return -1; |
337 | } | 337 | } |
338 | 338 | ||
339 | int smp_vcpu_scheduled(int cpu) | 339 | int smp_vcpu_scheduled(int cpu) |
340 | { | 340 | { |
341 | return pcpu_running(pcpu_devices + cpu); | 341 | return pcpu_running(pcpu_devices + cpu); |
342 | } | 342 | } |
343 | 343 | ||
344 | void smp_yield(void) | 344 | void smp_yield(void) |
345 | { | 345 | { |
346 | if (MACHINE_HAS_DIAG44) | 346 | if (MACHINE_HAS_DIAG44) |
347 | asm volatile("diag 0,0,0x44"); | 347 | asm volatile("diag 0,0,0x44"); |
348 | } | 348 | } |
349 | 349 | ||
350 | void smp_yield_cpu(int cpu) | 350 | void smp_yield_cpu(int cpu) |
351 | { | 351 | { |
352 | if (MACHINE_HAS_DIAG9C) | 352 | if (MACHINE_HAS_DIAG9C) |
353 | asm volatile("diag %0,0,0x9c" | 353 | asm volatile("diag %0,0,0x9c" |
354 | : : "d" (pcpu_devices[cpu].address)); | 354 | : : "d" (pcpu_devices[cpu].address)); |
355 | else if (MACHINE_HAS_DIAG44) | 355 | else if (MACHINE_HAS_DIAG44) |
356 | asm volatile("diag 0,0,0x44"); | 356 | asm volatile("diag 0,0,0x44"); |
357 | } | 357 | } |
358 | 358 | ||
359 | /* | 359 | /* |
360 | * Send cpus emergency shutdown signal. This gives the cpus the | 360 | * Send cpus emergency shutdown signal. This gives the cpus the |
361 | * opportunity to complete outstanding interrupts. | 361 | * opportunity to complete outstanding interrupts. |
362 | */ | 362 | */ |
363 | void smp_emergency_stop(cpumask_t *cpumask) | 363 | void smp_emergency_stop(cpumask_t *cpumask) |
364 | { | 364 | { |
365 | u64 end; | 365 | u64 end; |
366 | int cpu; | 366 | int cpu; |
367 | 367 | ||
368 | end = get_clock() + (1000000UL << 12); | 368 | end = get_clock() + (1000000UL << 12); |
369 | for_each_cpu(cpu, cpumask) { | 369 | for_each_cpu(cpu, cpumask) { |
370 | struct pcpu *pcpu = pcpu_devices + cpu; | 370 | struct pcpu *pcpu = pcpu_devices + cpu; |
371 | set_bit(ec_stop_cpu, &pcpu->ec_mask); | 371 | set_bit(ec_stop_cpu, &pcpu->ec_mask); |
372 | while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, | 372 | while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, |
373 | 0, NULL) == SIGP_CC_BUSY && | 373 | 0, NULL) == SIGP_CC_BUSY && |
374 | get_clock() < end) | 374 | get_clock() < end) |
375 | cpu_relax(); | 375 | cpu_relax(); |
376 | } | 376 | } |
377 | while (get_clock() < end) { | 377 | while (get_clock() < end) { |
378 | for_each_cpu(cpu, cpumask) | 378 | for_each_cpu(cpu, cpumask) |
379 | if (pcpu_stopped(pcpu_devices + cpu)) | 379 | if (pcpu_stopped(pcpu_devices + cpu)) |
380 | cpumask_clear_cpu(cpu, cpumask); | 380 | cpumask_clear_cpu(cpu, cpumask); |
381 | if (cpumask_empty(cpumask)) | 381 | if (cpumask_empty(cpumask)) |
382 | break; | 382 | break; |
383 | cpu_relax(); | 383 | cpu_relax(); |
384 | } | 384 | } |
385 | } | 385 | } |
386 | 386 | ||
387 | /* | 387 | /* |
388 | * Stop all cpus but the current one. | 388 | * Stop all cpus but the current one. |
389 | */ | 389 | */ |
390 | void smp_send_stop(void) | 390 | void smp_send_stop(void) |
391 | { | 391 | { |
392 | cpumask_t cpumask; | 392 | cpumask_t cpumask; |
393 | int cpu; | 393 | int cpu; |
394 | 394 | ||
395 | /* Disable all interrupts/machine checks */ | 395 | /* Disable all interrupts/machine checks */ |
396 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | 396 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
397 | trace_hardirqs_off(); | 397 | trace_hardirqs_off(); |
398 | 398 | ||
399 | debug_set_critical(); | 399 | debug_set_critical(); |
400 | cpumask_copy(&cpumask, cpu_online_mask); | 400 | cpumask_copy(&cpumask, cpu_online_mask); |
401 | cpumask_clear_cpu(smp_processor_id(), &cpumask); | 401 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
402 | 402 | ||
403 | if (oops_in_progress) | 403 | if (oops_in_progress) |
404 | smp_emergency_stop(&cpumask); | 404 | smp_emergency_stop(&cpumask); |
405 | 405 | ||
406 | /* stop all processors */ | 406 | /* stop all processors */ |
407 | for_each_cpu(cpu, &cpumask) { | 407 | for_each_cpu(cpu, &cpumask) { |
408 | struct pcpu *pcpu = pcpu_devices + cpu; | 408 | struct pcpu *pcpu = pcpu_devices + cpu; |
409 | pcpu_sigp_retry(pcpu, SIGP_STOP, 0); | 409 | pcpu_sigp_retry(pcpu, SIGP_STOP, 0); |
410 | while (!pcpu_stopped(pcpu)) | 410 | while (!pcpu_stopped(pcpu)) |
411 | cpu_relax(); | 411 | cpu_relax(); |
412 | } | 412 | } |
413 | } | 413 | } |
414 | 414 | ||
415 | /* | 415 | /* |
416 | * Stop the current cpu. | 416 | * Stop the current cpu. |
417 | */ | 417 | */ |
418 | void smp_stop_cpu(void) | 418 | void smp_stop_cpu(void) |
419 | { | 419 | { |
420 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); | 420 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); |
421 | for (;;) ; | 421 | for (;;) ; |
422 | } | 422 | } |
423 | 423 | ||
424 | /* | 424 | /* |
425 | * This is the main routine where commands issued by other | 425 | * This is the main routine where commands issued by other |
426 | * cpus are handled. | 426 | * cpus are handled. |
427 | */ | 427 | */ |
428 | static void do_ext_call_interrupt(struct ext_code ext_code, | 428 | static void do_ext_call_interrupt(struct ext_code ext_code, |
429 | unsigned int param32, unsigned long param64) | 429 | unsigned int param32, unsigned long param64) |
430 | { | 430 | { |
431 | unsigned long bits; | 431 | unsigned long bits; |
432 | int cpu; | 432 | int cpu; |
433 | 433 | ||
434 | cpu = smp_processor_id(); | 434 | cpu = smp_processor_id(); |
435 | if (ext_code.code == 0x1202) | 435 | if (ext_code.code == 0x1202) |
436 | kstat_cpu(cpu).irqs[EXTINT_EXC]++; | 436 | kstat_cpu(cpu).irqs[EXTINT_EXC]++; |
437 | else | 437 | else |
438 | kstat_cpu(cpu).irqs[EXTINT_EMS]++; | 438 | kstat_cpu(cpu).irqs[EXTINT_EMS]++; |
439 | /* | 439 | /* |
440 | * handle bit signal external calls | 440 | * handle bit signal external calls |
441 | */ | 441 | */ |
442 | bits = xchg(&pcpu_devices[cpu].ec_mask, 0); | 442 | bits = xchg(&pcpu_devices[cpu].ec_mask, 0); |
443 | 443 | ||
444 | if (test_bit(ec_stop_cpu, &bits)) | 444 | if (test_bit(ec_stop_cpu, &bits)) |
445 | smp_stop_cpu(); | 445 | smp_stop_cpu(); |
446 | 446 | ||
447 | if (test_bit(ec_schedule, &bits)) | 447 | if (test_bit(ec_schedule, &bits)) |
448 | scheduler_ipi(); | 448 | scheduler_ipi(); |
449 | 449 | ||
450 | if (test_bit(ec_call_function, &bits)) | 450 | if (test_bit(ec_call_function, &bits)) |
451 | generic_smp_call_function_interrupt(); | 451 | generic_smp_call_function_interrupt(); |
452 | 452 | ||
453 | if (test_bit(ec_call_function_single, &bits)) | 453 | if (test_bit(ec_call_function_single, &bits)) |
454 | generic_smp_call_function_single_interrupt(); | 454 | generic_smp_call_function_single_interrupt(); |
455 | 455 | ||
456 | } | 456 | } |
457 | 457 | ||
458 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 458 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
459 | { | 459 | { |
460 | int cpu; | 460 | int cpu; |
461 | 461 | ||
462 | for_each_cpu(cpu, mask) | 462 | for_each_cpu(cpu, mask) |
463 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function); | 463 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function); |
464 | } | 464 | } |
465 | 465 | ||
466 | void arch_send_call_function_single_ipi(int cpu) | 466 | void arch_send_call_function_single_ipi(int cpu) |
467 | { | 467 | { |
468 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); | 468 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); |
469 | } | 469 | } |
470 | 470 | ||
471 | #ifndef CONFIG_64BIT | 471 | #ifndef CONFIG_64BIT |
472 | /* | 472 | /* |
473 | * this function sends a 'purge tlb' signal to another CPU. | 473 | * this function sends a 'purge tlb' signal to another CPU. |
474 | */ | 474 | */ |
475 | static void smp_ptlb_callback(void *info) | 475 | static void smp_ptlb_callback(void *info) |
476 | { | 476 | { |
477 | __tlb_flush_local(); | 477 | __tlb_flush_local(); |
478 | } | 478 | } |
479 | 479 | ||
480 | void smp_ptlb_all(void) | 480 | void smp_ptlb_all(void) |
481 | { | 481 | { |
482 | on_each_cpu(smp_ptlb_callback, NULL, 1); | 482 | on_each_cpu(smp_ptlb_callback, NULL, 1); |
483 | } | 483 | } |
484 | EXPORT_SYMBOL(smp_ptlb_all); | 484 | EXPORT_SYMBOL(smp_ptlb_all); |
485 | #endif /* ! CONFIG_64BIT */ | 485 | #endif /* ! CONFIG_64BIT */ |
486 | 486 | ||
487 | /* | 487 | /* |
488 | * this function sends a 'reschedule' IPI to another CPU. | 488 | * this function sends a 'reschedule' IPI to another CPU. |
489 | * it goes straight through and wastes no time serializing | 489 | * it goes straight through and wastes no time serializing |
490 | * anything. Worst case is that we lose a reschedule ... | 490 | * anything. Worst case is that we lose a reschedule ... |
491 | */ | 491 | */ |
492 | void smp_send_reschedule(int cpu) | 492 | void smp_send_reschedule(int cpu) |
493 | { | 493 | { |
494 | pcpu_ec_call(pcpu_devices + cpu, ec_schedule); | 494 | pcpu_ec_call(pcpu_devices + cpu, ec_schedule); |
495 | } | 495 | } |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * parameter area for the set/clear control bit callbacks | 498 | * parameter area for the set/clear control bit callbacks |
499 | */ | 499 | */ |
500 | struct ec_creg_mask_parms { | 500 | struct ec_creg_mask_parms { |
501 | unsigned long orval; | 501 | unsigned long orval; |
502 | unsigned long andval; | 502 | unsigned long andval; |
503 | int cr; | 503 | int cr; |
504 | }; | 504 | }; |
505 | 505 | ||
506 | /* | 506 | /* |
507 | * callback for setting/clearing control bits | 507 | * callback for setting/clearing control bits |
508 | */ | 508 | */ |
509 | static void smp_ctl_bit_callback(void *info) | 509 | static void smp_ctl_bit_callback(void *info) |
510 | { | 510 | { |
511 | struct ec_creg_mask_parms *pp = info; | 511 | struct ec_creg_mask_parms *pp = info; |
512 | unsigned long cregs[16]; | 512 | unsigned long cregs[16]; |
513 | 513 | ||
514 | __ctl_store(cregs, 0, 15); | 514 | __ctl_store(cregs, 0, 15); |
515 | cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; | 515 | cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; |
516 | __ctl_load(cregs, 0, 15); | 516 | __ctl_load(cregs, 0, 15); |
517 | } | 517 | } |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * Set a bit in a control register of all cpus | 520 | * Set a bit in a control register of all cpus |
521 | */ | 521 | */ |
522 | void smp_ctl_set_bit(int cr, int bit) | 522 | void smp_ctl_set_bit(int cr, int bit) |
523 | { | 523 | { |
524 | struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; | 524 | struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; |
525 | 525 | ||
526 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 526 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
527 | } | 527 | } |
528 | EXPORT_SYMBOL(smp_ctl_set_bit); | 528 | EXPORT_SYMBOL(smp_ctl_set_bit); |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * Clear a bit in a control register of all cpus | 531 | * Clear a bit in a control register of all cpus |
532 | */ | 532 | */ |
533 | void smp_ctl_clear_bit(int cr, int bit) | 533 | void smp_ctl_clear_bit(int cr, int bit) |
534 | { | 534 | { |
535 | struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; | 535 | struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; |
536 | 536 | ||
537 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 537 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
538 | } | 538 | } |
539 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 539 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
540 | 540 | ||
541 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) | 541 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) |
542 | 542 | ||
543 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 543 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
544 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | 544 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); |
545 | 545 | ||
546 | static void __init smp_get_save_area(int cpu, u16 address) | 546 | static void __init smp_get_save_area(int cpu, u16 address) |
547 | { | 547 | { |
548 | void *lc = pcpu_devices[0].lowcore; | 548 | void *lc = pcpu_devices[0].lowcore; |
549 | struct save_area *save_area; | 549 | struct save_area *save_area; |
550 | 550 | ||
551 | if (is_kdump_kernel()) | 551 | if (is_kdump_kernel()) |
552 | return; | 552 | return; |
553 | if (!OLDMEM_BASE && (address == boot_cpu_address || | 553 | if (!OLDMEM_BASE && (address == boot_cpu_address || |
554 | ipl_info.type != IPL_TYPE_FCP_DUMP)) | 554 | ipl_info.type != IPL_TYPE_FCP_DUMP)) |
555 | return; | 555 | return; |
556 | if (cpu >= NR_CPUS) { | 556 | if (cpu >= NR_CPUS) { |
557 | pr_warning("CPU %i exceeds the maximum %i and is excluded " | 557 | pr_warning("CPU %i exceeds the maximum %i and is excluded " |
558 | "from the dump\n", cpu, NR_CPUS - 1); | 558 | "from the dump\n", cpu, NR_CPUS - 1); |
559 | return; | 559 | return; |
560 | } | 560 | } |
561 | save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL); | 561 | save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL); |
562 | if (!save_area) | 562 | if (!save_area) |
563 | panic("could not allocate memory for save area\n"); | 563 | panic("could not allocate memory for save area\n"); |
564 | zfcpdump_save_areas[cpu] = save_area; | 564 | zfcpdump_save_areas[cpu] = save_area; |
565 | #ifdef CONFIG_CRASH_DUMP | 565 | #ifdef CONFIG_CRASH_DUMP |
566 | if (address == boot_cpu_address) { | 566 | if (address == boot_cpu_address) { |
567 | /* Copy the registers of the boot cpu. */ | 567 | /* Copy the registers of the boot cpu. */ |
568 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | 568 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), |
569 | SAVE_AREA_BASE - PAGE_SIZE, 0); | 569 | SAVE_AREA_BASE - PAGE_SIZE, 0); |
570 | return; | 570 | return; |
571 | } | 571 | } |
572 | #endif | 572 | #endif |
573 | /* Get the registers of a non-boot cpu. */ | 573 | /* Get the registers of a non-boot cpu. */ |
574 | __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); | 574 | __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); |
575 | memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); | 575 | memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); |
576 | } | 576 | } |
577 | 577 | ||
578 | int smp_store_status(int cpu) | 578 | int smp_store_status(int cpu) |
579 | { | 579 | { |
580 | struct pcpu *pcpu; | 580 | struct pcpu *pcpu; |
581 | 581 | ||
582 | pcpu = pcpu_devices + cpu; | 582 | pcpu = pcpu_devices + cpu; |
583 | if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, | 583 | if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, |
584 | 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) | 584 | 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) |
585 | return -EIO; | 585 | return -EIO; |
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | #else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ | 589 | #else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ |
590 | 590 | ||
591 | static inline void smp_get_save_area(int cpu, u16 address) { } | 591 | static inline void smp_get_save_area(int cpu, u16 address) { } |
592 | 592 | ||
593 | #endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ | 593 | #endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ |
594 | 594 | ||
595 | void smp_cpu_set_polarization(int cpu, int val) | 595 | void smp_cpu_set_polarization(int cpu, int val) |
596 | { | 596 | { |
597 | pcpu_devices[cpu].polarization = val; | 597 | pcpu_devices[cpu].polarization = val; |
598 | } | 598 | } |
599 | 599 | ||
600 | int smp_cpu_get_polarization(int cpu) | 600 | int smp_cpu_get_polarization(int cpu) |
601 | { | 601 | { |
602 | return pcpu_devices[cpu].polarization; | 602 | return pcpu_devices[cpu].polarization; |
603 | } | 603 | } |
604 | 604 | ||
605 | static struct sclp_cpu_info *smp_get_cpu_info(void) | 605 | static struct sclp_cpu_info *smp_get_cpu_info(void) |
606 | { | 606 | { |
607 | static int use_sigp_detection; | 607 | static int use_sigp_detection; |
608 | struct sclp_cpu_info *info; | 608 | struct sclp_cpu_info *info; |
609 | int address; | 609 | int address; |
610 | 610 | ||
611 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 611 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
612 | if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { | 612 | if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { |
613 | use_sigp_detection = 1; | 613 | use_sigp_detection = 1; |
614 | for (address = 0; address <= MAX_CPU_ADDRESS; address++) { | 614 | for (address = 0; address <= MAX_CPU_ADDRESS; address++) { |
615 | if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == | 615 | if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == |
616 | SIGP_CC_NOT_OPERATIONAL) | 616 | SIGP_CC_NOT_OPERATIONAL) |
617 | continue; | 617 | continue; |
618 | info->cpu[info->configured].address = address; | 618 | info->cpu[info->configured].address = address; |
619 | info->configured++; | 619 | info->configured++; |
620 | } | 620 | } |
621 | info->combined = info->configured; | 621 | info->combined = info->configured; |
622 | } | 622 | } |
623 | return info; | 623 | return info; |
624 | } | 624 | } |
625 | 625 | ||
626 | static int __devinit smp_add_present_cpu(int cpu); | 626 | static int __devinit smp_add_present_cpu(int cpu); |
627 | 627 | ||
628 | static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info, | 628 | static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info, |
629 | int sysfs_add) | 629 | int sysfs_add) |
630 | { | 630 | { |
631 | struct pcpu *pcpu; | 631 | struct pcpu *pcpu; |
632 | cpumask_t avail; | 632 | cpumask_t avail; |
633 | int cpu, nr, i; | 633 | int cpu, nr, i; |
634 | 634 | ||
635 | nr = 0; | 635 | nr = 0; |
636 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); | 636 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); |
637 | cpu = cpumask_first(&avail); | 637 | cpu = cpumask_first(&avail); |
638 | for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { | 638 | for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { |
639 | if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) | 639 | if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) |
640 | continue; | 640 | continue; |
641 | if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) | 641 | if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) |
642 | continue; | 642 | continue; |
643 | pcpu = pcpu_devices + cpu; | 643 | pcpu = pcpu_devices + cpu; |
644 | pcpu->address = info->cpu[i].address; | 644 | pcpu->address = info->cpu[i].address; |
645 | pcpu->state = (cpu >= info->configured) ? | 645 | pcpu->state = (cpu >= info->configured) ? |
646 | CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; | 646 | CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; |
647 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 647 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
648 | set_cpu_present(cpu, true); | 648 | set_cpu_present(cpu, true); |
649 | if (sysfs_add && smp_add_present_cpu(cpu) != 0) | 649 | if (sysfs_add && smp_add_present_cpu(cpu) != 0) |
650 | set_cpu_present(cpu, false); | 650 | set_cpu_present(cpu, false); |
651 | else | 651 | else |
652 | nr++; | 652 | nr++; |
653 | cpu = cpumask_next(cpu, &avail); | 653 | cpu = cpumask_next(cpu, &avail); |
654 | } | 654 | } |
655 | return nr; | 655 | return nr; |
656 | } | 656 | } |
657 | 657 | ||
658 | static void __init smp_detect_cpus(void) | 658 | static void __init smp_detect_cpus(void) |
659 | { | 659 | { |
660 | unsigned int cpu, c_cpus, s_cpus; | 660 | unsigned int cpu, c_cpus, s_cpus; |
661 | struct sclp_cpu_info *info; | 661 | struct sclp_cpu_info *info; |
662 | 662 | ||
663 | info = smp_get_cpu_info(); | 663 | info = smp_get_cpu_info(); |
664 | if (!info) | 664 | if (!info) |
665 | panic("smp_detect_cpus failed to allocate memory\n"); | 665 | panic("smp_detect_cpus failed to allocate memory\n"); |
666 | if (info->has_cpu_type) { | 666 | if (info->has_cpu_type) { |
667 | for (cpu = 0; cpu < info->combined; cpu++) { | 667 | for (cpu = 0; cpu < info->combined; cpu++) { |
668 | if (info->cpu[cpu].address != boot_cpu_address) | 668 | if (info->cpu[cpu].address != boot_cpu_address) |
669 | continue; | 669 | continue; |
670 | /* The boot cpu dictates the cpu type. */ | 670 | /* The boot cpu dictates the cpu type. */ |
671 | boot_cpu_type = info->cpu[cpu].type; | 671 | boot_cpu_type = info->cpu[cpu].type; |
672 | break; | 672 | break; |
673 | } | 673 | } |
674 | } | 674 | } |
675 | c_cpus = s_cpus = 0; | 675 | c_cpus = s_cpus = 0; |
676 | for (cpu = 0; cpu < info->combined; cpu++) { | 676 | for (cpu = 0; cpu < info->combined; cpu++) { |
677 | if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) | 677 | if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) |
678 | continue; | 678 | continue; |
679 | if (cpu < info->configured) { | 679 | if (cpu < info->configured) { |
680 | smp_get_save_area(c_cpus, info->cpu[cpu].address); | 680 | smp_get_save_area(c_cpus, info->cpu[cpu].address); |
681 | c_cpus++; | 681 | c_cpus++; |
682 | } else | 682 | } else |
683 | s_cpus++; | 683 | s_cpus++; |
684 | } | 684 | } |
685 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); | 685 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
686 | get_online_cpus(); | 686 | get_online_cpus(); |
687 | __smp_rescan_cpus(info, 0); | 687 | __smp_rescan_cpus(info, 0); |
688 | put_online_cpus(); | 688 | put_online_cpus(); |
689 | kfree(info); | 689 | kfree(info); |
690 | } | 690 | } |
691 | 691 | ||
692 | /* | 692 | /* |
693 | * Activate a secondary processor. | 693 | * Activate a secondary processor. |
694 | */ | 694 | */ |
695 | static void __cpuinit smp_start_secondary(void *cpuvoid) | 695 | static void __cpuinit smp_start_secondary(void *cpuvoid) |
696 | { | 696 | { |
697 | S390_lowcore.last_update_clock = get_clock(); | 697 | S390_lowcore.last_update_clock = get_clock(); |
698 | S390_lowcore.restart_stack = (unsigned long) restart_stack; | 698 | S390_lowcore.restart_stack = (unsigned long) restart_stack; |
699 | S390_lowcore.restart_fn = (unsigned long) do_restart; | 699 | S390_lowcore.restart_fn = (unsigned long) do_restart; |
700 | S390_lowcore.restart_data = 0; | 700 | S390_lowcore.restart_data = 0; |
701 | S390_lowcore.restart_source = -1UL; | 701 | S390_lowcore.restart_source = -1UL; |
702 | restore_access_regs(S390_lowcore.access_regs_save_area); | 702 | restore_access_regs(S390_lowcore.access_regs_save_area); |
703 | __ctl_load(S390_lowcore.cregs_save_area, 0, 15); | 703 | __ctl_load(S390_lowcore.cregs_save_area, 0, 15); |
704 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | 704 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
705 | cpu_init(); | 705 | cpu_init(); |
706 | preempt_disable(); | 706 | preempt_disable(); |
707 | init_cpu_timer(); | 707 | init_cpu_timer(); |
708 | init_cpu_vtimer(); | 708 | init_cpu_vtimer(); |
709 | pfault_init(); | 709 | pfault_init(); |
710 | notify_cpu_starting(smp_processor_id()); | 710 | notify_cpu_starting(smp_processor_id()); |
711 | set_cpu_online(smp_processor_id(), true); | 711 | set_cpu_online(smp_processor_id(), true); |
712 | local_irq_enable(); | 712 | local_irq_enable(); |
713 | /* cpu_idle will call schedule for us */ | 713 | /* cpu_idle will call schedule for us */ |
714 | cpu_idle(); | 714 | cpu_idle(); |
715 | } | 715 | } |
716 | 716 | ||
717 | /* Upping and downing of CPUs */ | 717 | /* Upping and downing of CPUs */ |
718 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) | 718 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) |
719 | { | 719 | { |
720 | struct pcpu *pcpu; | 720 | struct pcpu *pcpu; |
721 | int rc; | 721 | int rc; |
722 | 722 | ||
723 | pcpu = pcpu_devices + cpu; | 723 | pcpu = pcpu_devices + cpu; |
724 | if (pcpu->state != CPU_STATE_CONFIGURED) | 724 | if (pcpu->state != CPU_STATE_CONFIGURED) |
725 | return -EIO; | 725 | return -EIO; |
726 | if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != | 726 | if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != |
727 | SIGP_CC_ORDER_CODE_ACCEPTED) | 727 | SIGP_CC_ORDER_CODE_ACCEPTED) |
728 | return -EIO; | 728 | return -EIO; |
729 | 729 | ||
730 | rc = pcpu_alloc_lowcore(pcpu, cpu); | 730 | rc = pcpu_alloc_lowcore(pcpu, cpu); |
731 | if (rc) | 731 | if (rc) |
732 | return rc; | 732 | return rc; |
733 | pcpu_prepare_secondary(pcpu, cpu); | 733 | pcpu_prepare_secondary(pcpu, cpu); |
734 | pcpu_attach_task(pcpu, tidle); | 734 | pcpu_attach_task(pcpu, tidle); |
735 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); | 735 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); |
736 | while (!cpu_online(cpu)) | 736 | while (!cpu_online(cpu)) |
737 | cpu_relax(); | 737 | cpu_relax(); |
738 | return 0; | 738 | return 0; |
739 | } | 739 | } |
740 | 740 | ||
741 | static int __init setup_possible_cpus(char *s) | 741 | static int __init setup_possible_cpus(char *s) |
742 | { | 742 | { |
743 | int max, cpu; | 743 | int max, cpu; |
744 | 744 | ||
745 | if (kstrtoint(s, 0, &max) < 0) | 745 | if (kstrtoint(s, 0, &max) < 0) |
746 | return 0; | 746 | return 0; |
747 | init_cpu_possible(cpumask_of(0)); | 747 | init_cpu_possible(cpumask_of(0)); |
748 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) | 748 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) |
749 | set_cpu_possible(cpu, true); | 749 | set_cpu_possible(cpu, true); |
750 | return 0; | 750 | return 0; |
751 | } | 751 | } |
752 | early_param("possible_cpus", setup_possible_cpus); | 752 | early_param("possible_cpus", setup_possible_cpus); |
753 | 753 | ||
754 | #ifdef CONFIG_HOTPLUG_CPU | 754 | #ifdef CONFIG_HOTPLUG_CPU |
755 | 755 | ||
756 | int __cpu_disable(void) | 756 | int __cpu_disable(void) |
757 | { | 757 | { |
758 | unsigned long cregs[16]; | 758 | unsigned long cregs[16]; |
759 | 759 | ||
760 | set_cpu_online(smp_processor_id(), false); | 760 | set_cpu_online(smp_processor_id(), false); |
761 | /* Disable pseudo page faults on this cpu. */ | 761 | /* Disable pseudo page faults on this cpu. */ |
762 | pfault_fini(); | 762 | pfault_fini(); |
763 | /* Disable interrupt sources via control register. */ | 763 | /* Disable interrupt sources via control register. */ |
764 | __ctl_store(cregs, 0, 15); | 764 | __ctl_store(cregs, 0, 15); |
765 | cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ | 765 | cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ |
766 | cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ | 766 | cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ |
767 | cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ | 767 | cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ |
768 | __ctl_load(cregs, 0, 15); | 768 | __ctl_load(cregs, 0, 15); |
769 | return 0; | 769 | return 0; |
770 | } | 770 | } |
771 | 771 | ||
772 | void __cpu_die(unsigned int cpu) | 772 | void __cpu_die(unsigned int cpu) |
773 | { | 773 | { |
774 | struct pcpu *pcpu; | 774 | struct pcpu *pcpu; |
775 | 775 | ||
776 | /* Wait until target cpu is down */ | 776 | /* Wait until target cpu is down */ |
777 | pcpu = pcpu_devices + cpu; | 777 | pcpu = pcpu_devices + cpu; |
778 | while (!pcpu_stopped(pcpu)) | 778 | while (!pcpu_stopped(pcpu)) |
779 | cpu_relax(); | 779 | cpu_relax(); |
780 | pcpu_free_lowcore(pcpu); | 780 | pcpu_free_lowcore(pcpu); |
781 | atomic_dec(&init_mm.context.attach_count); | 781 | atomic_dec(&init_mm.context.attach_count); |
782 | } | 782 | } |
783 | 783 | ||
784 | void __noreturn cpu_die(void) | 784 | void __noreturn cpu_die(void) |
785 | { | 785 | { |
786 | idle_task_exit(); | 786 | idle_task_exit(); |
787 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); | 787 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); |
788 | for (;;) ; | 788 | for (;;) ; |
789 | } | 789 | } |
790 | 790 | ||
791 | #endif /* CONFIG_HOTPLUG_CPU */ | 791 | #endif /* CONFIG_HOTPLUG_CPU */ |
792 | 792 | ||
793 | void __init smp_prepare_cpus(unsigned int max_cpus) | 793 | void __init smp_prepare_cpus(unsigned int max_cpus) |
794 | { | 794 | { |
795 | /* request the 0x1201 emergency signal external interrupt */ | 795 | /* request the 0x1201 emergency signal external interrupt */ |
796 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 796 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
797 | panic("Couldn't request external interrupt 0x1201"); | 797 | panic("Couldn't request external interrupt 0x1201"); |
798 | /* request the 0x1202 external call external interrupt */ | 798 | /* request the 0x1202 external call external interrupt */ |
799 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | 799 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) |
800 | panic("Couldn't request external interrupt 0x1202"); | 800 | panic("Couldn't request external interrupt 0x1202"); |
801 | smp_detect_cpus(); | 801 | smp_detect_cpus(); |
802 | } | 802 | } |
803 | 803 | ||
804 | void __init smp_prepare_boot_cpu(void) | 804 | void __init smp_prepare_boot_cpu(void) |
805 | { | 805 | { |
806 | struct pcpu *pcpu = pcpu_devices; | 806 | struct pcpu *pcpu = pcpu_devices; |
807 | 807 | ||
808 | boot_cpu_address = stap(); | 808 | boot_cpu_address = stap(); |
809 | pcpu->state = CPU_STATE_CONFIGURED; | 809 | pcpu->state = CPU_STATE_CONFIGURED; |
810 | pcpu->address = boot_cpu_address; | 810 | pcpu->address = boot_cpu_address; |
811 | pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); | 811 | pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); |
812 | pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; | 812 | pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; |
813 | pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; | 813 | pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; |
814 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 814 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
815 | smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); | 815 | smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); |
816 | set_cpu_present(0, true); | 816 | set_cpu_present(0, true); |
817 | set_cpu_online(0, true); | 817 | set_cpu_online(0, true); |
818 | } | 818 | } |
819 | 819 | ||
820 | void __init smp_cpus_done(unsigned int max_cpus) | 820 | void __init smp_cpus_done(unsigned int max_cpus) |
821 | { | 821 | { |
822 | } | 822 | } |
823 | 823 | ||
824 | void __init smp_setup_processor_id(void) | 824 | void __init smp_setup_processor_id(void) |
825 | { | 825 | { |
826 | S390_lowcore.cpu_nr = 0; | 826 | S390_lowcore.cpu_nr = 0; |
827 | } | 827 | } |
828 | 828 | ||
829 | /* | 829 | /* |
830 | * the frequency of the profiling timer can be changed | 830 | * the frequency of the profiling timer can be changed |
831 | * by writing a multiplier value into /proc/profile. | 831 | * by writing a multiplier value into /proc/profile. |
832 | * | 832 | * |
833 | * usually you want to run this on all CPUs ;) | 833 | * usually you want to run this on all CPUs ;) |
834 | */ | 834 | */ |
835 | int setup_profiling_timer(unsigned int multiplier) | 835 | int setup_profiling_timer(unsigned int multiplier) |
836 | { | 836 | { |
837 | return 0; | 837 | return 0; |
838 | } | 838 | } |
839 | 839 | ||
840 | #ifdef CONFIG_HOTPLUG_CPU | 840 | #ifdef CONFIG_HOTPLUG_CPU |
841 | static ssize_t cpu_configure_show(struct device *dev, | 841 | static ssize_t cpu_configure_show(struct device *dev, |
842 | struct device_attribute *attr, char *buf) | 842 | struct device_attribute *attr, char *buf) |
843 | { | 843 | { |
844 | ssize_t count; | 844 | ssize_t count; |
845 | 845 | ||
846 | mutex_lock(&smp_cpu_state_mutex); | 846 | mutex_lock(&smp_cpu_state_mutex); |
847 | count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); | 847 | count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); |
848 | mutex_unlock(&smp_cpu_state_mutex); | 848 | mutex_unlock(&smp_cpu_state_mutex); |
849 | return count; | 849 | return count; |
850 | } | 850 | } |
851 | 851 | ||
852 | static ssize_t cpu_configure_store(struct device *dev, | 852 | static ssize_t cpu_configure_store(struct device *dev, |
853 | struct device_attribute *attr, | 853 | struct device_attribute *attr, |
854 | const char *buf, size_t count) | 854 | const char *buf, size_t count) |
855 | { | 855 | { |
856 | struct pcpu *pcpu; | 856 | struct pcpu *pcpu; |
857 | int cpu, val, rc; | 857 | int cpu, val, rc; |
858 | char delim; | 858 | char delim; |
859 | 859 | ||
860 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 860 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
861 | return -EINVAL; | 861 | return -EINVAL; |
862 | if (val != 0 && val != 1) | 862 | if (val != 0 && val != 1) |
863 | return -EINVAL; | 863 | return -EINVAL; |
864 | get_online_cpus(); | 864 | get_online_cpus(); |
865 | mutex_lock(&smp_cpu_state_mutex); | 865 | mutex_lock(&smp_cpu_state_mutex); |
866 | rc = -EBUSY; | 866 | rc = -EBUSY; |
867 | /* disallow configuration changes of online cpus and cpu 0 */ | 867 | /* disallow configuration changes of online cpus and cpu 0 */ |
868 | cpu = dev->id; | 868 | cpu = dev->id; |
869 | if (cpu_online(cpu) || cpu == 0) | 869 | if (cpu_online(cpu) || cpu == 0) |
870 | goto out; | 870 | goto out; |
871 | pcpu = pcpu_devices + cpu; | 871 | pcpu = pcpu_devices + cpu; |
872 | rc = 0; | 872 | rc = 0; |
873 | switch (val) { | 873 | switch (val) { |
874 | case 0: | 874 | case 0: |
875 | if (pcpu->state != CPU_STATE_CONFIGURED) | 875 | if (pcpu->state != CPU_STATE_CONFIGURED) |
876 | break; | 876 | break; |
877 | rc = sclp_cpu_deconfigure(pcpu->address); | 877 | rc = sclp_cpu_deconfigure(pcpu->address); |
878 | if (rc) | 878 | if (rc) |
879 | break; | 879 | break; |
880 | pcpu->state = CPU_STATE_STANDBY; | 880 | pcpu->state = CPU_STATE_STANDBY; |
881 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 881 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
882 | topology_expect_change(); | 882 | topology_expect_change(); |
883 | break; | 883 | break; |
884 | case 1: | 884 | case 1: |
885 | if (pcpu->state != CPU_STATE_STANDBY) | 885 | if (pcpu->state != CPU_STATE_STANDBY) |
886 | break; | 886 | break; |
887 | rc = sclp_cpu_configure(pcpu->address); | 887 | rc = sclp_cpu_configure(pcpu->address); |
888 | if (rc) | 888 | if (rc) |
889 | break; | 889 | break; |
890 | pcpu->state = CPU_STATE_CONFIGURED; | 890 | pcpu->state = CPU_STATE_CONFIGURED; |
891 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 891 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
892 | topology_expect_change(); | 892 | topology_expect_change(); |
893 | break; | 893 | break; |
894 | default: | 894 | default: |
895 | break; | 895 | break; |
896 | } | 896 | } |
897 | out: | 897 | out: |
898 | mutex_unlock(&smp_cpu_state_mutex); | 898 | mutex_unlock(&smp_cpu_state_mutex); |
899 | put_online_cpus(); | 899 | put_online_cpus(); |
900 | return rc ? rc : count; | 900 | return rc ? rc : count; |
901 | } | 901 | } |
902 | static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | 902 | static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); |
903 | #endif /* CONFIG_HOTPLUG_CPU */ | 903 | #endif /* CONFIG_HOTPLUG_CPU */ |
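cpu_configure_store() accepts a bare 0 or 1 (the sscanf check tolerates trailing whitespace but nothing else), refuses online CPUs and CPU 0, and drives the SCLP (de)configure calls for standby CPUs. A hedged userspace sketch; the CPU number is purely illustrative:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* "1" -> sclp_cpu_configure(), "0" -> sclp_cpu_deconfigure() */
	int fd = open("/sys/devices/system/cpu/cpu2/configure", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}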
904 | 904 | ||
905 | static ssize_t show_cpu_address(struct device *dev, | 905 | static ssize_t show_cpu_address(struct device *dev, |
906 | struct device_attribute *attr, char *buf) | 906 | struct device_attribute *attr, char *buf) |
907 | { | 907 | { |
908 | return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); | 908 | return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); |
909 | } | 909 | } |
910 | static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); | 910 | static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); |
911 | 911 | ||
912 | static struct attribute *cpu_common_attrs[] = { | 912 | static struct attribute *cpu_common_attrs[] = { |
913 | #ifdef CONFIG_HOTPLUG_CPU | 913 | #ifdef CONFIG_HOTPLUG_CPU |
914 | &dev_attr_configure.attr, | 914 | &dev_attr_configure.attr, |
915 | #endif | 915 | #endif |
916 | &dev_attr_address.attr, | 916 | &dev_attr_address.attr, |
917 | NULL, | 917 | NULL, |
918 | }; | 918 | }; |
919 | 919 | ||
920 | static struct attribute_group cpu_common_attr_group = { | 920 | static struct attribute_group cpu_common_attr_group = { |
921 | .attrs = cpu_common_attrs, | 921 | .attrs = cpu_common_attrs, |
922 | }; | 922 | }; |
923 | 923 | ||
924 | static ssize_t show_idle_count(struct device *dev, | 924 | static ssize_t show_idle_count(struct device *dev, |
925 | struct device_attribute *attr, char *buf) | 925 | struct device_attribute *attr, char *buf) |
926 | { | 926 | { |
927 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); | 927 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
928 | unsigned long long idle_count; | 928 | unsigned long long idle_count; |
929 | unsigned int sequence; | 929 | unsigned int sequence; |
930 | 930 | ||
931 | do { | 931 | do { |
932 | sequence = ACCESS_ONCE(idle->sequence); | 932 | sequence = ACCESS_ONCE(idle->sequence); |
933 | idle_count = ACCESS_ONCE(idle->idle_count); | 933 | idle_count = ACCESS_ONCE(idle->idle_count); |
934 | if (ACCESS_ONCE(idle->clock_idle_enter)) | 934 | if (ACCESS_ONCE(idle->clock_idle_enter)) |
935 | idle_count++; | 935 | idle_count++; |
936 | } while ((sequence & 1) || (idle->sequence != sequence)); | 936 | } while ((sequence & 1) || (idle->sequence != sequence)); |
937 | return sprintf(buf, "%llu\n", idle_count); | 937 | return sprintf(buf, "%llu\n", idle_count); |
938 | } | 938 | } |
939 | static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); | 939 | static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); |
940 | 940 | ||
941 | static ssize_t show_idle_time(struct device *dev, | 941 | static ssize_t show_idle_time(struct device *dev, |
942 | struct device_attribute *attr, char *buf) | 942 | struct device_attribute *attr, char *buf) |
943 | { | 943 | { |
944 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); | 944 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
945 | unsigned long long now, idle_time, idle_enter, idle_exit; | 945 | unsigned long long now, idle_time, idle_enter, idle_exit; |
946 | unsigned int sequence; | 946 | unsigned int sequence; |
947 | 947 | ||
948 | do { | 948 | do { |
949 | now = get_clock(); | 949 | now = get_clock(); |
950 | sequence = ACCESS_ONCE(idle->sequence); | 950 | sequence = ACCESS_ONCE(idle->sequence); |
951 | idle_time = ACCESS_ONCE(idle->idle_time); | 951 | idle_time = ACCESS_ONCE(idle->idle_time); |
952 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); | 952 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); |
953 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); | 953 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); |
954 | } while ((sequence & 1) || (idle->sequence != sequence)); | 954 | } while ((sequence & 1) || (idle->sequence != sequence)); |
955 | idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; | 955 | idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; |
956 | return sprintf(buf, "%llu\n", idle_time >> 12); | 956 | return sprintf(buf, "%llu\n", idle_time >> 12); |
957 | } | 957 | } |
958 | static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); | 958 | static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); |
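Both readers above take a lockless snapshot against the idle path: the writer bumps idle->sequence to an odd value before updating and back to even afterwards, so the loop retries whenever it observed a torn update. A distilled sketch of the writer side this protocol pairs with (names and barrier placement illustrative, not the actual s390 idle code):

static void idle_update(struct s390_idle_data *idle,
			unsigned long long clock_enter,
			unsigned long long clock_exit)
{
	idle->sequence++;			/* odd: update in flight */
	smp_wmb();
	idle->idle_time += clock_exit - clock_enter;
	idle->idle_count++;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	smp_wmb();
	idle->sequence++;			/* even: snapshot stable */
}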
959 | 959 | ||
960 | static struct attribute *cpu_online_attrs[] = { | 960 | static struct attribute *cpu_online_attrs[] = { |
961 | &dev_attr_idle_count.attr, | 961 | &dev_attr_idle_count.attr, |
962 | &dev_attr_idle_time_us.attr, | 962 | &dev_attr_idle_time_us.attr, |
963 | NULL, | 963 | NULL, |
964 | }; | 964 | }; |
965 | 965 | ||
966 | static struct attribute_group cpu_online_attr_group = { | 966 | static struct attribute_group cpu_online_attr_group = { |
967 | .attrs = cpu_online_attrs, | 967 | .attrs = cpu_online_attrs, |
968 | }; | 968 | }; |
969 | 969 | ||
970 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, | 970 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, |
971 | unsigned long action, void *hcpu) | 971 | unsigned long action, void *hcpu) |
972 | { | 972 | { |
973 | unsigned int cpu = (unsigned int)(long)hcpu; | 973 | unsigned int cpu = (unsigned int)(long)hcpu; |
974 | struct cpu *c = &pcpu_devices[cpu].cpu; | 974 | struct cpu *c = &pcpu_devices[cpu].cpu; |
975 | struct device *s = &c->dev; | 975 | struct device *s = &c->dev; |
976 | int err = 0; | 976 | int err = 0; |
977 | 977 | ||
978 | switch (action & ~CPU_TASKS_FROZEN) { | 978 | switch (action & ~CPU_TASKS_FROZEN) { |
979 | case CPU_ONLINE: | 979 | case CPU_ONLINE: |
980 | err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | 980 | err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); |
981 | break; | 981 | break; |
982 | case CPU_DEAD: | 982 | case CPU_DEAD: |
983 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); | 983 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); |
984 | break; | 984 | break; |
985 | } | 985 | } |
986 | return notifier_from_errno(err); | 986 | return notifier_from_errno(err); |
987 | } | 987 | } |
988 | 988 | ||
989 | static int __devinit smp_add_present_cpu(int cpu) | 989 | static int __devinit smp_add_present_cpu(int cpu) |
990 | { | 990 | { |
991 | struct cpu *c = &pcpu_devices[cpu].cpu; | 991 | struct cpu *c = &pcpu_devices[cpu].cpu; |
992 | struct device *s = &c->dev; | 992 | struct device *s = &c->dev; |
993 | int rc; | 993 | int rc; |
994 | 994 | ||
995 | c->hotpluggable = 1; | 995 | c->hotpluggable = 1; |
996 | rc = register_cpu(c, cpu); | 996 | rc = register_cpu(c, cpu); |
997 | if (rc) | 997 | if (rc) |
998 | goto out; | 998 | goto out; |
999 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); | 999 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); |
1000 | if (rc) | 1000 | if (rc) |
1001 | goto out_cpu; | 1001 | goto out_cpu; |
1002 | if (cpu_online(cpu)) { | 1002 | if (cpu_online(cpu)) { |
1003 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | 1003 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); |
1004 | if (rc) | 1004 | if (rc) |
1005 | goto out_online; | 1005 | goto out_online; |
1006 | } | 1006 | } |
1007 | rc = topology_cpu_init(c); | 1007 | rc = topology_cpu_init(c); |
1008 | if (rc) | 1008 | if (rc) |
1009 | goto out_topology; | 1009 | goto out_topology; |
1010 | return 0; | 1010 | return 0; |
1011 | 1011 | ||
1012 | out_topology: | 1012 | out_topology: |
1013 | if (cpu_online(cpu)) | 1013 | if (cpu_online(cpu)) |
1014 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); | 1014 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); |
1015 | out_online: | 1015 | out_online: |
1016 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); | 1016 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); |
1017 | out_cpu: | 1017 | out_cpu: |
1018 | #ifdef CONFIG_HOTPLUG_CPU | 1018 | #ifdef CONFIG_HOTPLUG_CPU |
1019 | unregister_cpu(c); | 1019 | unregister_cpu(c); |
1020 | #endif | 1020 | #endif |
1021 | out: | 1021 | out: |
1022 | return rc; | 1022 | return rc; |
1023 | } | 1023 | } |
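smp_add_present_cpu() uses the standard unwind ladder: every step that can fail jumps to a label that tears down exactly the steps that already succeeded, in reverse order. The same idiom, distilled into a self-contained sketch with stand-in steps:

#include <stdio.h>

static int step_a(void) { return 0; }		/* e.g. register_cpu()       */
static int step_b(void) { return -1; }		/* e.g. sysfs_create_group() */
static void undo_a(void) { puts("undo a"); }	/* e.g. unregister_cpu()     */

static int setup_example(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto out_a;	/* b failed: only a needs undoing */
	return 0;
out_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	return setup_example() ? 1 : 0;
}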
1024 | 1024 | ||
1025 | #ifdef CONFIG_HOTPLUG_CPU | 1025 | #ifdef CONFIG_HOTPLUG_CPU |
1026 | 1026 | ||
1027 | int __ref smp_rescan_cpus(void) | 1027 | int __ref smp_rescan_cpus(void) |
1028 | { | 1028 | { |
1029 | struct sclp_cpu_info *info; | 1029 | struct sclp_cpu_info *info; |
1030 | int nr; | 1030 | int nr; |
1031 | 1031 | ||
1032 | info = smp_get_cpu_info(); | 1032 | info = smp_get_cpu_info(); |
1033 | if (!info) | 1033 | if (!info) |
1034 | return -ENOMEM; | 1034 | return -ENOMEM; |
1035 | get_online_cpus(); | 1035 | get_online_cpus(); |
1036 | mutex_lock(&smp_cpu_state_mutex); | 1036 | mutex_lock(&smp_cpu_state_mutex); |
1037 | nr = __smp_rescan_cpus(info, 1); | 1037 | nr = __smp_rescan_cpus(info, 1); |
1038 | mutex_unlock(&smp_cpu_state_mutex); | 1038 | mutex_unlock(&smp_cpu_state_mutex); |
1039 | put_online_cpus(); | 1039 | put_online_cpus(); |
1040 | kfree(info); | 1040 | kfree(info); |
1041 | if (nr) | 1041 | if (nr) |
1042 | topology_schedule_update(); | 1042 | topology_schedule_update(); |
1043 | return 0; | 1043 | return 0; |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | static ssize_t __ref rescan_store(struct device *dev, | 1046 | static ssize_t __ref rescan_store(struct device *dev, |
1047 | struct device_attribute *attr, | 1047 | struct device_attribute *attr, |
1048 | const char *buf, | 1048 | const char *buf, |
1049 | size_t count) | 1049 | size_t count) |
1050 | { | 1050 | { |
1051 | int rc; | 1051 | int rc; |
1052 | 1052 | ||
1053 | rc = smp_rescan_cpus(); | 1053 | rc = smp_rescan_cpus(); |
1054 | return rc ? rc : count; | 1054 | return rc ? rc : count; |
1055 | } | 1055 | } |
1056 | static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); | 1056 | static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); |
1057 | #endif /* CONFIG_HOTPLUG_CPU */ | 1057 | #endif /* CONFIG_HOTPLUG_CPU */ |
1058 | 1058 | ||
1059 | static int __init s390_smp_init(void) | 1059 | static int __init s390_smp_init(void) |
1060 | { | 1060 | { |
1061 | int cpu, rc; | 1061 | int cpu, rc; |
1062 | 1062 | ||
1063 | hotcpu_notifier(smp_cpu_notify, 0); | 1063 | hotcpu_notifier(smp_cpu_notify, 0); |
1064 | #ifdef CONFIG_HOTPLUG_CPU | 1064 | #ifdef CONFIG_HOTPLUG_CPU |
1065 | rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); | 1065 | rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); |
1066 | if (rc) | 1066 | if (rc) |
1067 | return rc; | 1067 | return rc; |
1068 | #endif | 1068 | #endif |
1069 | for_each_present_cpu(cpu) { | 1069 | for_each_present_cpu(cpu) { |
1070 | rc = smp_add_present_cpu(cpu); | 1070 | rc = smp_add_present_cpu(cpu); |
1071 | if (rc) | 1071 | if (rc) |
1072 | return rc; | 1072 | return rc; |
1073 | } | 1073 | } |
1074 | return 0; | 1074 | return 0; |
1075 | } | 1075 | } |
1076 | subsys_initcall(s390_smp_init); | 1076 | subsys_initcall(s390_smp_init); |
1077 | 1077 |
arch/s390/mm/pgtable.c
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2007, 2011 | 2 | * Copyright IBM Corp. 2007, 2011 |
3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | 3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/gfp.h> | 9 | #include <linux/gfp.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | 11 | #include <linux/swap.h> |
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/highmem.h> | 13 | #include <linux/highmem.h> |
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/quicklist.h> | 17 | #include <linux/quicklist.h> |
18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
21 | #include <asm/pgtable.h> | 21 | #include <asm/pgtable.h> |
22 | #include <asm/pgalloc.h> | 22 | #include <asm/pgalloc.h> |
23 | #include <asm/tlb.h> | 23 | #include <asm/tlb.h> |
24 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
25 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
26 | 26 | ||
27 | #ifndef CONFIG_64BIT | 27 | #ifndef CONFIG_64BIT |
28 | #define ALLOC_ORDER 1 | 28 | #define ALLOC_ORDER 1 |
29 | #define FRAG_MASK 0x0f | 29 | #define FRAG_MASK 0x0f |
30 | #else | 30 | #else |
31 | #define ALLOC_ORDER 2 | 31 | #define ALLOC_ORDER 2 |
32 | #define FRAG_MASK 0x03 | 32 | #define FRAG_MASK 0x03 |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | 35 | ||
36 | unsigned long *crst_table_alloc(struct mm_struct *mm) | 36 | unsigned long *crst_table_alloc(struct mm_struct *mm) |
37 | { | 37 | { |
38 | struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | 38 | struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); |
39 | 39 | ||
40 | if (!page) | 40 | if (!page) |
41 | return NULL; | 41 | return NULL; |
42 | return (unsigned long *) page_to_phys(page); | 42 | return (unsigned long *) page_to_phys(page); |
43 | } | 43 | } |
44 | 44 | ||
45 | void crst_table_free(struct mm_struct *mm, unsigned long *table) | 45 | void crst_table_free(struct mm_struct *mm, unsigned long *table) |
46 | { | 46 | { |
47 | free_pages((unsigned long) table, ALLOC_ORDER); | 47 | free_pages((unsigned long) table, ALLOC_ORDER); |
48 | } | 48 | } |
49 | 49 | ||
50 | #ifdef CONFIG_64BIT | 50 | #ifdef CONFIG_64BIT |
51 | int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) | 51 | int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) |
52 | { | 52 | { |
53 | unsigned long *table, *pgd; | 53 | unsigned long *table, *pgd; |
54 | unsigned long entry; | 54 | unsigned long entry; |
55 | 55 | ||
56 | BUG_ON(limit > (1UL << 53)); | 56 | BUG_ON(limit > (1UL << 53)); |
57 | repeat: | 57 | repeat: |
58 | table = crst_table_alloc(mm); | 58 | table = crst_table_alloc(mm); |
59 | if (!table) | 59 | if (!table) |
60 | return -ENOMEM; | 60 | return -ENOMEM; |
61 | spin_lock_bh(&mm->page_table_lock); | 61 | spin_lock_bh(&mm->page_table_lock); |
62 | if (mm->context.asce_limit < limit) { | 62 | if (mm->context.asce_limit < limit) { |
63 | pgd = (unsigned long *) mm->pgd; | 63 | pgd = (unsigned long *) mm->pgd; |
64 | if (mm->context.asce_limit <= (1UL << 31)) { | 64 | if (mm->context.asce_limit <= (1UL << 31)) { |
65 | entry = _REGION3_ENTRY_EMPTY; | 65 | entry = _REGION3_ENTRY_EMPTY; |
66 | mm->context.asce_limit = 1UL << 42; | 66 | mm->context.asce_limit = 1UL << 42; |
67 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 67 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | |
68 | _ASCE_USER_BITS | | 68 | _ASCE_USER_BITS | |
69 | _ASCE_TYPE_REGION3; | 69 | _ASCE_TYPE_REGION3; |
70 | } else { | 70 | } else { |
71 | entry = _REGION2_ENTRY_EMPTY; | 71 | entry = _REGION2_ENTRY_EMPTY; |
72 | mm->context.asce_limit = 1UL << 53; | 72 | mm->context.asce_limit = 1UL << 53; |
73 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 73 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | |
74 | _ASCE_USER_BITS | | 74 | _ASCE_USER_BITS | |
75 | _ASCE_TYPE_REGION2; | 75 | _ASCE_TYPE_REGION2; |
76 | } | 76 | } |
77 | crst_table_init(table, entry); | 77 | crst_table_init(table, entry); |
78 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); | 78 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); |
79 | mm->pgd = (pgd_t *) table; | 79 | mm->pgd = (pgd_t *) table; |
80 | mm->task_size = mm->context.asce_limit; | 80 | mm->task_size = mm->context.asce_limit; |
81 | table = NULL; | 81 | table = NULL; |
82 | } | 82 | } |
83 | spin_unlock_bh(&mm->page_table_lock); | 83 | spin_unlock_bh(&mm->page_table_lock); |
84 | if (table) | 84 | if (table) |
85 | crst_table_free(mm, table); | 85 | crst_table_free(mm, table); |
86 | if (mm->context.asce_limit < limit) | 86 | if (mm->context.asce_limit < limit) |
87 | goto repeat; | 87 | goto repeat; |
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
90 | 90 | ||
91 | void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | 91 | void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) |
92 | { | 92 | { |
93 | pgd_t *pgd; | 93 | pgd_t *pgd; |
94 | 94 | ||
95 | while (mm->context.asce_limit > limit) { | 95 | while (mm->context.asce_limit > limit) { |
96 | pgd = mm->pgd; | 96 | pgd = mm->pgd; |
97 | switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { | 97 | switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { |
98 | case _REGION_ENTRY_TYPE_R2: | 98 | case _REGION_ENTRY_TYPE_R2: |
99 | mm->context.asce_limit = 1UL << 42; | 99 | mm->context.asce_limit = 1UL << 42; |
100 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 100 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | |
101 | _ASCE_USER_BITS | | 101 | _ASCE_USER_BITS | |
102 | _ASCE_TYPE_REGION3; | 102 | _ASCE_TYPE_REGION3; |
103 | break; | 103 | break; |
104 | case _REGION_ENTRY_TYPE_R3: | 104 | case _REGION_ENTRY_TYPE_R3: |
105 | mm->context.asce_limit = 1UL << 31; | 105 | mm->context.asce_limit = 1UL << 31; |
106 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 106 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | |
107 | _ASCE_USER_BITS | | 107 | _ASCE_USER_BITS | |
108 | _ASCE_TYPE_SEGMENT; | 108 | _ASCE_TYPE_SEGMENT; |
109 | break; | 109 | break; |
110 | default: | 110 | default: |
111 | BUG(); | 111 | BUG(); |
112 | } | 112 | } |
113 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); | 113 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); |
114 | mm->task_size = mm->context.asce_limit; | 114 | mm->task_size = mm->context.asce_limit; |
115 | crst_table_free(mm, (unsigned long *) pgd); | 115 | crst_table_free(mm, (unsigned long *) pgd); |
116 | } | 116 | } |
117 | } | 117 | } |
118 | #endif | 118 | #endif |
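The upgrade/downgrade pair steps mm->context.asce_limit through the three upper table types; each level adds 11 index bits (2048 entries) on top of 1 MiB segments, which is where the constants come from. Spelled out as a small check program:

#include <stdio.h>

int main(void)
{
	/* segment table: 2048 entries x 1 MiB  */
	printf("segment-table limit: %lu GiB\n", (1UL << 31) >> 30);	/* 2 */
	/* region-3 table: 2048 entries x 2 GiB */
	printf("region-3 limit:      %lu TiB\n", (1UL << 42) >> 40);	/* 4 */
	/* region-2 table: 2048 entries x 4 TiB */
	printf("region-2 limit:      %lu PiB\n", (1UL << 53) >> 50);	/* 8 */
	return 0;
}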
119 | 119 | ||
120 | #ifdef CONFIG_PGSTE | 120 | #ifdef CONFIG_PGSTE |
121 | 121 | ||
122 | /** | 122 | /** |
123 | * gmap_alloc - allocate a guest address space | 123 | * gmap_alloc - allocate a guest address space |
124 | * @mm: pointer to the parent mm_struct | 124 | * @mm: pointer to the parent mm_struct |
125 | * | 125 | * |
126 | * Returns a guest address space structure. | 126 | * Returns a guest address space structure. |
127 | */ | 127 | */ |
128 | struct gmap *gmap_alloc(struct mm_struct *mm) | 128 | struct gmap *gmap_alloc(struct mm_struct *mm) |
129 | { | 129 | { |
130 | struct gmap *gmap; | 130 | struct gmap *gmap; |
131 | struct page *page; | 131 | struct page *page; |
132 | unsigned long *table; | 132 | unsigned long *table; |
133 | 133 | ||
134 | gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); | 134 | gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); |
135 | if (!gmap) | 135 | if (!gmap) |
136 | goto out; | 136 | goto out; |
137 | INIT_LIST_HEAD(&gmap->crst_list); | 137 | INIT_LIST_HEAD(&gmap->crst_list); |
138 | gmap->mm = mm; | 138 | gmap->mm = mm; |
139 | page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | 139 | page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); |
140 | if (!page) | 140 | if (!page) |
141 | goto out_free; | 141 | goto out_free; |
142 | list_add(&page->lru, &gmap->crst_list); | 142 | list_add(&page->lru, &gmap->crst_list); |
143 | table = (unsigned long *) page_to_phys(page); | 143 | table = (unsigned long *) page_to_phys(page); |
144 | crst_table_init(table, _REGION1_ENTRY_EMPTY); | 144 | crst_table_init(table, _REGION1_ENTRY_EMPTY); |
145 | gmap->table = table; | 145 | gmap->table = table; |
146 | gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | | 146 | gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | |
147 | _ASCE_USER_BITS | __pa(table); | 147 | _ASCE_USER_BITS | __pa(table); |
148 | list_add(&gmap->list, &mm->context.gmap_list); | 148 | list_add(&gmap->list, &mm->context.gmap_list); |
149 | return gmap; | 149 | return gmap; |
150 | 150 | ||
151 | out_free: | 151 | out_free: |
152 | kfree(gmap); | 152 | kfree(gmap); |
153 | out: | 153 | out: |
154 | return NULL; | 154 | return NULL; |
155 | } | 155 | } |
156 | EXPORT_SYMBOL_GPL(gmap_alloc); | 156 | EXPORT_SYMBOL_GPL(gmap_alloc); |
157 | 157 | ||
158 | static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) | 158 | static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) |
159 | { | 159 | { |
160 | struct gmap_pgtable *mp; | 160 | struct gmap_pgtable *mp; |
161 | struct gmap_rmap *rmap; | 161 | struct gmap_rmap *rmap; |
162 | struct page *page; | 162 | struct page *page; |
163 | 163 | ||
164 | if (*table & _SEGMENT_ENTRY_INV) | 164 | if (*table & _SEGMENT_ENTRY_INV) |
165 | return 0; | 165 | return 0; |
166 | page = pfn_to_page(*table >> PAGE_SHIFT); | 166 | page = pfn_to_page(*table >> PAGE_SHIFT); |
167 | mp = (struct gmap_pgtable *) page->index; | 167 | mp = (struct gmap_pgtable *) page->index; |
168 | list_for_each_entry(rmap, &mp->mapper, list) { | 168 | list_for_each_entry(rmap, &mp->mapper, list) { |
169 | if (rmap->entry != table) | 169 | if (rmap->entry != table) |
170 | continue; | 170 | continue; |
171 | list_del(&rmap->list); | 171 | list_del(&rmap->list); |
172 | kfree(rmap); | 172 | kfree(rmap); |
173 | break; | 173 | break; |
174 | } | 174 | } |
175 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | 175 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; |
176 | return 1; | 176 | return 1; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void gmap_flush_tlb(struct gmap *gmap) | 179 | static void gmap_flush_tlb(struct gmap *gmap) |
180 | { | 180 | { |
181 | if (MACHINE_HAS_IDTE) | 181 | if (MACHINE_HAS_IDTE) |
182 | __tlb_flush_idte((unsigned long) gmap->table | | 182 | __tlb_flush_idte((unsigned long) gmap->table | |
183 | _ASCE_TYPE_REGION1); | 183 | _ASCE_TYPE_REGION1); |
184 | else | 184 | else |
185 | __tlb_flush_global(); | 185 | __tlb_flush_global(); |
186 | } | 186 | } |
187 | 187 | ||
188 | /** | 188 | /** |
189 | * gmap_free - free a guest address space | 189 | * gmap_free - free a guest address space |
190 | * @gmap: pointer to the guest address space structure | 190 | * @gmap: pointer to the guest address space structure |
191 | */ | 191 | */ |
192 | void gmap_free(struct gmap *gmap) | 192 | void gmap_free(struct gmap *gmap) |
193 | { | 193 | { |
194 | struct page *page, *next; | 194 | struct page *page, *next; |
195 | unsigned long *table; | 195 | unsigned long *table; |
196 | int i; | 196 | int i; |
197 | 197 | ||
198 | 198 | ||
199 | /* Flush tlb. */ | 199 | /* Flush tlb. */ |
200 | if (MACHINE_HAS_IDTE) | 200 | if (MACHINE_HAS_IDTE) |
201 | __tlb_flush_idte((unsigned long) gmap->table | | 201 | __tlb_flush_idte((unsigned long) gmap->table | |
202 | _ASCE_TYPE_REGION1); | 202 | _ASCE_TYPE_REGION1); |
203 | else | 203 | else |
204 | __tlb_flush_global(); | 204 | __tlb_flush_global(); |
205 | 205 | ||
206 | /* Free all segment & region tables. */ | 206 | /* Free all segment & region tables. */ |
207 | down_read(&gmap->mm->mmap_sem); | 207 | down_read(&gmap->mm->mmap_sem); |
208 | spin_lock(&gmap->mm->page_table_lock); | 208 | spin_lock(&gmap->mm->page_table_lock); |
209 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { | 209 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { |
210 | table = (unsigned long *) page_to_phys(page); | 210 | table = (unsigned long *) page_to_phys(page); |
211 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) | 211 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) |
212 | /* Remove gmap rmap structures for segment table. */ | 212 | /* Remove gmap rmap structures for segment table. */ |
213 | for (i = 0; i < PTRS_PER_PMD; i++, table++) | 213 | for (i = 0; i < PTRS_PER_PMD; i++, table++) |
214 | gmap_unlink_segment(gmap, table); | 214 | gmap_unlink_segment(gmap, table); |
215 | __free_pages(page, ALLOC_ORDER); | 215 | __free_pages(page, ALLOC_ORDER); |
216 | } | 216 | } |
217 | spin_unlock(&gmap->mm->page_table_lock); | 217 | spin_unlock(&gmap->mm->page_table_lock); |
218 | up_read(&gmap->mm->mmap_sem); | 218 | up_read(&gmap->mm->mmap_sem); |
219 | list_del(&gmap->list); | 219 | list_del(&gmap->list); |
220 | kfree(gmap); | 220 | kfree(gmap); |
221 | } | 221 | } |
222 | EXPORT_SYMBOL_GPL(gmap_free); | 222 | EXPORT_SYMBOL_GPL(gmap_free); |
223 | 223 | ||
224 | /** | 224 | /** |
225 | * gmap_enable - switch primary space to the guest address space | 225 | * gmap_enable - switch primary space to the guest address space |
226 | * @gmap: pointer to the guest address space structure | 226 | * @gmap: pointer to the guest address space structure |
227 | */ | 227 | */ |
228 | void gmap_enable(struct gmap *gmap) | 228 | void gmap_enable(struct gmap *gmap) |
229 | { | 229 | { |
230 | S390_lowcore.gmap = (unsigned long) gmap; | 230 | S390_lowcore.gmap = (unsigned long) gmap; |
231 | } | 231 | } |
232 | EXPORT_SYMBOL_GPL(gmap_enable); | 232 | EXPORT_SYMBOL_GPL(gmap_enable); |
233 | 233 | ||
234 | /** | 234 | /** |
235 | * gmap_disable - switch back to the standard primary address space | 235 | * gmap_disable - switch back to the standard primary address space |
236 | * @gmap: pointer to the guest address space structure | 236 | * @gmap: pointer to the guest address space structure |
237 | */ | 237 | */ |
238 | void gmap_disable(struct gmap *gmap) | 238 | void gmap_disable(struct gmap *gmap) |
239 | { | 239 | { |
240 | S390_lowcore.gmap = 0UL; | 240 | S390_lowcore.gmap = 0UL; |
241 | } | 241 | } |
242 | EXPORT_SYMBOL_GPL(gmap_disable); | 242 | EXPORT_SYMBOL_GPL(gmap_disable); |
243 | 243 | ||
244 | /* | 244 | /* |
245 | * gmap_alloc_table is assumed to be called with mmap_sem held | 245 | * gmap_alloc_table is assumed to be called with mmap_sem held |
246 | */ | 246 | */ |
247 | static int gmap_alloc_table(struct gmap *gmap, | 247 | static int gmap_alloc_table(struct gmap *gmap, |
248 | unsigned long *table, unsigned long init) | 248 | unsigned long *table, unsigned long init) |
249 | { | 249 | { |
250 | struct page *page; | 250 | struct page *page; |
251 | unsigned long *new; | 251 | unsigned long *new; |
252 | 252 | ||
253 | /* since we don't free the gmap table until gmap_free we can unlock */ | 253 | /* since we don't free the gmap table until gmap_free we can unlock */ |
254 | spin_unlock(&gmap->mm->page_table_lock); | 254 | spin_unlock(&gmap->mm->page_table_lock); |
255 | page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | 255 | page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); |
256 | spin_lock(&gmap->mm->page_table_lock); | 256 | spin_lock(&gmap->mm->page_table_lock); |
257 | if (!page) | 257 | if (!page) |
258 | return -ENOMEM; | 258 | return -ENOMEM; |
259 | new = (unsigned long *) page_to_phys(page); | 259 | new = (unsigned long *) page_to_phys(page); |
260 | crst_table_init(new, init); | 260 | crst_table_init(new, init); |
261 | if (*table & _REGION_ENTRY_INV) { | 261 | if (*table & _REGION_ENTRY_INV) { |
262 | list_add(&page->lru, &gmap->crst_list); | 262 | list_add(&page->lru, &gmap->crst_list); |
263 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | | 263 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | |
264 | (*table & _REGION_ENTRY_TYPE_MASK); | 264 | (*table & _REGION_ENTRY_TYPE_MASK); |
265 | } else | 265 | } else |
266 | __free_pages(page, ALLOC_ORDER); | 266 | __free_pages(page, ALLOC_ORDER); |
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | /** | 270 | /** |
271 | * gmap_unmap_segment - unmap segment from the guest address space | 271 | * gmap_unmap_segment - unmap segment from the guest address space |
272 | * @gmap: pointer to the guest address space structure | 272 | * @gmap: pointer to the guest address space structure |
273 | * @addr: address in the guest address space | 273 | * @addr: address in the guest address space |
274 | * @len: length of the memory area to unmap | 274 | * @len: length of the memory area to unmap |
275 | * | 275 | * |
276 | * Returns 0 if the unmap succeeded, -EINVAL if not. | 276 | * Returns 0 if the unmap succeeded, -EINVAL if not. |
277 | */ | 277 | */ |
278 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | 278 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) |
279 | { | 279 | { |
280 | unsigned long *table; | 280 | unsigned long *table; |
281 | unsigned long off; | 281 | unsigned long off; |
282 | int flush; | 282 | int flush; |
283 | 283 | ||
284 | if ((to | len) & (PMD_SIZE - 1)) | 284 | if ((to | len) & (PMD_SIZE - 1)) |
285 | return -EINVAL; | 285 | return -EINVAL; |
286 | if (len == 0 || to + len < to) | 286 | if (len == 0 || to + len < to) |
287 | return -EINVAL; | 287 | return -EINVAL; |
288 | 288 | ||
289 | flush = 0; | 289 | flush = 0; |
290 | down_read(&gmap->mm->mmap_sem); | 290 | down_read(&gmap->mm->mmap_sem); |
291 | spin_lock(&gmap->mm->page_table_lock); | 291 | spin_lock(&gmap->mm->page_table_lock); |
292 | for (off = 0; off < len; off += PMD_SIZE) { | 292 | for (off = 0; off < len; off += PMD_SIZE) { |
293 | /* Walk the guest addr space page table */ | 293 | /* Walk the guest addr space page table */ |
294 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 294 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
295 | if (*table & _REGION_ENTRY_INV) | 295 | if (*table & _REGION_ENTRY_INV) |
296 | goto out; | 296 | goto out; |
297 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 297 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
298 | table = table + (((to + off) >> 42) & 0x7ff); | 298 | table = table + (((to + off) >> 42) & 0x7ff); |
299 | if (*table & _REGION_ENTRY_INV) | 299 | if (*table & _REGION_ENTRY_INV) |
300 | goto out; | 300 | goto out; |
301 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 301 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
302 | table = table + (((to + off) >> 31) & 0x7ff); | 302 | table = table + (((to + off) >> 31) & 0x7ff); |
303 | if (*table & _REGION_ENTRY_INV) | 303 | if (*table & _REGION_ENTRY_INV) |
304 | goto out; | 304 | goto out; |
305 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 305 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
306 | table = table + (((to + off) >> 20) & 0x7ff); | 306 | table = table + (((to + off) >> 20) & 0x7ff); |
307 | 307 | ||
308 | /* Clear segment table entry in guest address space. */ | 308 | /* Clear segment table entry in guest address space. */ |
309 | flush |= gmap_unlink_segment(gmap, table); | 309 | flush |= gmap_unlink_segment(gmap, table); |
310 | *table = _SEGMENT_ENTRY_INV; | 310 | *table = _SEGMENT_ENTRY_INV; |
311 | } | 311 | } |
312 | out: | 312 | out: |
313 | spin_unlock(&gmap->mm->page_table_lock); | 313 | spin_unlock(&gmap->mm->page_table_lock); |
314 | up_read(&gmap->mm->mmap_sem); | 314 | up_read(&gmap->mm->mmap_sem); |
315 | if (flush) | 315 | if (flush) |
316 | gmap_flush_tlb(gmap); | 316 | gmap_flush_tlb(gmap); |
317 | return 0; | 317 | return 0; |
318 | } | 318 | } |
319 | EXPORT_SYMBOL_GPL(gmap_unmap_segment); | 319 | EXPORT_SYMBOL_GPL(gmap_unmap_segment); |
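Every gmap walk in this file slices a guest address the same way: bits 63:53 index the region-1 table, 52:42 the region-2 table, 41:31 the region-3 table, and 30:20 the segment table; the 0x7ff mask is the 11-bit (2048-entry) index width. A standalone sketch of the index computation:

#include <stdio.h>

static void gmap_indices(unsigned long addr)
{
	printf("r1=%4lu r2=%4lu r3=%4lu sx=%4lu\n",
	       (addr >> 53) & 0x7ff,	/* region-1 index */
	       (addr >> 42) & 0x7ff,	/* region-2 index */
	       (addr >> 31) & 0x7ff,	/* region-3 index */
	       (addr >> 20) & 0x7ff);	/* segment index  */
}

int main(void)
{
	gmap_indices(0x0020040080100000UL);	/* arbitrary test address */
	return 0;
}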
320 | 320 | ||
321 | /** | 321 | /** |
322 | * gmap_map_segment - map a segment to the guest address space | 322 | * gmap_map_segment - map a segment to the guest address space |
323 | * @gmap: pointer to the guest address space structure | 323 | * @gmap: pointer to the guest address space structure |
324 | * @from: source address in the parent address space | 324 | * @from: source address in the parent address space |
325 | * @to: target address in the guest address space | 325 | * @to: target address in the guest address space |
326 | * | 326 | * |
327 | * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. | 327 | * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. |
328 | */ | 328 | */ |
329 | int gmap_map_segment(struct gmap *gmap, unsigned long from, | 329 | int gmap_map_segment(struct gmap *gmap, unsigned long from, |
330 | unsigned long to, unsigned long len) | 330 | unsigned long to, unsigned long len) |
331 | { | 331 | { |
332 | unsigned long *table; | 332 | unsigned long *table; |
333 | unsigned long off; | 333 | unsigned long off; |
334 | int flush; | 334 | int flush; |
335 | 335 | ||
336 | if ((from | to | len) & (PMD_SIZE - 1)) | 336 | if ((from | to | len) & (PMD_SIZE - 1)) |
337 | return -EINVAL; | 337 | return -EINVAL; |
338 | if (len == 0 || from + len > PGDIR_SIZE || | 338 | if (len == 0 || from + len > PGDIR_SIZE || |
339 | from + len < from || to + len < to) | 339 | from + len < from || to + len < to) |
340 | return -EINVAL; | 340 | return -EINVAL; |
341 | 341 | ||
342 | flush = 0; | 342 | flush = 0; |
343 | down_read(&gmap->mm->mmap_sem); | 343 | down_read(&gmap->mm->mmap_sem); |
344 | spin_lock(&gmap->mm->page_table_lock); | 344 | spin_lock(&gmap->mm->page_table_lock); |
345 | for (off = 0; off < len; off += PMD_SIZE) { | 345 | for (off = 0; off < len; off += PMD_SIZE) { |
346 | /* Walk the gmap address space page table */ | 346 | /* Walk the gmap address space page table */ |
347 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 347 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
348 | if ((*table & _REGION_ENTRY_INV) && | 348 | if ((*table & _REGION_ENTRY_INV) && |
349 | gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) | 349 | gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) |
350 | goto out_unmap; | 350 | goto out_unmap; |
351 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 351 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
352 | table = table + (((to + off) >> 42) & 0x7ff); | 352 | table = table + (((to + off) >> 42) & 0x7ff); |
353 | if ((*table & _REGION_ENTRY_INV) && | 353 | if ((*table & _REGION_ENTRY_INV) && |
354 | gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) | 354 | gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) |
355 | goto out_unmap; | 355 | goto out_unmap; |
356 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 356 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
357 | table = table + (((to + off) >> 31) & 0x7ff); | 357 | table = table + (((to + off) >> 31) & 0x7ff); |
358 | if ((*table & _REGION_ENTRY_INV) && | 358 | if ((*table & _REGION_ENTRY_INV) && |
359 | gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) | 359 | gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) |
360 | goto out_unmap; | 360 | goto out_unmap; |
361 | table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); | 361 | table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); |
362 | table = table + (((to + off) >> 20) & 0x7ff); | 362 | table = table + (((to + off) >> 20) & 0x7ff); |
363 | 363 | ||
364 | /* Store 'from' address in an invalid segment table entry. */ | 364 | /* Store 'from' address in an invalid segment table entry. */ |
365 | flush |= gmap_unlink_segment(gmap, table); | 365 | flush |= gmap_unlink_segment(gmap, table); |
366 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); | 366 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); |
367 | } | 367 | } |
368 | spin_unlock(&gmap->mm->page_table_lock); | 368 | spin_unlock(&gmap->mm->page_table_lock); |
369 | up_read(&gmap->mm->mmap_sem); | 369 | up_read(&gmap->mm->mmap_sem); |
370 | if (flush) | 370 | if (flush) |
371 | gmap_flush_tlb(gmap); | 371 | gmap_flush_tlb(gmap); |
372 | return 0; | 372 | return 0; |
373 | 373 | ||
374 | out_unmap: | 374 | out_unmap: |
375 | spin_unlock(&gmap->mm->page_table_lock); | 375 | spin_unlock(&gmap->mm->page_table_lock); |
376 | up_read(&gmap->mm->mmap_sem); | 376 | up_read(&gmap->mm->mmap_sem); |
377 | gmap_unmap_segment(gmap, to, len); | 377 | gmap_unmap_segment(gmap, to, len); |
378 | return -ENOMEM; | 378 | return -ENOMEM; |
379 | } | 379 | } |
380 | EXPORT_SYMBOL_GPL(gmap_map_segment); | 380 | EXPORT_SYMBOL_GPL(gmap_map_segment); |
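Taken together, the exported entry points form the lifecycle a host (KVM on s390 is the in-tree user) drives. A hedged in-kernel sketch only — error handling trimmed, mapping size illustrative, not how KVM actually wires this up:

/* Sketch: map one 1 MiB segment of the given mm at guest address 0,
 * resolve it once, then tear everything down. */
static int gmap_demo(struct mm_struct *mm, unsigned long from)
{
	struct gmap *gmap;
	unsigned long hva;
	int rc;

	gmap = gmap_alloc(mm);			/* new guest address space */
	if (!gmap)
		return -ENOMEM;
	rc = gmap_map_segment(gmap, from, 0, PMD_SIZE);	/* guest 0 <- from */
	if (rc)
		goto out;
	gmap_enable(gmap);			/* primary space = guest   */
	hva = gmap_fault(0, gmap);		/* guest addr -> host addr */
	gmap_disable(gmap);
	if (IS_ERR_VALUE(hva))
		rc = (int) hva;
out:
	gmap_free(gmap);
	return rc;
}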
381 | 381 | ||
382 | /* | 382 | /* |
383 | * this function is assumed to be called with mmap_sem held | 383 | * this function is assumed to be called with mmap_sem held |
384 | */ | 384 | */ |
385 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | 385 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) |
386 | { | 386 | { |
387 | unsigned long *table, vmaddr, segment; | 387 | unsigned long *table, vmaddr, segment; |
388 | struct mm_struct *mm; | 388 | struct mm_struct *mm; |
389 | struct gmap_pgtable *mp; | 389 | struct gmap_pgtable *mp; |
390 | struct gmap_rmap *rmap; | 390 | struct gmap_rmap *rmap; |
391 | struct vm_area_struct *vma; | 391 | struct vm_area_struct *vma; |
392 | struct page *page; | 392 | struct page *page; |
393 | pgd_t *pgd; | 393 | pgd_t *pgd; |
394 | pud_t *pud; | 394 | pud_t *pud; |
395 | pmd_t *pmd; | 395 | pmd_t *pmd; |
396 | 396 | ||
397 | current->thread.gmap_addr = address; | 397 | current->thread.gmap_addr = address; |
398 | mm = gmap->mm; | 398 | mm = gmap->mm; |
399 | /* Walk the gmap address space page table */ | 399 | /* Walk the gmap address space page table */ |
400 | table = gmap->table + ((address >> 53) & 0x7ff); | 400 | table = gmap->table + ((address >> 53) & 0x7ff); |
401 | if (unlikely(*table & _REGION_ENTRY_INV)) | 401 | if (unlikely(*table & _REGION_ENTRY_INV)) |
402 | return -EFAULT; | 402 | return -EFAULT; |
403 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 403 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
404 | table = table + ((address >> 42) & 0x7ff); | 404 | table = table + ((address >> 42) & 0x7ff); |
405 | if (unlikely(*table & _REGION_ENTRY_INV)) | 405 | if (unlikely(*table & _REGION_ENTRY_INV)) |
406 | return -EFAULT; | 406 | return -EFAULT; |
407 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 407 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
408 | table = table + ((address >> 31) & 0x7ff); | 408 | table = table + ((address >> 31) & 0x7ff); |
409 | if (unlikely(*table & _REGION_ENTRY_INV)) | 409 | if (unlikely(*table & _REGION_ENTRY_INV)) |
410 | return -EFAULT; | 410 | return -EFAULT; |
411 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 411 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
412 | table = table + ((address >> 20) & 0x7ff); | 412 | table = table + ((address >> 20) & 0x7ff); |
413 | 413 | ||
414 | /* Convert the gmap address to an mm address. */ | 414 | /* Convert the gmap address to an mm address. */ |
415 | segment = *table; | 415 | segment = *table; |
416 | if (likely(!(segment & _SEGMENT_ENTRY_INV))) { | 416 | if (likely(!(segment & _SEGMENT_ENTRY_INV))) { |
417 | page = pfn_to_page(segment >> PAGE_SHIFT); | 417 | page = pfn_to_page(segment >> PAGE_SHIFT); |
418 | mp = (struct gmap_pgtable *) page->index; | 418 | mp = (struct gmap_pgtable *) page->index; |
419 | return mp->vmaddr | (address & ~PMD_MASK); | 419 | return mp->vmaddr | (address & ~PMD_MASK); |
420 | } else if (segment & _SEGMENT_ENTRY_RO) { | 420 | } else if (segment & _SEGMENT_ENTRY_RO) { |
421 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; | 421 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; |
422 | vma = find_vma(mm, vmaddr); | 422 | vma = find_vma(mm, vmaddr); |
423 | if (!vma || vma->vm_start > vmaddr) | 423 | if (!vma || vma->vm_start > vmaddr) |
424 | return -EFAULT; | 424 | return -EFAULT; |
425 | 425 | ||
426 | /* Walk the parent mm page table */ | 426 | /* Walk the parent mm page table */ |
427 | pgd = pgd_offset(mm, vmaddr); | 427 | pgd = pgd_offset(mm, vmaddr); |
428 | pud = pud_alloc(mm, pgd, vmaddr); | 428 | pud = pud_alloc(mm, pgd, vmaddr); |
429 | if (!pud) | 429 | if (!pud) |
430 | return -ENOMEM; | 430 | return -ENOMEM; |
431 | pmd = pmd_alloc(mm, pud, vmaddr); | 431 | pmd = pmd_alloc(mm, pud, vmaddr); |
432 | if (!pmd) | 432 | if (!pmd) |
433 | return -ENOMEM; | 433 | return -ENOMEM; |
434 | if (!pmd_present(*pmd) && | 434 | if (!pmd_present(*pmd) && |
435 | __pte_alloc(mm, vma, pmd, vmaddr)) | 435 | __pte_alloc(mm, vma, pmd, vmaddr)) |
436 | return -ENOMEM; | 436 | return -ENOMEM; |
437 | /* pmd now points to a valid segment table entry. */ | 437 | /* pmd now points to a valid segment table entry. */ |
438 | rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); | 438 | rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); |
439 | if (!rmap) | 439 | if (!rmap) |
440 | return -ENOMEM; | 440 | return -ENOMEM; |
441 | /* Link gmap segment table entry location to page table. */ | 441 | /* Link gmap segment table entry location to page table. */ |
442 | page = pmd_page(*pmd); | 442 | page = pmd_page(*pmd); |
443 | mp = (struct gmap_pgtable *) page->index; | 443 | mp = (struct gmap_pgtable *) page->index; |
444 | rmap->entry = table; | 444 | rmap->entry = table; |
445 | spin_lock(&mm->page_table_lock); | 445 | spin_lock(&mm->page_table_lock); |
446 | list_add(&rmap->list, &mp->mapper); | 446 | list_add(&rmap->list, &mp->mapper); |
447 | spin_unlock(&mm->page_table_lock); | 447 | spin_unlock(&mm->page_table_lock); |
448 | /* Set gmap segment table entry to page table. */ | 448 | /* Set gmap segment table entry to page table. */ |
449 | *table = pmd_val(*pmd) & PAGE_MASK; | 449 | *table = pmd_val(*pmd) & PAGE_MASK; |
450 | return vmaddr | (address & ~PMD_MASK); | 450 | return vmaddr | (address & ~PMD_MASK); |
451 | } | 451 | } |
452 | return -EFAULT; | 452 | return -EFAULT; |
453 | } | 453 | } |
454 | 454 | ||
455 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | 455 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) |
456 | { | 456 | { |
457 | unsigned long rc; | 457 | unsigned long rc; |
458 | 458 | ||
459 | down_read(&gmap->mm->mmap_sem); | 459 | down_read(&gmap->mm->mmap_sem); |
460 | rc = __gmap_fault(address, gmap); | 460 | rc = __gmap_fault(address, gmap); |
461 | up_read(&gmap->mm->mmap_sem); | 461 | up_read(&gmap->mm->mmap_sem); |
462 | 462 | ||
463 | return rc; | 463 | return rc; |
464 | } | 464 | } |
465 | EXPORT_SYMBOL_GPL(gmap_fault); | 465 | EXPORT_SYMBOL_GPL(gmap_fault); |
466 | 466 | ||
467 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) | 467 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) |
468 | { | 468 | { |
469 | 469 | ||
470 | unsigned long *table, address, size; | 470 | unsigned long *table, address, size; |
471 | struct vm_area_struct *vma; | 471 | struct vm_area_struct *vma; |
472 | struct gmap_pgtable *mp; | 472 | struct gmap_pgtable *mp; |
473 | struct page *page; | 473 | struct page *page; |
474 | 474 | ||
475 | down_read(&gmap->mm->mmap_sem); | 475 | down_read(&gmap->mm->mmap_sem); |
476 | address = from; | 476 | address = from; |
477 | while (address < to) { | 477 | while (address < to) { |
478 | /* Walk the gmap address space page table */ | 478 | /* Walk the gmap address space page table */ |
479 | table = gmap->table + ((address >> 53) & 0x7ff); | 479 | table = gmap->table + ((address >> 53) & 0x7ff); |
480 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 480 | if (unlikely(*table & _REGION_ENTRY_INV)) { |
481 | address = (address + PMD_SIZE) & PMD_MASK; | 481 | address = (address + PMD_SIZE) & PMD_MASK; |
482 | continue; | 482 | continue; |
483 | } | 483 | } |
484 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 484 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
485 | table = table + ((address >> 42) & 0x7ff); | 485 | table = table + ((address >> 42) & 0x7ff); |
486 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 486 | if (unlikely(*table & _REGION_ENTRY_INV)) { |
487 | address = (address + PMD_SIZE) & PMD_MASK; | 487 | address = (address + PMD_SIZE) & PMD_MASK; |
488 | continue; | 488 | continue; |
489 | } | 489 | } |
490 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 490 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
491 | table = table + ((address >> 31) & 0x7ff); | 491 | table = table + ((address >> 31) & 0x7ff); |
492 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 492 | if (unlikely(*table & _REGION_ENTRY_INV)) { |
493 | address = (address + PMD_SIZE) & PMD_MASK; | 493 | address = (address + PMD_SIZE) & PMD_MASK; |
494 | continue; | 494 | continue; |
495 | } | 495 | } |
496 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 496 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
497 | table = table + ((address >> 20) & 0x7ff); | 497 | table = table + ((address >> 20) & 0x7ff); |
498 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) { | 498 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) { |
499 | address = (address + PMD_SIZE) & PMD_MASK; | 499 | address = (address + PMD_SIZE) & PMD_MASK; |
500 | continue; | 500 | continue; |
501 | } | 501 | } |
502 | page = pfn_to_page(*table >> PAGE_SHIFT); | 502 | page = pfn_to_page(*table >> PAGE_SHIFT); |
503 | mp = (struct gmap_pgtable *) page->index; | 503 | mp = (struct gmap_pgtable *) page->index; |
504 | vma = find_vma(gmap->mm, mp->vmaddr); | 504 | vma = find_vma(gmap->mm, mp->vmaddr); |
505 | size = min(to - address, PMD_SIZE - (address & ~PMD_MASK)); | 505 | size = min(to - address, PMD_SIZE - (address & ~PMD_MASK)); |
506 | zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK), | 506 | zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK), |
507 | size, NULL); | 507 | size, NULL); |
508 | address = (address + PMD_SIZE) & PMD_MASK; | 508 | address = (address + PMD_SIZE) & PMD_MASK; |
509 | } | 509 | } |
510 | up_read(&gmap->mm->mmap_sem); | 510 | up_read(&gmap->mm->mmap_sem); |
511 | } | 511 | } |
512 | EXPORT_SYMBOL_GPL(gmap_discard); | 512 | EXPORT_SYMBOL_GPL(gmap_discard); |
513 | 513 | ||
514 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) | 514 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) |
515 | { | 515 | { |
516 | struct gmap_rmap *rmap, *next; | 516 | struct gmap_rmap *rmap, *next; |
517 | struct gmap_pgtable *mp; | 517 | struct gmap_pgtable *mp; |
518 | struct page *page; | 518 | struct page *page; |
519 | int flush; | 519 | int flush; |
520 | 520 | ||
521 | flush = 0; | 521 | flush = 0; |
522 | spin_lock(&mm->page_table_lock); | 522 | spin_lock(&mm->page_table_lock); |
523 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 523 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
524 | mp = (struct gmap_pgtable *) page->index; | 524 | mp = (struct gmap_pgtable *) page->index; |
525 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { | 525 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { |
526 | *rmap->entry = | 526 | *rmap->entry = |
527 | _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | 527 | _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; |
528 | list_del(&rmap->list); | 528 | list_del(&rmap->list); |
529 | kfree(rmap); | 529 | kfree(rmap); |
530 | flush = 1; | 530 | flush = 1; |
531 | } | 531 | } |
532 | spin_unlock(&mm->page_table_lock); | 532 | spin_unlock(&mm->page_table_lock); |
533 | if (flush) | 533 | if (flush) |
534 | __tlb_flush_global(); | 534 | __tlb_flush_global(); |
535 | } | 535 | } |
536 | 536 | ||
537 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | 537 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, |
538 | unsigned long vmaddr) | 538 | unsigned long vmaddr) |
539 | { | 539 | { |
540 | struct page *page; | 540 | struct page *page; |
541 | unsigned long *table; | 541 | unsigned long *table; |
542 | struct gmap_pgtable *mp; | 542 | struct gmap_pgtable *mp; |
543 | 543 | ||
544 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 544 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
545 | if (!page) | 545 | if (!page) |
546 | return NULL; | 546 | return NULL; |
547 | mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT); | 547 | mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT); |
548 | if (!mp) { | 548 | if (!mp) { |
549 | __free_page(page); | 549 | __free_page(page); |
550 | return NULL; | 550 | return NULL; |
551 | } | 551 | } |
552 | pgtable_page_ctor(page); | 552 | pgtable_page_ctor(page); |
553 | mp->vmaddr = vmaddr & PMD_MASK; | 553 | mp->vmaddr = vmaddr & PMD_MASK; |
554 | INIT_LIST_HEAD(&mp->mapper); | 554 | INIT_LIST_HEAD(&mp->mapper); |
555 | page->index = (unsigned long) mp; | 555 | page->index = (unsigned long) mp; |
556 | atomic_set(&page->_mapcount, 3); | 556 | atomic_set(&page->_mapcount, 3); |
557 | table = (unsigned long *) page_to_phys(page); | 557 | table = (unsigned long *) page_to_phys(page); |
558 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); | 558 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); |
559 | clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); | 559 | clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); |
560 | return table; | 560 | return table; |
561 | } | 561 | } |
562 | 562 | ||
563 | static inline void page_table_free_pgste(unsigned long *table) | 563 | static inline void page_table_free_pgste(unsigned long *table) |
564 | { | 564 | { |
565 | struct page *page; | 565 | struct page *page; |
566 | struct gmap_pgtable *mp; | 566 | struct gmap_pgtable *mp; |
567 | 567 | ||
568 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 568 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
569 | mp = (struct gmap_pgtable *) page->index; | 569 | mp = (struct gmap_pgtable *) page->index; |
570 | BUG_ON(!list_empty(&mp->mapper)); | 570 | BUG_ON(!list_empty(&mp->mapper)); |
571 | pgtable_page_dtor(page); | 571 | pgtable_page_dtor(page); |
572 | atomic_set(&page->_mapcount, -1); | 572 | atomic_set(&page->_mapcount, -1); |
573 | kfree(mp); | 573 | kfree(mp); |
574 | __free_page(page); | 574 | __free_page(page); |
575 | } | 575 | } |
576 | 576 | ||
577 | #else /* CONFIG_PGSTE */ | 577 | #else /* CONFIG_PGSTE */ |
578 | 578 | ||
579 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | 579 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, |
580 | unsigned long vmaddr) | 580 | unsigned long vmaddr) |
581 | { | 581 | { |
582 | return NULL; | 582 | return NULL; |
583 | } | 583 | } |
584 | 584 | ||
585 | static inline void page_table_free_pgste(unsigned long *table) | 585 | static inline void page_table_free_pgste(unsigned long *table) |
586 | { | 586 | { |
587 | } | 587 | } |
588 | 588 | ||
589 | static inline void gmap_unmap_notifier(struct mm_struct *mm, | 589 | static inline void gmap_unmap_notifier(struct mm_struct *mm, |
590 | unsigned long *table) | 590 | unsigned long *table) |
591 | { | 591 | { |
592 | } | 592 | } |
593 | 593 | ||
594 | #endif /* CONFIG_PGSTE */ | 594 | #endif /* CONFIG_PGSTE */ |
595 | 595 | ||
596 | static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) | 596 | static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) |
597 | { | 597 | { |
598 | unsigned int old, new; | 598 | unsigned int old, new; |
599 | 599 | ||
600 | do { | 600 | do { |
601 | old = atomic_read(v); | 601 | old = atomic_read(v); |
602 | new = old ^ bits; | 602 | new = old ^ bits; |
603 | } while (atomic_cmpxchg(v, old, new) != old); | 603 | } while (atomic_cmpxchg(v, old, new) != old); |
604 | return new; | 604 | return new; |
605 | } | 605 | } |
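atomic_xor_bits() is a lock-free read-modify-write: reload, xor, retry until the cmpxchg lands, and return the post-xor value so callers can test fragment occupancy in one step. A userspace analogue with the GCC builtins (illustrative, not the kernel primitive):

#include <stdio.h>

static unsigned int xor_bits(unsigned int *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = __atomic_load_n(v, __ATOMIC_RELAXED);
		new = old ^ bits;
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	unsigned int mapcount = 0x1;	/* first 2K fragment in use */

	printf("0x%x\n", xor_bits(&mapcount, 0x2));	/* prints 0x3 */
	return 0;
}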
606 | 606 | ||
607 | /* | 607 | /* |
608 | * page table entry allocation/free routines. | 608 | * page table entry allocation/free routines. |
609 | */ | 609 | */ |
610 | unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) | 610 | unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) |
611 | { | 611 | { |
612 | struct page *page; | 612 | unsigned long *uninitialized_var(table); |
613 | unsigned long *table; | 613 | struct page *uninitialized_var(page); |
614 | unsigned int mask, bit; | 614 | unsigned int mask, bit; |
615 | 615 | ||
616 | if (mm_has_pgste(mm)) | 616 | if (mm_has_pgste(mm)) |
617 | return page_table_alloc_pgste(mm, vmaddr); | 617 | return page_table_alloc_pgste(mm, vmaddr); |
618 | /* Allocate fragments of a 4K page as 1K/2K page table */ | 618 | /* Allocate fragments of a 4K page as 1K/2K page table */ |
619 | spin_lock_bh(&mm->context.list_lock); | 619 | spin_lock_bh(&mm->context.list_lock); |
620 | mask = FRAG_MASK; | 620 | mask = FRAG_MASK; |
621 | if (!list_empty(&mm->context.pgtable_list)) { | 621 | if (!list_empty(&mm->context.pgtable_list)) { |
622 | page = list_first_entry(&mm->context.pgtable_list, | 622 | page = list_first_entry(&mm->context.pgtable_list, |
623 | struct page, lru); | 623 | struct page, lru); |
624 | table = (unsigned long *) page_to_phys(page); | 624 | table = (unsigned long *) page_to_phys(page); |
625 | mask = atomic_read(&page->_mapcount); | 625 | mask = atomic_read(&page->_mapcount); |
626 | mask = mask | (mask >> 4); | 626 | mask = mask | (mask >> 4); |
627 | } | 627 | } |
628 | if ((mask & FRAG_MASK) == FRAG_MASK) { | 628 | if ((mask & FRAG_MASK) == FRAG_MASK) { |
629 | spin_unlock_bh(&mm->context.list_lock); | 629 | spin_unlock_bh(&mm->context.list_lock); |
630 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 630 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
631 | if (!page) | 631 | if (!page) |
632 | return NULL; | 632 | return NULL; |
633 | pgtable_page_ctor(page); | 633 | pgtable_page_ctor(page); |
634 | atomic_set(&page->_mapcount, 1); | 634 | atomic_set(&page->_mapcount, 1); |
635 | table = (unsigned long *) page_to_phys(page); | 635 | table = (unsigned long *) page_to_phys(page); |
636 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 636 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); |
637 | spin_lock_bh(&mm->context.list_lock); | 637 | spin_lock_bh(&mm->context.list_lock); |
638 | list_add(&page->lru, &mm->context.pgtable_list); | 638 | list_add(&page->lru, &mm->context.pgtable_list); |
639 | } else { | 639 | } else { |
640 | for (bit = 1; mask & bit; bit <<= 1) | 640 | for (bit = 1; mask & bit; bit <<= 1) |
641 | table += PTRS_PER_PTE; | 641 | table += PTRS_PER_PTE; |
642 | mask = atomic_xor_bits(&page->_mapcount, bit); | 642 | mask = atomic_xor_bits(&page->_mapcount, bit); |
643 | if ((mask & FRAG_MASK) == FRAG_MASK) | 643 | if ((mask & FRAG_MASK) == FRAG_MASK) |
644 | list_del(&page->lru); | 644 | list_del(&page->lru); |
645 | } | 645 | } |
646 | spin_unlock_bh(&mm->context.list_lock); | 646 | spin_unlock_bh(&mm->context.list_lock); |
647 | return table; | 647 | return table; |
648 | } | 648 | } |
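The hunk above is this file's share of the commit: gcc cannot prove that table and page are assigned on every path that uses them (the correlation between the initial list check and the later mask tests is beyond its flow analysis), so the declarations are wrapped in uninitialized_var() to silence the false positive. For reference, the gcc variant of the macro in this era's include/linux/compiler-gcc.h is a plain self-assignment that generates no code:

#define uninitialized_var(x) x = x

/* so the two declarations in the hunk above expand to: */
unsigned long *table = table;
struct page *page = page;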
649 | 649 | ||
650 | void page_table_free(struct mm_struct *mm, unsigned long *table) | 650 | void page_table_free(struct mm_struct *mm, unsigned long *table) |
651 | { | 651 | { |
652 | struct page *page; | 652 | struct page *page; |
653 | unsigned int bit, mask; | 653 | unsigned int bit, mask; |
654 | 654 | ||
655 | if (mm_has_pgste(mm)) { | 655 | if (mm_has_pgste(mm)) { |
656 | gmap_unmap_notifier(mm, table); | 656 | gmap_unmap_notifier(mm, table); |
657 | return page_table_free_pgste(table); | 657 | return page_table_free_pgste(table); |
658 | } | 658 | } |
659 | /* Free 1K/2K page table fragment of a 4K page */ | 659 | /* Free 1K/2K page table fragment of a 4K page */ |
660 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 660 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
661 | bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); | 661 | bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); |
662 | spin_lock_bh(&mm->context.list_lock); | 662 | spin_lock_bh(&mm->context.list_lock); |
663 | if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) | 663 | if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) |
664 | list_del(&page->lru); | 664 | list_del(&page->lru); |
665 | mask = atomic_xor_bits(&page->_mapcount, bit); | 665 | mask = atomic_xor_bits(&page->_mapcount, bit); |
666 | if (mask & FRAG_MASK) | 666 | if (mask & FRAG_MASK) |
667 | list_add(&page->lru, &mm->context.pgtable_list); | 667 | list_add(&page->lru, &mm->context.pgtable_list); |
668 | spin_unlock_bh(&mm->context.list_lock); | 668 | spin_unlock_bh(&mm->context.list_lock); |
669 | if (mask == 0) { | 669 | if (mask == 0) { |
670 | pgtable_page_dtor(page); | 670 | pgtable_page_dtor(page); |
671 | atomic_set(&page->_mapcount, -1); | 671 | atomic_set(&page->_mapcount, -1); |
672 | __free_page(page); | 672 | __free_page(page); |
673 | } | 673 | } |
674 | } | 674 | } |
675 | 675 | ||
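The fragment bit in page_table_free() is plain address arithmetic: the table's offset within its 4K page, divided by the fragment size, selects the bit. A worked example under the 64 bit assumptions (PTRS_PER_PTE == 256, sizeof(pte_t) == 8, i.e. 2K fragments):

	unsigned long table = 0x12345800UL;		/* hypothetical address */
	unsigned long offset = table & ~PAGE_MASK;	/* 0x800 */
	unsigned int bit = 1 << (offset / (256 * 8));	/* 1 << 1 == 0x02 */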
676 | static void __page_table_free_rcu(void *table, unsigned bit) | 676 | static void __page_table_free_rcu(void *table, unsigned bit) |
677 | { | 677 | { |
678 | struct page *page; | 678 | struct page *page; |
679 | 679 | ||
680 | if (bit == FRAG_MASK) | 680 | if (bit == FRAG_MASK) |
681 | return page_table_free_pgste(table); | 681 | return page_table_free_pgste(table); |
682 | /* Free 1K/2K page table fragment of a 4K page */ | 682 | /* Free 1K/2K page table fragment of a 4K page */ |
683 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 683 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
684 | if (atomic_xor_bits(&page->_mapcount, bit) == 0) { | 684 | if (atomic_xor_bits(&page->_mapcount, bit) == 0) { |
685 | pgtable_page_dtor(page); | 685 | pgtable_page_dtor(page); |
686 | atomic_set(&page->_mapcount, -1); | 686 | atomic_set(&page->_mapcount, -1); |
687 | __free_page(page); | 687 | __free_page(page); |
688 | } | 688 | } |
689 | } | 689 | } |
690 | 690 | ||
691 | void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) | 691 | void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) |
692 | { | 692 | { |
693 | struct mm_struct *mm; | 693 | struct mm_struct *mm; |
694 | struct page *page; | 694 | struct page *page; |
695 | unsigned int bit, mask; | 695 | unsigned int bit, mask; |
696 | 696 | ||
697 | mm = tlb->mm; | 697 | mm = tlb->mm; |
698 | if (mm_has_pgste(mm)) { | 698 | if (mm_has_pgste(mm)) { |
699 | gmap_unmap_notifier(mm, table); | 699 | gmap_unmap_notifier(mm, table); |
700 | table = (unsigned long *) (__pa(table) | FRAG_MASK); | 700 | table = (unsigned long *) (__pa(table) | FRAG_MASK); |
701 | tlb_remove_table(tlb, table); | 701 | tlb_remove_table(tlb, table); |
702 | return; | 702 | return; |
703 | } | 703 | } |
704 | bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); | 704 | bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); |
705 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 705 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
706 | spin_lock_bh(&mm->context.list_lock); | 706 | spin_lock_bh(&mm->context.list_lock); |
707 | if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) | 707 | if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) |
708 | list_del(&page->lru); | 708 | list_del(&page->lru); |
709 | mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4)); | 709 | mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4)); |
710 | if (mask & FRAG_MASK) | 710 | if (mask & FRAG_MASK) |
711 | list_add_tail(&page->lru, &mm->context.pgtable_list); | 711 | list_add_tail(&page->lru, &mm->context.pgtable_list); |
712 | spin_unlock_bh(&mm->context.list_lock); | 712 | spin_unlock_bh(&mm->context.list_lock); |
713 | table = (unsigned long *) (__pa(table) | (bit << 4)); | 713 | table = (unsigned long *) (__pa(table) | (bit << 4)); |
714 | tlb_remove_table(tlb, table); | 714 | tlb_remove_table(tlb, table); |
715 | } | 715 | } |
716 | 716 | ||
717 | void __tlb_remove_table(void *_table) | 717 | void __tlb_remove_table(void *_table) |
718 | { | 718 | { |
719 | const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; | 719 | const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; |
720 | void *table = (void *)((unsigned long) _table & ~mask); | 720 | void *table = (void *)((unsigned long) _table & ~mask); |
721 | unsigned type = (unsigned long) _table & mask; | 721 | unsigned type = (unsigned long) _table & mask; |
722 | 722 | ||
723 | if (type) | 723 | if (type) |
724 | __page_table_free_rcu(table, type); | 724 | __page_table_free_rcu(table, type); |
725 | else | 725 | else |
726 | free_pages((unsigned long) table, ALLOC_ORDER); | 726 | free_pages((unsigned long) table, ALLOC_ORDER); |
727 | } | 727 | } |
728 | 728 | ||
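page_table_free_rcu() and __tlb_remove_table() communicate through tagged pointers: a table address is at least 1K aligned, so the low byte can carry type bits through tlb_remove_table() without a separate field. A sketch of the round trip, with the masks taken from the code above:

	/* encode: the fragment bit shifted into bits 4-7 means 'pending free';
	 * FRAG_MASK itself marks a full 4K pgste table */
	void *tagged = (void *)(__pa(table) | (bit << 4));

	/* decode, as in __tlb_remove_table() */
	unsigned long m = (FRAG_MASK << 4) | FRAG_MASK;
	void *addr = (void *)((unsigned long)tagged & ~m);
	unsigned int type = (unsigned long)tagged & m;	/* 0: whole crst pages */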
729 | static void tlb_remove_table_smp_sync(void *arg) | 729 | static void tlb_remove_table_smp_sync(void *arg) |
730 | { | 730 | { |
731 | /* Simply deliver the interrupt */ | 731 | /* Simply deliver the interrupt */ |
732 | } | 732 | } |
733 | 733 | ||
734 | static void tlb_remove_table_one(void *table) | 734 | static void tlb_remove_table_one(void *table) |
735 | { | 735 | { |
736 | /* | 736 | /* |
737 | * This isn't an RCU grace period and hence the page-tables cannot be | 737 | * This isn't an RCU grace period and hence the page-tables cannot be |
738 | * assumed to be actually RCU-freed. | 738 | * assumed to be actually RCU-freed. |
739 | * | 739 | * |
740 | * It is however sufficient for software page-table walkers that rely | 740 | * It is however sufficient for software page-table walkers that rely |
741 | * on IRQ disabling. See the comment near struct mmu_table_batch. | 741 | * on IRQ disabling. See the comment near struct mmu_table_batch. |
742 | */ | 742 | */ |
743 | smp_call_function(tlb_remove_table_smp_sync, NULL, 1); | 743 | smp_call_function(tlb_remove_table_smp_sync, NULL, 1); |
744 | __tlb_remove_table(table); | 744 | __tlb_remove_table(table); |
745 | } | 745 | } |
746 | 746 | ||
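The smp_call_function() in tlb_remove_table_one() stands in for an RCU grace period on the slow path: lockless page table walkers run with interrupts disabled, so a synchronous IPI to all other CPUs cannot return until every such walk has finished. A sketch of the walker side this pairs with (walk_page_tables() is a hypothetical stand-in):

	unsigned long flags;

	local_irq_save(flags);		/* holds off the IPI above ... */
	walk_page_tables(mm, addr);	/* ... for the whole lockless walk */
	local_irq_restore(flags);	/* only now may the table be freed */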
747 | static void tlb_remove_table_rcu(struct rcu_head *head) | 747 | static void tlb_remove_table_rcu(struct rcu_head *head) |
748 | { | 748 | { |
749 | struct mmu_table_batch *batch; | 749 | struct mmu_table_batch *batch; |
750 | int i; | 750 | int i; |
751 | 751 | ||
752 | batch = container_of(head, struct mmu_table_batch, rcu); | 752 | batch = container_of(head, struct mmu_table_batch, rcu); |
753 | 753 | ||
754 | for (i = 0; i < batch->nr; i++) | 754 | for (i = 0; i < batch->nr; i++) |
755 | __tlb_remove_table(batch->tables[i]); | 755 | __tlb_remove_table(batch->tables[i]); |
756 | 756 | ||
757 | free_page((unsigned long)batch); | 757 | free_page((unsigned long)batch); |
758 | } | 758 | } |
759 | 759 | ||
760 | void tlb_table_flush(struct mmu_gather *tlb) | 760 | void tlb_table_flush(struct mmu_gather *tlb) |
761 | { | 761 | { |
762 | struct mmu_table_batch **batch = &tlb->batch; | 762 | struct mmu_table_batch **batch = &tlb->batch; |
763 | 763 | ||
764 | if (*batch) { | 764 | if (*batch) { |
765 | __tlb_flush_mm(tlb->mm); | 765 | __tlb_flush_mm(tlb->mm); |
766 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); | 766 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); |
767 | *batch = NULL; | 767 | *batch = NULL; |
768 | } | 768 | } |
769 | } | 769 | } |
770 | 770 | ||
771 | void tlb_remove_table(struct mmu_gather *tlb, void *table) | 771 | void tlb_remove_table(struct mmu_gather *tlb, void *table) |
772 | { | 772 | { |
773 | struct mmu_table_batch **batch = &tlb->batch; | 773 | struct mmu_table_batch **batch = &tlb->batch; |
774 | 774 | ||
775 | if (*batch == NULL) { | 775 | if (*batch == NULL) { |
776 | *batch = (struct mmu_table_batch *) | 776 | *batch = (struct mmu_table_batch *) |
777 | __get_free_page(GFP_NOWAIT | __GFP_NOWARN); | 777 | __get_free_page(GFP_NOWAIT | __GFP_NOWARN); |
778 | if (*batch == NULL) { | 778 | if (*batch == NULL) { |
779 | __tlb_flush_mm(tlb->mm); | 779 | __tlb_flush_mm(tlb->mm); |
780 | tlb_remove_table_one(table); | 780 | tlb_remove_table_one(table); |
781 | return; | 781 | return; |
782 | } | 782 | } |
783 | (*batch)->nr = 0; | 783 | (*batch)->nr = 0; |
784 | } | 784 | } |
785 | (*batch)->tables[(*batch)->nr++] = table; | 785 | (*batch)->tables[(*batch)->nr++] = table; |
786 | if ((*batch)->nr == MAX_TABLE_BATCH) | 786 | if ((*batch)->nr == MAX_TABLE_BATCH) |
787 | tlb_table_flush(tlb); | 787 | tlb_table_flush(tlb); |
788 | } | 788 | } |
789 | 789 | ||
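For reference, the batch object that tlb_remove_table() fills is one page with a small header, which is also where MAX_TABLE_BATCH comes from (definition assumed from the asm/tlb.h of this era):

	struct mmu_table_batch {
		struct rcu_head	rcu;
		unsigned int	nr;
		void		*tables[0];
	};

	#define MAX_TABLE_BATCH \
		((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

Freeing is thus amortized: one call_rcu_sched() per page of table pointers instead of one per table.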
790 | /* | 790 | /* |
791 | * switch on pgstes for its userspace process (for kvm) | 791 | * switch on pgstes for its userspace process (for kvm) |
792 | */ | 792 | */ |
793 | int s390_enable_sie(void) | 793 | int s390_enable_sie(void) |
794 | { | 794 | { |
795 | struct task_struct *tsk = current; | 795 | struct task_struct *tsk = current; |
796 | struct mm_struct *mm, *old_mm; | 796 | struct mm_struct *mm, *old_mm; |
797 | 797 | ||
798 | /* Do we have switched amode? If not, we cannot do sie */ | 798 | /* Do we have switched amode? If not, we cannot do sie */ |
799 | if (s390_user_mode == HOME_SPACE_MODE) | 799 | if (s390_user_mode == HOME_SPACE_MODE) |
800 | return -EINVAL; | 800 | return -EINVAL; |
801 | 801 | ||
802 | /* Do we have pgstes? If yes, we are done */ | 802 | /* Do we have pgstes? If yes, we are done */ |
803 | if (mm_has_pgste(tsk->mm)) | 803 | if (mm_has_pgste(tsk->mm)) |
804 | return 0; | 804 | return 0; |
805 | 805 | ||
806 | /* let's check if we are allowed to replace the mm */ | 806 | /* let's check if we are allowed to replace the mm */ |
807 | task_lock(tsk); | 807 | task_lock(tsk); |
808 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || | 808 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || |
809 | #ifdef CONFIG_AIO | 809 | #ifdef CONFIG_AIO |
810 | !hlist_empty(&tsk->mm->ioctx_list) || | 810 | !hlist_empty(&tsk->mm->ioctx_list) || |
811 | #endif | 811 | #endif |
812 | tsk->mm != tsk->active_mm) { | 812 | tsk->mm != tsk->active_mm) { |
813 | task_unlock(tsk); | 813 | task_unlock(tsk); |
814 | return -EINVAL; | 814 | return -EINVAL; |
815 | } | 815 | } |
816 | task_unlock(tsk); | 816 | task_unlock(tsk); |
817 | 817 | ||
818 | /* we copy the mm and let dup_mm create the page tables with pgstes */ | 818 | /* we copy the mm and let dup_mm create the page tables with pgstes */ |
819 | tsk->mm->context.alloc_pgste = 1; | 819 | tsk->mm->context.alloc_pgste = 1; |
820 | /* make sure that both mms have a correct rss state */ | 820 | /* make sure that both mms have a correct rss state */ |
821 | sync_mm_rss(tsk->mm); | 821 | sync_mm_rss(tsk->mm); |
822 | mm = dup_mm(tsk); | 822 | mm = dup_mm(tsk); |
823 | tsk->mm->context.alloc_pgste = 0; | 823 | tsk->mm->context.alloc_pgste = 0; |
824 | if (!mm) | 824 | if (!mm) |
825 | return -ENOMEM; | 825 | return -ENOMEM; |
826 | 826 | ||
827 | /* Now let's check again if something happened */ | 827 | /* Now let's check again if something happened */ |
828 | task_lock(tsk); | 828 | task_lock(tsk); |
829 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || | 829 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || |
830 | #ifdef CONFIG_AIO | 830 | #ifdef CONFIG_AIO |
831 | !hlist_empty(&tsk->mm->ioctx_list) || | 831 | !hlist_empty(&tsk->mm->ioctx_list) || |
832 | #endif | 832 | #endif |
833 | tsk->mm != tsk->active_mm) { | 833 | tsk->mm != tsk->active_mm) { |
834 | mmput(mm); | 834 | mmput(mm); |
835 | task_unlock(tsk); | 835 | task_unlock(tsk); |
836 | return -EINVAL; | 836 | return -EINVAL; |
837 | } | 837 | } |
838 | 838 | ||
839 | /* ok, we are alone. No ptrace, no threads, etc. */ | 839 | /* ok, we are alone. No ptrace, no threads, etc. */ |
840 | old_mm = tsk->mm; | 840 | old_mm = tsk->mm; |
841 | tsk->mm = tsk->active_mm = mm; | 841 | tsk->mm = tsk->active_mm = mm; |
842 | preempt_disable(); | 842 | preempt_disable(); |
843 | update_mm(mm, tsk); | 843 | update_mm(mm, tsk); |
844 | atomic_inc(&mm->context.attach_count); | 844 | atomic_inc(&mm->context.attach_count); |
845 | atomic_dec(&old_mm->context.attach_count); | 845 | atomic_dec(&old_mm->context.attach_count); |
846 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | 846 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
847 | preempt_enable(); | 847 | preempt_enable(); |
848 | task_unlock(tsk); | 848 | task_unlock(tsk); |
849 | mmput(old_mm); | 849 | mmput(old_mm); |
850 | return 0; | 850 | return 0; |
851 | } | 851 | } |
852 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 852 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
853 | 853 | ||
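Usage sketch for s390_enable_sie(): a hypervisor such as KVM calls it once, while the process is still single threaded; the replace-the-mm trick cannot work later, which is what the mm_users, ioctx_list and active_mm checks enforce.

	/* hedged sketch of a caller, e.g. during VM creation */
	rc = s390_enable_sie();
	if (rc)
		return rc;	/* no pgstes, cannot run SIE guests */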
854 | #if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION) | 854 | #if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION) |
855 | bool kernel_page_present(struct page *page) | 855 | bool kernel_page_present(struct page *page) |
856 | { | 856 | { |
857 | unsigned long addr; | 857 | unsigned long addr; |
858 | int cc; | 858 | int cc; |
859 | 859 | ||
860 | addr = page_to_phys(page); | 860 | addr = page_to_phys(page); |
861 | asm volatile( | 861 | asm volatile( |
862 | " lra %1,0(%1)\n" | 862 | " lra %1,0(%1)\n" |
863 | " ipm %0\n" | 863 | " ipm %0\n" |
864 | " srl %0,28" | 864 | " srl %0,28" |
865 | : "=d" (cc), "+a" (addr) : : "cc"); | 865 | : "=d" (cc), "+a" (addr) : : "cc"); |
866 | return cc == 0; | 866 | return cc == 0; |
867 | } | 867 | } |
868 | #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */ | 868 | #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */ |
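kernel_page_present() leans on lra (load real address), which sets condition code 0 only if the address translates; the ipm/srl pair is the standard s390 idiom for reading the condition code back into C. The same idiom as a self-contained sketch:

	static inline int lra_succeeds(unsigned long addr)
	{
		int cc;

		asm volatile(
			"	lra	%1,0(%1)\n"	/* cc 0: translation ok */
			"	ipm	%0\n"		/* cc into top nibble of %0 */
			"	srl	%0,28"		/* shift cc down to 0..3 */
			: "=d" (cc), "+a" (addr) : : "cc");
		return cc == 0;
	}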
869 | 869 |
drivers/s390/cio/cio.c
1 | /* | 1 | /* |
2 | * S/390 common I/O routines -- low level i/o calls | 2 | * S/390 common I/O routines -- low level i/o calls |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2008 | 4 | * Copyright IBM Corp. 1999, 2008 |
5 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 5 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
6 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 6 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
7 | * Arnd Bergmann (arndb@de.ibm.com) | 7 | * Arnd Bergmann (arndb@de.ibm.com) |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "cio" | 11 | #define KMSG_COMPONENT "cio" |
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
13 | 13 | ||
14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/kernel_stat.h> | 19 | #include <linux/kernel_stat.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <asm/cio.h> | 21 | #include <asm/cio.h> |
22 | #include <asm/delay.h> | 22 | #include <asm/delay.h> |
23 | #include <asm/irq.h> | 23 | #include <asm/irq.h> |
24 | #include <asm/irq_regs.h> | 24 | #include <asm/irq_regs.h> |
25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
26 | #include <asm/reset.h> | 26 | #include <asm/reset.h> |
27 | #include <asm/ipl.h> | 27 | #include <asm/ipl.h> |
28 | #include <asm/chpid.h> | 28 | #include <asm/chpid.h> |
29 | #include <asm/airq.h> | 29 | #include <asm/airq.h> |
30 | #include <asm/isc.h> | 30 | #include <asm/isc.h> |
31 | #include <asm/cputime.h> | 31 | #include <asm/cputime.h> |
32 | #include <asm/fcx.h> | 32 | #include <asm/fcx.h> |
33 | #include <asm/nmi.h> | 33 | #include <asm/nmi.h> |
34 | #include <asm/crw.h> | 34 | #include <asm/crw.h> |
35 | #include "cio.h" | 35 | #include "cio.h" |
36 | #include "css.h" | 36 | #include "css.h" |
37 | #include "chsc.h" | 37 | #include "chsc.h" |
38 | #include "ioasm.h" | 38 | #include "ioasm.h" |
39 | #include "io_sch.h" | 39 | #include "io_sch.h" |
40 | #include "blacklist.h" | 40 | #include "blacklist.h" |
41 | #include "cio_debug.h" | 41 | #include "cio_debug.h" |
42 | #include "chp.h" | 42 | #include "chp.h" |
43 | 43 | ||
44 | debug_info_t *cio_debug_msg_id; | 44 | debug_info_t *cio_debug_msg_id; |
45 | debug_info_t *cio_debug_trace_id; | 45 | debug_info_t *cio_debug_trace_id; |
46 | debug_info_t *cio_debug_crw_id; | 46 | debug_info_t *cio_debug_crw_id; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Function: cio_debug_init | 49 | * Function: cio_debug_init |
50 | * Initializes three debug logs for common I/O: | 50 | * Initializes three debug logs for common I/O: |
51 | * - cio_msg logs generic cio messages | 51 | * - cio_msg logs generic cio messages |
52 | * - cio_trace logs the calling of different functions | 52 | * - cio_trace logs the calling of different functions |
53 | * - cio_crw logs machine check related cio messages | 53 | * - cio_crw logs machine check related cio messages |
54 | */ | 54 | */ |
55 | static int __init cio_debug_init(void) | 55 | static int __init cio_debug_init(void) |
56 | { | 56 | { |
57 | cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); | 57 | cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); |
58 | if (!cio_debug_msg_id) | 58 | if (!cio_debug_msg_id) |
59 | goto out_unregister; | 59 | goto out_unregister; |
60 | debug_register_view(cio_debug_msg_id, &debug_sprintf_view); | 60 | debug_register_view(cio_debug_msg_id, &debug_sprintf_view); |
61 | debug_set_level(cio_debug_msg_id, 2); | 61 | debug_set_level(cio_debug_msg_id, 2); |
62 | cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); | 62 | cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); |
63 | if (!cio_debug_trace_id) | 63 | if (!cio_debug_trace_id) |
64 | goto out_unregister; | 64 | goto out_unregister; |
65 | debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); | 65 | debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); |
66 | debug_set_level(cio_debug_trace_id, 2); | 66 | debug_set_level(cio_debug_trace_id, 2); |
67 | cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); | 67 | cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); |
68 | if (!cio_debug_crw_id) | 68 | if (!cio_debug_crw_id) |
69 | goto out_unregister; | 69 | goto out_unregister; |
70 | debug_register_view(cio_debug_crw_id, &debug_sprintf_view); | 70 | debug_register_view(cio_debug_crw_id, &debug_sprintf_view); |
71 | debug_set_level(cio_debug_crw_id, 4); | 71 | debug_set_level(cio_debug_crw_id, 4); |
72 | return 0; | 72 | return 0; |
73 | 73 | ||
74 | out_unregister: | 74 | out_unregister: |
75 | if (cio_debug_msg_id) | 75 | if (cio_debug_msg_id) |
76 | debug_unregister(cio_debug_msg_id); | 76 | debug_unregister(cio_debug_msg_id); |
77 | if (cio_debug_trace_id) | 77 | if (cio_debug_trace_id) |
78 | debug_unregister(cio_debug_trace_id); | 78 | debug_unregister(cio_debug_trace_id); |
79 | if (cio_debug_crw_id) | 79 | if (cio_debug_crw_id) |
80 | debug_unregister(cio_debug_crw_id); | 80 | debug_unregister(cio_debug_crw_id); |
81 | return -1; | 81 | return -1; |
82 | } | 82 | } |
83 | 83 | ||
84 | arch_initcall (cio_debug_init); | 84 | arch_initcall (cio_debug_init); |
85 | 85 | ||
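These three debug areas back the CIO_MSG_EVENT/CIO_TRACE_EVENT/CIO_HEX_EVENT macros used throughout this file; their shape (assumed from cio_debug.h) is a thin wrapper around the s390 debug feature:

	/* sketch, assuming the conventional cio_debug.h definitions */
	#define CIO_MSG_EVENT(imp, args...) \
		debug_sprintf_event(cio_debug_msg_id, imp, ##args)
	#define CIO_HEX_EVENT(imp, ptr, len) \
		debug_event(cio_debug_trace_id, imp, ptr, len)

Lower 'imp' values are more important; the debug_set_level() calls above decide which events are actually recorded.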
86 | int cio_set_options(struct subchannel *sch, int flags) | 86 | int cio_set_options(struct subchannel *sch, int flags) |
87 | { | 87 | { |
88 | struct io_subchannel_private *priv = to_io_private(sch); | 88 | struct io_subchannel_private *priv = to_io_private(sch); |
89 | 89 | ||
90 | priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; | 90 | priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; |
91 | priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; | 91 | priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; |
92 | priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; | 92 | priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; |
93 | return 0; | 93 | return 0; |
94 | } | 94 | } |
95 | 95 | ||
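Usage sketch for cio_set_options() (the caller is hypothetical): the three flags map one to one onto the ORB bits that cio_start_key() fills in below.

	/* let the channel program suspend itself, suppress intermediate
	 * interrupts while it is suspended */
	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);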
96 | static int | 96 | static int |
97 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 97 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
98 | { | 98 | { |
99 | char dbf_text[15]; | 99 | char dbf_text[15]; |
100 | 100 | ||
101 | if (lpm != 0) | 101 | if (lpm != 0) |
102 | sch->lpm &= ~lpm; | 102 | sch->lpm &= ~lpm; |
103 | else | 103 | else |
104 | sch->lpm = 0; | 104 | sch->lpm = 0; |
105 | 105 | ||
106 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " | 106 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " |
107 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, | 107 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, |
108 | sch->schid.sch_no); | 108 | sch->schid.sch_no); |
109 | 109 | ||
110 | if (cio_update_schib(sch)) | 110 | if (cio_update_schib(sch)) |
111 | return -ENODEV; | 111 | return -ENODEV; |
112 | 112 | ||
113 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); | 113 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); |
114 | CIO_TRACE_EVENT(0, dbf_text); | 114 | CIO_TRACE_EVENT(0, dbf_text); |
115 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); | 115 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); |
116 | 116 | ||
117 | return (sch->lpm ? -EACCES : -ENODEV); | 117 | return (sch->lpm ? -EACCES : -ENODEV); |
118 | } | 118 | } |
119 | 119 | ||
120 | int | 120 | int |
121 | cio_start_key (struct subchannel *sch, /* subchannel structure */ | 121 | cio_start_key (struct subchannel *sch, /* subchannel structure */ |
122 | struct ccw1 * cpa, /* logical channel prog addr */ | 122 | struct ccw1 * cpa, /* logical channel prog addr */ |
123 | __u8 lpm, /* logical path mask */ | 123 | __u8 lpm, /* logical path mask */ |
124 | __u8 key) /* storage key */ | 124 | __u8 key) /* storage key */ |
125 | { | 125 | { |
126 | struct io_subchannel_private *priv = to_io_private(sch); | 126 | struct io_subchannel_private *priv = to_io_private(sch); |
127 | union orb *orb = &priv->orb; | 127 | union orb *orb = &priv->orb; |
128 | int ccode; | 128 | int ccode; |
129 | 129 | ||
130 | CIO_TRACE_EVENT(5, "stIO"); | 130 | CIO_TRACE_EVENT(5, "stIO"); |
131 | CIO_TRACE_EVENT(5, dev_name(&sch->dev)); | 131 | CIO_TRACE_EVENT(5, dev_name(&sch->dev)); |
132 | 132 | ||
133 | memset(orb, 0, sizeof(union orb)); | 133 | memset(orb, 0, sizeof(union orb)); |
134 | /* sch is always under 2G. */ | 134 | /* sch is always under 2G. */ |
135 | orb->cmd.intparm = (u32)(addr_t)sch; | 135 | orb->cmd.intparm = (u32)(addr_t)sch; |
136 | orb->cmd.fmt = 1; | 136 | orb->cmd.fmt = 1; |
137 | 137 | ||
138 | orb->cmd.pfch = priv->options.prefetch == 0; | 138 | orb->cmd.pfch = priv->options.prefetch == 0; |
139 | orb->cmd.spnd = priv->options.suspend; | 139 | orb->cmd.spnd = priv->options.suspend; |
140 | orb->cmd.ssic = priv->options.suspend && priv->options.inter; | 140 | orb->cmd.ssic = priv->options.suspend && priv->options.inter; |
141 | orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; | 141 | orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; |
142 | #ifdef CONFIG_64BIT | 142 | #ifdef CONFIG_64BIT |
143 | /* | 143 | /* |
144 | * For 64 bit we always support 64 bit IDAWs, with 4k page size only | 144 | * For 64 bit we always support 64 bit IDAWs, with 4k page size only |
145 | */ | 145 | */ |
146 | orb->cmd.c64 = 1; | 146 | orb->cmd.c64 = 1; |
147 | orb->cmd.i2k = 0; | 147 | orb->cmd.i2k = 0; |
148 | #endif | 148 | #endif |
149 | orb->cmd.key = key >> 4; | 149 | orb->cmd.key = key >> 4; |
150 | /* issue "Start Subchannel" */ | 150 | /* issue "Start Subchannel" */ |
151 | orb->cmd.cpa = (__u32) __pa(cpa); | 151 | orb->cmd.cpa = (__u32) __pa(cpa); |
152 | ccode = ssch(sch->schid, orb); | 152 | ccode = ssch(sch->schid, orb); |
153 | 153 | ||
154 | /* process condition code */ | 154 | /* process condition code */ |
155 | CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); | 155 | CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); |
156 | 156 | ||
157 | switch (ccode) { | 157 | switch (ccode) { |
158 | case 0: | 158 | case 0: |
159 | /* | 159 | /* |
160 | * initialize device status information | 160 | * initialize device status information |
161 | */ | 161 | */ |
162 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; | 162 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; |
163 | return 0; | 163 | return 0; |
164 | case 1: /* status pending */ | 164 | case 1: /* status pending */ |
165 | case 2: /* busy */ | 165 | case 2: /* busy */ |
166 | return -EBUSY; | 166 | return -EBUSY; |
167 | case 3: /* device/path not operational */ | 167 | case 3: /* device/path not operational */ |
168 | return cio_start_handle_notoper(sch, lpm); | 168 | return cio_start_handle_notoper(sch, lpm); |
169 | default: | 169 | default: |
170 | return ccode; | 170 | return ccode; |
171 | } | 171 | } |
172 | } | 172 | } |
173 | 173 | ||
174 | int | 174 | int |
175 | cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) | 175 | cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) |
176 | { | 176 | { |
177 | return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); | 177 | return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); |
178 | } | 178 | } |
179 | 179 | ||
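To tie the pieces together, a minimal caller of cio_start(): one CCW, one start. The command code and buffer are purely illustrative; the struct ccw1 field names follow asm/cio.h.

	struct ccw1 ccw;

	ccw.cmd_code = 0x04;			/* basic sense (illustrative) */
	ccw.flags    = CCW_FLAG_SLI;		/* tolerate short transfers */
	ccw.count    = sizeof(buf);		/* buf: hypothetical buffer */
	ccw.cda      = (__u32)__pa(buf);	/* 31-bit data address */
	ret = cio_start(sch, &ccw, 0);		/* lpm 0: fall back to sch->lpm */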
180 | /* | 180 | /* |
181 | * resume suspended I/O operation | 181 | * resume suspended I/O operation |
182 | */ | 182 | */ |
183 | int | 183 | int |
184 | cio_resume (struct subchannel *sch) | 184 | cio_resume (struct subchannel *sch) |
185 | { | 185 | { |
186 | int ccode; | 186 | int ccode; |
187 | 187 | ||
188 | CIO_TRACE_EVENT(4, "resIO"); | 188 | CIO_TRACE_EVENT(4, "resIO"); |
189 | CIO_TRACE_EVENT(4, dev_name(&sch->dev)); | 189 | CIO_TRACE_EVENT(4, dev_name(&sch->dev)); |
190 | 190 | ||
191 | ccode = rsch (sch->schid); | 191 | ccode = rsch (sch->schid); |
192 | 192 | ||
193 | CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); | 193 | CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); |
194 | 194 | ||
195 | switch (ccode) { | 195 | switch (ccode) { |
196 | case 0: | 196 | case 0: |
197 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; | 197 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; |
198 | return 0; | 198 | return 0; |
199 | case 1: | 199 | case 1: |
200 | return -EBUSY; | 200 | return -EBUSY; |
201 | case 2: | 201 | case 2: |
202 | return -EINVAL; | 202 | return -EINVAL; |
203 | default: | 203 | default: |
204 | /* | 204 | /* |
205 | * useless to wait for request completion | 205 | * useless to wait for request completion |
206 | * as the device is no longer operational! | 206 | * as the device is no longer operational! |
207 | */ | 207 | */ |
208 | return -ENODEV; | 208 | return -ENODEV; |
209 | } | 209 | } |
210 | } | 210 | } |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * halt I/O operation | 213 | * halt I/O operation |
214 | */ | 214 | */ |
215 | int | 215 | int |
216 | cio_halt(struct subchannel *sch) | 216 | cio_halt(struct subchannel *sch) |
217 | { | 217 | { |
218 | int ccode; | 218 | int ccode; |
219 | 219 | ||
220 | if (!sch) | 220 | if (!sch) |
221 | return -ENODEV; | 221 | return -ENODEV; |
222 | 222 | ||
223 | CIO_TRACE_EVENT(2, "haltIO"); | 223 | CIO_TRACE_EVENT(2, "haltIO"); |
224 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 224 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * Issue "Halt subchannel" and process condition code | 227 | * Issue "Halt subchannel" and process condition code |
228 | */ | 228 | */ |
229 | ccode = hsch (sch->schid); | 229 | ccode = hsch (sch->schid); |
230 | 230 | ||
231 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); | 231 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
232 | 232 | ||
233 | switch (ccode) { | 233 | switch (ccode) { |
234 | case 0: | 234 | case 0: |
235 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; | 235 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; |
236 | return 0; | 236 | return 0; |
237 | case 1: /* status pending */ | 237 | case 1: /* status pending */ |
238 | case 2: /* busy */ | 238 | case 2: /* busy */ |
239 | return -EBUSY; | 239 | return -EBUSY; |
240 | default: /* device not operational */ | 240 | default: /* device not operational */ |
241 | return -ENODEV; | 241 | return -ENODEV; |
242 | } | 242 | } |
243 | } | 243 | } |
244 | 244 | ||
245 | /* | 245 | /* |
246 | * Clear I/O operation | 246 | * Clear I/O operation |
247 | */ | 247 | */ |
248 | int | 248 | int |
249 | cio_clear(struct subchannel *sch) | 249 | cio_clear(struct subchannel *sch) |
250 | { | 250 | { |
251 | int ccode; | 251 | int ccode; |
252 | 252 | ||
253 | if (!sch) | 253 | if (!sch) |
254 | return -ENODEV; | 254 | return -ENODEV; |
255 | 255 | ||
256 | CIO_TRACE_EVENT(2, "clearIO"); | 256 | CIO_TRACE_EVENT(2, "clearIO"); |
257 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 257 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Issue "Clear subchannel" and process condition code | 260 | * Issue "Clear subchannel" and process condition code |
261 | */ | 261 | */ |
262 | ccode = csch (sch->schid); | 262 | ccode = csch (sch->schid); |
263 | 263 | ||
264 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); | 264 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
265 | 265 | ||
266 | switch (ccode) { | 266 | switch (ccode) { |
267 | case 0: | 267 | case 0: |
268 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; | 268 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; |
269 | return 0; | 269 | return 0; |
270 | default: /* device not operational */ | 270 | default: /* device not operational */ |
271 | return -ENODEV; | 271 | return -ENODEV; |
272 | } | 272 | } |
273 | } | 273 | } |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * Function: cio_cancel | 276 | * Function: cio_cancel |
277 | * Issues a "Cancel Subchannel" on the specified subchannel | 277 | * Issues a "Cancel Subchannel" on the specified subchannel |
278 | * Note: We don't need any fancy intparms and flags here | 278 | * Note: We don't need any fancy intparms and flags here |
279 | * since xsch is executed synchronously. | 279 | * since xsch is executed synchronously. |
280 | * Only for common I/O internal use for now. | 280 | * Only for common I/O internal use for now. |
281 | */ | 281 | */ |
282 | int | 282 | int |
283 | cio_cancel (struct subchannel *sch) | 283 | cio_cancel (struct subchannel *sch) |
284 | { | 284 | { |
285 | int ccode; | 285 | int ccode; |
286 | 286 | ||
287 | if (!sch) | 287 | if (!sch) |
288 | return -ENODEV; | 288 | return -ENODEV; |
289 | 289 | ||
290 | CIO_TRACE_EVENT(2, "cancelIO"); | 290 | CIO_TRACE_EVENT(2, "cancelIO"); |
291 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 291 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
292 | 292 | ||
293 | ccode = xsch (sch->schid); | 293 | ccode = xsch (sch->schid); |
294 | 294 | ||
295 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); | 295 | CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); |
296 | 296 | ||
297 | switch (ccode) { | 297 | switch (ccode) { |
298 | case 0: /* success */ | 298 | case 0: /* success */ |
299 | /* Update information in scsw. */ | 299 | /* Update information in scsw. */ |
300 | if (cio_update_schib(sch)) | 300 | if (cio_update_schib(sch)) |
301 | return -ENODEV; | 301 | return -ENODEV; |
302 | return 0; | 302 | return 0; |
303 | case 1: /* status pending */ | 303 | case 1: /* status pending */ |
304 | return -EBUSY; | 304 | return -EBUSY; |
305 | case 2: /* not applicable */ | 305 | case 2: /* not applicable */ |
306 | return -EINVAL; | 306 | return -EINVAL; |
307 | default: /* not oper */ | 307 | default: /* not oper */ |
308 | return -ENODEV; | 308 | return -ENODEV; |
309 | } | 309 | } |
310 | } | 310 | } |
311 | 311 | ||
312 | 312 | ||
313 | static void cio_apply_config(struct subchannel *sch, struct schib *schib) | 313 | static void cio_apply_config(struct subchannel *sch, struct schib *schib) |
314 | { | 314 | { |
315 | schib->pmcw.intparm = sch->config.intparm; | 315 | schib->pmcw.intparm = sch->config.intparm; |
316 | schib->pmcw.mbi = sch->config.mbi; | 316 | schib->pmcw.mbi = sch->config.mbi; |
317 | schib->pmcw.isc = sch->config.isc; | 317 | schib->pmcw.isc = sch->config.isc; |
318 | schib->pmcw.ena = sch->config.ena; | 318 | schib->pmcw.ena = sch->config.ena; |
319 | schib->pmcw.mme = sch->config.mme; | 319 | schib->pmcw.mme = sch->config.mme; |
320 | schib->pmcw.mp = sch->config.mp; | 320 | schib->pmcw.mp = sch->config.mp; |
321 | schib->pmcw.csense = sch->config.csense; | 321 | schib->pmcw.csense = sch->config.csense; |
322 | schib->pmcw.mbfc = sch->config.mbfc; | 322 | schib->pmcw.mbfc = sch->config.mbfc; |
323 | if (sch->config.mbfc) | 323 | if (sch->config.mbfc) |
324 | schib->mba = sch->config.mba; | 324 | schib->mba = sch->config.mba; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int cio_check_config(struct subchannel *sch, struct schib *schib) | 327 | static int cio_check_config(struct subchannel *sch, struct schib *schib) |
328 | { | 328 | { |
329 | return (schib->pmcw.intparm == sch->config.intparm) && | 329 | return (schib->pmcw.intparm == sch->config.intparm) && |
330 | (schib->pmcw.mbi == sch->config.mbi) && | 330 | (schib->pmcw.mbi == sch->config.mbi) && |
331 | (schib->pmcw.isc == sch->config.isc) && | 331 | (schib->pmcw.isc == sch->config.isc) && |
332 | (schib->pmcw.ena == sch->config.ena) && | 332 | (schib->pmcw.ena == sch->config.ena) && |
333 | (schib->pmcw.mme == sch->config.mme) && | 333 | (schib->pmcw.mme == sch->config.mme) && |
334 | (schib->pmcw.mp == sch->config.mp) && | 334 | (schib->pmcw.mp == sch->config.mp) && |
335 | (schib->pmcw.csense == sch->config.csense) && | 335 | (schib->pmcw.csense == sch->config.csense) && |
336 | (schib->pmcw.mbfc == sch->config.mbfc) && | 336 | (schib->pmcw.mbfc == sch->config.mbfc) && |
337 | (!sch->config.mbfc || (schib->mba == sch->config.mba)); | 337 | (!sch->config.mbfc || (schib->mba == sch->config.mba)); |
338 | } | 338 | } |
339 | 339 | ||
340 | /* | 340 | /* |
341 | * cio_commit_config - apply configuration to the subchannel | 341 | * cio_commit_config - apply configuration to the subchannel |
342 | */ | 342 | */ |
343 | int cio_commit_config(struct subchannel *sch) | 343 | int cio_commit_config(struct subchannel *sch) |
344 | { | 344 | { |
345 | struct schib schib; | 345 | struct schib schib; |
346 | int ccode, retry, ret = 0; | 346 | int ccode, retry, ret = 0; |
347 | 347 | ||
348 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 348 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
349 | return -ENODEV; | 349 | return -ENODEV; |
350 | 350 | ||
351 | for (retry = 0; retry < 5; retry++) { | 351 | for (retry = 0; retry < 5; retry++) { |
352 | /* copy desired changes to local schib */ | 352 | /* copy desired changes to local schib */ |
353 | cio_apply_config(sch, &schib); | 353 | cio_apply_config(sch, &schib); |
354 | ccode = msch_err(sch->schid, &schib); | 354 | ccode = msch_err(sch->schid, &schib); |
355 | if (ccode < 0) /* -EIO if msch gets a program check. */ | 355 | if (ccode < 0) /* -EIO if msch gets a program check. */ |
356 | return ccode; | 356 | return ccode; |
357 | switch (ccode) { | 357 | switch (ccode) { |
358 | case 0: /* successful */ | 358 | case 0: /* successful */ |
359 | if (stsch_err(sch->schid, &schib) || | 359 | if (stsch_err(sch->schid, &schib) || |
360 | !css_sch_is_valid(&schib)) | 360 | !css_sch_is_valid(&schib)) |
361 | return -ENODEV; | 361 | return -ENODEV; |
362 | if (cio_check_config(sch, &schib)) { | 362 | if (cio_check_config(sch, &schib)) { |
363 | /* commit changes from local schib */ | 363 | /* commit changes from local schib */ |
364 | memcpy(&sch->schib, &schib, sizeof(schib)); | 364 | memcpy(&sch->schib, &schib, sizeof(schib)); |
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | ret = -EAGAIN; | 367 | ret = -EAGAIN; |
368 | break; | 368 | break; |
369 | case 1: /* status pending */ | 369 | case 1: /* status pending */ |
370 | return -EBUSY; | 370 | return -EBUSY; |
371 | case 2: /* busy */ | 371 | case 2: /* busy */ |
372 | udelay(100); /* allow for recovery */ | 372 | udelay(100); /* allow for recovery */ |
373 | ret = -EBUSY; | 373 | ret = -EBUSY; |
374 | break; | 374 | break; |
375 | case 3: /* not operational */ | 375 | case 3: /* not operational */ |
376 | return -ENODEV; | 376 | return -ENODEV; |
377 | } | 377 | } |
378 | } | 378 | } |
379 | return ret; | 379 | return ret; |
380 | } | 380 | } |
381 | 381 | ||
382 | /** | 382 | /** |
383 | * cio_update_schib - Perform stsch and update schib if subchannel is valid. | 383 | * cio_update_schib - Perform stsch and update schib if subchannel is valid. |
384 | * @sch: subchannel on which to perform stsch | 384 | * @sch: subchannel on which to perform stsch |
385 | * Return zero on success, -ENODEV otherwise. | 385 | * Return zero on success, -ENODEV otherwise. |
386 | */ | 386 | */ |
387 | int cio_update_schib(struct subchannel *sch) | 387 | int cio_update_schib(struct subchannel *sch) |
388 | { | 388 | { |
389 | struct schib schib; | 389 | struct schib schib; |
390 | 390 | ||
391 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 391 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
392 | return -ENODEV; | 392 | return -ENODEV; |
393 | 393 | ||
394 | memcpy(&sch->schib, &schib, sizeof(schib)); | 394 | memcpy(&sch->schib, &schib, sizeof(schib)); |
395 | return 0; | 395 | return 0; |
396 | } | 396 | } |
397 | EXPORT_SYMBOL_GPL(cio_update_schib); | 397 | EXPORT_SYMBOL_GPL(cio_update_schib); |
398 | 398 | ||
399 | /** | 399 | /** |
400 | * cio_enable_subchannel - enable a subchannel. | 400 | * cio_enable_subchannel - enable a subchannel. |
401 | * @sch: subchannel to be enabled | 401 | * @sch: subchannel to be enabled |
402 | * @intparm: interruption parameter to set | 402 | * @intparm: interruption parameter to set |
403 | */ | 403 | */ |
404 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 404 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
405 | { | 405 | { |
406 | int retry; | 406 | int retry; |
407 | int ret; | 407 | int ret; |
408 | 408 | ||
409 | CIO_TRACE_EVENT(2, "ensch"); | 409 | CIO_TRACE_EVENT(2, "ensch"); |
410 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 410 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
411 | 411 | ||
412 | if (sch_is_pseudo_sch(sch)) | 412 | if (sch_is_pseudo_sch(sch)) |
413 | return -EINVAL; | 413 | return -EINVAL; |
414 | if (cio_update_schib(sch)) | 414 | if (cio_update_schib(sch)) |
415 | return -ENODEV; | 415 | return -ENODEV; |
416 | 416 | ||
417 | sch->config.ena = 1; | 417 | sch->config.ena = 1; |
418 | sch->config.isc = sch->isc; | 418 | sch->config.isc = sch->isc; |
419 | sch->config.intparm = intparm; | 419 | sch->config.intparm = intparm; |
420 | 420 | ||
421 | for (retry = 0; retry < 3; retry++) { | 421 | for (retry = 0; retry < 3; retry++) { |
422 | ret = cio_commit_config(sch); | 422 | ret = cio_commit_config(sch); |
423 | if (ret == -EIO) { | 423 | if (ret == -EIO) { |
424 | /* | 424 | /* |
425 | * Got a program check in msch. Try without | 425 | * Got a program check in msch. Try without |
426 | * the concurrent sense bit the next time. | 426 | * the concurrent sense bit the next time. |
427 | */ | 427 | */ |
428 | sch->config.csense = 0; | 428 | sch->config.csense = 0; |
429 | } else if (ret == -EBUSY) { | 429 | } else if (ret == -EBUSY) { |
430 | struct irb irb; | 430 | struct irb irb; |
431 | if (tsch(sch->schid, &irb) != 0) | 431 | if (tsch(sch->schid, &irb) != 0) |
432 | break; | 432 | break; |
433 | } else | 433 | } else |
434 | break; | 434 | break; |
435 | } | 435 | } |
436 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); | 436 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); |
437 | return ret; | 437 | return ret; |
438 | } | 438 | } |
439 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); | 439 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); |
440 | 440 | ||
441 | /** | 441 | /** |
442 | * cio_disable_subchannel - disable a subchannel. | 442 | * cio_disable_subchannel - disable a subchannel. |
443 | * @sch: subchannel to disable | 443 | * @sch: subchannel to disable |
444 | */ | 444 | */ |
445 | int cio_disable_subchannel(struct subchannel *sch) | 445 | int cio_disable_subchannel(struct subchannel *sch) |
446 | { | 446 | { |
447 | int retry; | 447 | int retry; |
448 | int ret; | 448 | int ret; |
449 | 449 | ||
450 | CIO_TRACE_EVENT(2, "dissch"); | 450 | CIO_TRACE_EVENT(2, "dissch"); |
451 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); | 451 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
452 | 452 | ||
453 | if (sch_is_pseudo_sch(sch)) | 453 | if (sch_is_pseudo_sch(sch)) |
454 | return 0; | 454 | return 0; |
455 | if (cio_update_schib(sch)) | 455 | if (cio_update_schib(sch)) |
456 | return -ENODEV; | 456 | return -ENODEV; |
457 | 457 | ||
458 | sch->config.ena = 0; | 458 | sch->config.ena = 0; |
459 | 459 | ||
460 | for (retry = 0; retry < 3; retry++) { | 460 | for (retry = 0; retry < 3; retry++) { |
461 | ret = cio_commit_config(sch); | 461 | ret = cio_commit_config(sch); |
462 | if (ret == -EBUSY) { | 462 | if (ret == -EBUSY) { |
463 | struct irb irb; | 463 | struct irb irb; |
464 | if (tsch(sch->schid, &irb) != 0) | 464 | if (tsch(sch->schid, &irb) != 0) |
465 | break; | 465 | break; |
466 | } else | 466 | } else |
467 | break; | 467 | break; |
468 | } | 468 | } |
469 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); | 469 | CIO_HEX_EVENT(2, &ret, sizeof(ret)); |
470 | return ret; | 470 | return ret; |
471 | } | 471 | } |
472 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); | 472 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); |
473 | 473 | ||
474 | int cio_create_sch_lock(struct subchannel *sch) | 474 | int cio_create_sch_lock(struct subchannel *sch) |
475 | { | 475 | { |
476 | sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); | 476 | sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); |
477 | if (!sch->lock) | 477 | if (!sch->lock) |
478 | return -ENOMEM; | 478 | return -ENOMEM; |
479 | spin_lock_init(sch->lock); | 479 | spin_lock_init(sch->lock); |
480 | return 0; | 480 | return 0; |
481 | } | 481 | } |
482 | 482 | ||
483 | static int cio_check_devno_blacklisted(struct subchannel *sch) | 483 | static int cio_check_devno_blacklisted(struct subchannel *sch) |
484 | { | 484 | { |
485 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { | 485 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { |
486 | /* | 486 | /* |
487 | * This device must not be known to Linux. So we simply | 487 | * This device must not be known to Linux. So we simply |
488 | * say that there is no device and return ENODEV. | 488 | * say that there is no device and return ENODEV. |
489 | */ | 489 | */ |
490 | CIO_MSG_EVENT(6, "Blacklisted device detected " | 490 | CIO_MSG_EVENT(6, "Blacklisted device detected " |
491 | "at devno %04X, subchannel set %x\n", | 491 | "at devno %04X, subchannel set %x\n", |
492 | sch->schib.pmcw.dev, sch->schid.ssid); | 492 | sch->schib.pmcw.dev, sch->schid.ssid); |
493 | return -ENODEV; | 493 | return -ENODEV; |
494 | } | 494 | } |
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | static int cio_validate_io_subchannel(struct subchannel *sch) | 498 | static int cio_validate_io_subchannel(struct subchannel *sch) |
499 | { | 499 | { |
500 | /* Initialization for io subchannels. */ | 500 | /* Initialization for io subchannels. */ |
501 | if (!css_sch_is_valid(&sch->schib)) | 501 | if (!css_sch_is_valid(&sch->schib)) |
502 | return -ENODEV; | 502 | return -ENODEV; |
503 | 503 | ||
504 | /* Devno is valid. */ | 504 | /* Devno is valid. */ |
505 | return cio_check_devno_blacklisted(sch); | 505 | return cio_check_devno_blacklisted(sch); |
506 | } | 506 | } |
507 | 507 | ||
508 | static int cio_validate_msg_subchannel(struct subchannel *sch) | 508 | static int cio_validate_msg_subchannel(struct subchannel *sch) |
509 | { | 509 | { |
510 | /* Initialization for message subchannels. */ | 510 | /* Initialization for message subchannels. */ |
511 | if (!css_sch_is_valid(&sch->schib)) | 511 | if (!css_sch_is_valid(&sch->schib)) |
512 | return -ENODEV; | 512 | return -ENODEV; |
513 | 513 | ||
514 | /* Devno is valid. */ | 514 | /* Devno is valid. */ |
515 | return cio_check_devno_blacklisted(sch); | 515 | return cio_check_devno_blacklisted(sch); |
516 | } | 516 | } |
517 | 517 | ||
518 | /** | 518 | /** |
519 | * cio_validate_subchannel - basic validation of subchannel | 519 | * cio_validate_subchannel - basic validation of subchannel |
520 | * @sch: subchannel structure to be filled out | 520 | * @sch: subchannel structure to be filled out |
521 | * @schid: subchannel id | 521 | * @schid: subchannel id |
522 | * | 522 | * |
523 | * Find out subchannel type and initialize struct subchannel. | 523 | * Find out subchannel type and initialize struct subchannel. |
524 | * Return codes: | 524 | * Return codes: |
525 | * 0 on success | 525 | * 0 on success |
526 | * -ENXIO for non-defined subchannels | 526 | * -ENXIO for non-defined subchannels |
527 | * -ENODEV for invalid subchannels or blacklisted devices | 527 | * -ENODEV for invalid subchannels or blacklisted devices |
528 | * -EIO for subchannels in an invalid subchannel set | 528 | * -EIO for subchannels in an invalid subchannel set |
529 | */ | 529 | */ |
530 | int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) | 530 | int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) |
531 | { | 531 | { |
532 | char dbf_txt[15]; | 532 | char dbf_txt[15]; |
533 | int ccode; | 533 | int ccode; |
534 | int err; | 534 | int err; |
535 | 535 | ||
536 | sprintf(dbf_txt, "valsch%x", schid.sch_no); | 536 | sprintf(dbf_txt, "valsch%x", schid.sch_no); |
537 | CIO_TRACE_EVENT(4, dbf_txt); | 537 | CIO_TRACE_EVENT(4, dbf_txt); |
538 | 538 | ||
539 | /* Nuke all fields. */ | 539 | /* Nuke all fields. */ |
540 | memset(sch, 0, sizeof(struct subchannel)); | 540 | memset(sch, 0, sizeof(struct subchannel)); |
541 | 541 | ||
542 | sch->schid = schid; | 542 | sch->schid = schid; |
543 | if (cio_is_console(schid)) { | 543 | if (cio_is_console(schid)) { |
544 | sch->lock = cio_get_console_lock(); | 544 | sch->lock = cio_get_console_lock(); |
545 | } else { | 545 | } else { |
546 | err = cio_create_sch_lock(sch); | 546 | err = cio_create_sch_lock(sch); |
547 | if (err) | 547 | if (err) |
548 | goto out; | 548 | goto out; |
549 | } | 549 | } |
550 | mutex_init(&sch->reg_mutex); | 550 | mutex_init(&sch->reg_mutex); |
551 | 551 | ||
552 | /* | 552 | /* |
553 | * The first subchannel that is not-operational (ccode==3) | 553 | * The first subchannel that is not-operational (ccode==3) |
554 | * indicates that there aren't any more devices available. | 554 | * indicates that there aren't any more devices available. |
555 | * If stsch gets an exception, it means the current subchannel set | 555 | * If stsch gets an exception, it means the current subchannel set |
556 | * is not valid. | 556 | * is not valid. |
557 | */ | 557 | */ |
558 | ccode = stsch_err (schid, &sch->schib); | 558 | ccode = stsch_err (schid, &sch->schib); |
559 | if (ccode) { | 559 | if (ccode) { |
560 | err = (ccode == 3) ? -ENXIO : ccode; | 560 | err = (ccode == 3) ? -ENXIO : ccode; |
561 | goto out; | 561 | goto out; |
562 | } | 562 | } |
563 | /* Copy subchannel type from path management control word. */ | 563 | /* Copy subchannel type from path management control word. */ |
564 | sch->st = sch->schib.pmcw.st; | 564 | sch->st = sch->schib.pmcw.st; |
565 | 565 | ||
566 | switch (sch->st) { | 566 | switch (sch->st) { |
567 | case SUBCHANNEL_TYPE_IO: | 567 | case SUBCHANNEL_TYPE_IO: |
568 | err = cio_validate_io_subchannel(sch); | 568 | err = cio_validate_io_subchannel(sch); |
569 | break; | 569 | break; |
570 | case SUBCHANNEL_TYPE_MSG: | 570 | case SUBCHANNEL_TYPE_MSG: |
571 | err = cio_validate_msg_subchannel(sch); | 571 | err = cio_validate_msg_subchannel(sch); |
572 | break; | 572 | break; |
573 | default: | 573 | default: |
574 | err = 0; | 574 | err = 0; |
575 | } | 575 | } |
576 | if (err) | 576 | if (err) |
577 | goto out; | 577 | goto out; |
578 | 578 | ||
579 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", | 579 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", |
580 | sch->schid.ssid, sch->schid.sch_no, sch->st); | 580 | sch->schid.ssid, sch->schid.sch_no, sch->st); |
581 | return 0; | 581 | return 0; |
582 | out: | 582 | out: |
583 | if (!cio_is_console(schid)) | 583 | if (!cio_is_console(schid)) |
584 | kfree(sch->lock); | 584 | kfree(sch->lock); |
585 | sch->lock = NULL; | 585 | sch->lock = NULL; |
586 | return err; | 586 | return err; |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | 589 | /* |
590 | * do_IRQ() handles all normal I/O device IRQs (the special | 590 | * do_IRQ() handles all normal I/O device IRQs (the special |
591 | * SMP cross-CPU interrupts have their own specific | 591 | * SMP cross-CPU interrupts have their own specific |
592 | * handlers). | 592 | * handlers). |
593 | * | 593 | * |
594 | */ | 594 | */ |
595 | void __irq_entry do_IRQ(struct pt_regs *regs) | 595 | void __irq_entry do_IRQ(struct pt_regs *regs) |
596 | { | 596 | { |
597 | struct tpi_info *tpi_info; | 597 | struct tpi_info *tpi_info; |
598 | struct subchannel *sch; | 598 | struct subchannel *sch; |
599 | struct irb *irb; | 599 | struct irb *irb; |
600 | struct pt_regs *old_regs; | 600 | struct pt_regs *old_regs; |
601 | 601 | ||
602 | old_regs = set_irq_regs(regs); | 602 | old_regs = set_irq_regs(regs); |
603 | irq_enter(); | 603 | irq_enter(); |
604 | __this_cpu_write(s390_idle.nohz_delay, 1); | 604 | __this_cpu_write(s390_idle.nohz_delay, 1); |
605 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | 605 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) |
606 | /* Serve timer interrupts first. */ | 606 | /* Serve timer interrupts first. */ |
607 | clock_comparator_work(); | 607 | clock_comparator_work(); |
608 | /* | 608 | /* |
609 | * Get interrupt information from lowcore | 609 | * Get interrupt information from lowcore |
610 | */ | 610 | */ |
611 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; | 611 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
612 | irb = (struct irb *)&S390_lowcore.irb; | 612 | irb = (struct irb *)&S390_lowcore.irb; |
613 | do { | 613 | do { |
614 | kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; | 614 | kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; |
615 | if (tpi_info->adapter_IO) { | 615 | if (tpi_info->adapter_IO) { |
616 | do_adapter_IO(tpi_info->isc); | 616 | do_adapter_IO(tpi_info->isc); |
617 | continue; | 617 | continue; |
618 | } | 618 | } |
619 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | 619 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; |
620 | if (!sch) { | 620 | if (!sch) { |
621 | /* Clear pending interrupt condition. */ | 621 | /* Clear pending interrupt condition. */ |
622 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; | 622 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; |
623 | tsch(tpi_info->schid, irb); | 623 | tsch(tpi_info->schid, irb); |
624 | continue; | 624 | continue; |
625 | } | 625 | } |
626 | spin_lock(sch->lock); | 626 | spin_lock(sch->lock); |
627 | /* Store interrupt response block to lowcore. */ | 627 | /* Store interrupt response block to lowcore. */ |
628 | if (tsch(tpi_info->schid, irb) == 0) { | 628 | if (tsch(tpi_info->schid, irb) == 0) { |
629 | /* Keep subchannel information word up to date. */ | 629 | /* Keep subchannel information word up to date. */ |
630 | memcpy (&sch->schib.scsw, &irb->scsw, | 630 | memcpy (&sch->schib.scsw, &irb->scsw, |
631 | sizeof (irb->scsw)); | 631 | sizeof (irb->scsw)); |
632 | /* Call interrupt handler if there is one. */ | 632 | /* Call interrupt handler if there is one. */ |
633 | if (sch->driver && sch->driver->irq) | 633 | if (sch->driver && sch->driver->irq) |
634 | sch->driver->irq(sch); | 634 | sch->driver->irq(sch); |
635 | else | 635 | else |
636 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; | 636 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; |
637 | } else | 637 | } else |
638 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; | 638 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; |
639 | spin_unlock(sch->lock); | 639 | spin_unlock(sch->lock); |
640 | /* | 640 | /* |
641 | * Are more interrupts pending? | 641 | * Are more interrupts pending? |
642 | * If so, the tpi instruction will update the lowcore | 642 | * If so, the tpi instruction will update the lowcore |
643 | * to hold the info for the next interrupt. | 643 | * to hold the info for the next interrupt. |
644 | * We don't do this for VM because a tpi drops the cpu | 644 | * We don't do this for VM because a tpi drops the cpu |
645 | * out of SIE, which costs more cycles than it saves. | 645 | * out of SIE, which costs more cycles than it saves. |
646 | */ | 646 | */ |
647 | } while (MACHINE_IS_LPAR && tpi(NULL) != 0); | 647 | } while (MACHINE_IS_LPAR && tpi(NULL) != 0); |
648 | irq_exit(); | 648 | irq_exit(); |
649 | set_irq_regs(old_regs); | 649 | set_irq_regs(old_regs); |
650 | } | 650 | } |
651 | 651 | ||
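The loop tail is the one s390-specific subtlety in do_IRQ(): on LPAR, tpi (test pending interruption) refills the lowcore fields with the next pending I/O interrupt, so a single external entry can drain several device interrupts. The control flow in miniature, with the lowcore plumbing elided (handle_subchannel_irq() is a hypothetical stand-in for the tsch/driver->irq block above):

	do {
		if (tpi_info->adapter_IO)
			do_adapter_IO(tpi_info->isc);	/* adapter interrupt */
		else
			handle_subchannel_irq(tpi_info);
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);	/* z/VM: leaving SIE costs more */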
652 | #ifdef CONFIG_CCW_CONSOLE | 652 | #ifdef CONFIG_CCW_CONSOLE |
653 | static struct subchannel console_subchannel; | 653 | static struct subchannel console_subchannel; |
654 | static struct io_subchannel_private console_priv; | 654 | static struct io_subchannel_private console_priv; |
655 | static int console_subchannel_in_use; | 655 | static int console_subchannel_in_use; |
656 | 656 | ||
657 | /* | 657 | /* |
658 | * Use cio_tsch to update the subchannel status and call the interrupt handler | 658 | * Use cio_tsch to update the subchannel status and call the interrupt handler |
659 | * if status is pending. Called with the console_subchannel lock held. | 659 | * if status is pending. Called with the console_subchannel lock held. |
660 | */ | 660 | */ |
661 | static void cio_tsch(struct subchannel *sch) | 661 | static void cio_tsch(struct subchannel *sch) |
662 | { | 662 | { |
663 | struct irb *irb; | 663 | struct irb *irb; |
664 | int irq_context; | 664 | int irq_context; |
665 | 665 | ||
666 | irb = (struct irb *)&S390_lowcore.irb; | 666 | irb = (struct irb *)&S390_lowcore.irb; |
667 | /* Store interrupt response block to lowcore. */ | 667 | /* Store interrupt response block to lowcore. */ |
668 | if (tsch(sch->schid, irb) != 0) | 668 | if (tsch(sch->schid, irb) != 0) |
669 | /* Not status pending or not operational. */ | 669 | /* Not status pending or not operational. */ |
670 | return; | 670 | return; |
671 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | 671 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); |
672 | /* Call interrupt handler with updated status. */ | 672 | /* Call interrupt handler with updated status. */ |
673 | irq_context = in_interrupt(); | 673 | irq_context = in_interrupt(); |
674 | if (!irq_context) { | 674 | if (!irq_context) { |
675 | local_bh_disable(); | 675 | local_bh_disable(); |
676 | irq_enter(); | 676 | irq_enter(); |
677 | } | 677 | } |
678 | if (sch->driver && sch->driver->irq) | 678 | if (sch->driver && sch->driver->irq) |
679 | sch->driver->irq(sch); | 679 | sch->driver->irq(sch); |
680 | else | 680 | else |
681 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; | 681 | kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++; |
682 | if (!irq_context) { | 682 | if (!irq_context) { |
683 | irq_exit(); | 683 | irq_exit(); |
684 | _local_bh_enable(); | 684 | _local_bh_enable(); |
685 | } | 685 | } |
686 | } | 686 | } |
687 | 687 | ||
688 | void *cio_get_console_priv(void) | 688 | void *cio_get_console_priv(void) |
689 | { | 689 | { |
690 | return &console_priv; | 690 | return &console_priv; |
691 | } | 691 | } |
692 | 692 | ||
693 | /* | 693 | /* |
694 | * busy wait for the next interrupt on the console | 694 | * busy wait for the next interrupt on the console |
695 | */ | 695 | */ |
696 | void wait_cons_dev(void) | 696 | void wait_cons_dev(void) |
697 | { | 697 | { |
698 | if (!console_subchannel_in_use) | 698 | if (!console_subchannel_in_use) |
699 | return; | 699 | return; |
700 | 700 | ||
701 | while (1) { | 701 | while (1) { |
702 | cio_tsch(&console_subchannel); | 702 | cio_tsch(&console_subchannel); |
703 | if (console_subchannel.schib.scsw.cmd.actl == 0) | 703 | if (console_subchannel.schib.scsw.cmd.actl == 0) |
704 | break; | 704 | break; |
705 | udelay_simple(100); | 705 | udelay_simple(100); |
706 | } | 706 | } |
707 | } | 707 | } |
708 | 708 | ||
709 | static int | 709 | static int |
710 | cio_test_for_console(struct subchannel_id schid, void *data) | 710 | cio_test_for_console(struct subchannel_id schid, void *data) |
711 | { | 711 | { |
712 | if (stsch_err(schid, &console_subchannel.schib) != 0) | 712 | if (stsch_err(schid, &console_subchannel.schib) != 0) |
713 | return -ENXIO; | 713 | return -ENXIO; |
714 | if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && | 714 | if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && |
715 | console_subchannel.schib.pmcw.dnv && | 715 | console_subchannel.schib.pmcw.dnv && |
716 | (console_subchannel.schib.pmcw.dev == console_devno)) { | 716 | (console_subchannel.schib.pmcw.dev == console_devno)) { |
717 | console_irq = schid.sch_no; | 717 | console_irq = schid.sch_no; |
718 | return 1; /* found */ | 718 | return 1; /* found */ |
719 | } | 719 | } |
720 | return 0; | 720 | return 0; |
721 | } | 721 | } |
722 | 722 | ||
723 | 723 | ||
724 | static int | 724 | static int |
725 | cio_get_console_sch_no(void) | 725 | cio_get_console_sch_no(void) |
726 | { | 726 | { |
727 | struct subchannel_id schid; | 727 | struct subchannel_id schid; |
728 | 728 | ||
729 | init_subchannel_id(&schid); | 729 | init_subchannel_id(&schid); |
730 | if (console_irq != -1) { | 730 | if (console_irq != -1) { |
731 | /* VM provided us with the irq number of the console. */ | 731 | /* VM provided us with the irq number of the console. */ |
732 | schid.sch_no = console_irq; | 732 | schid.sch_no = console_irq; |
733 | if (stsch_err(schid, &console_subchannel.schib) != 0 || | 733 | if (stsch_err(schid, &console_subchannel.schib) != 0 || |
734 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | 734 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || |
735 | !console_subchannel.schib.pmcw.dnv) | 735 | !console_subchannel.schib.pmcw.dnv) |
736 | return -1; | 736 | return -1; |
737 | console_devno = console_subchannel.schib.pmcw.dev; | 737 | console_devno = console_subchannel.schib.pmcw.dev; |
738 | } else if (console_devno != -1) { | 738 | } else if (console_devno != -1) { |
739 | /* At least the console device number is known. */ | 739 | /* At least the console device number is known. */ |
740 | for_each_subchannel(cio_test_for_console, NULL); | 740 | for_each_subchannel(cio_test_for_console, NULL); |
741 | if (console_irq == -1) | 741 | if (console_irq == -1) |
742 | return -1; | 742 | return -1; |
743 | } else { | 743 | } else { |
744 | /* unlike in 2.4, we cannot autoprobe here, since | 744 | /* unlike in 2.4, we cannot autoprobe here, since |
745 | * the channel subsystem is not fully initialized. | 745 | * the channel subsystem is not fully initialized. |
746 | * With some luck, the HWC console can take over */ | 746 | * With some luck, the HWC console can take over */ |
747 | return -1; | 747 | return -1; |
748 | } | 748 | } |
749 | return console_irq; | 749 | return console_irq; |
750 | } | 750 | } |
751 | 751 | ||
752 | struct subchannel * | 752 | struct subchannel * |
753 | cio_probe_console(void) | 753 | cio_probe_console(void) |
754 | { | 754 | { |
755 | int sch_no, ret; | 755 | int sch_no, ret; |
756 | struct subchannel_id schid; | 756 | struct subchannel_id schid; |
757 | 757 | ||
758 | if (xchg(&console_subchannel_in_use, 1) != 0) | 758 | if (xchg(&console_subchannel_in_use, 1) != 0) |
759 | return ERR_PTR(-EBUSY); | 759 | return ERR_PTR(-EBUSY); |
760 | sch_no = cio_get_console_sch_no(); | 760 | sch_no = cio_get_console_sch_no(); |
761 | if (sch_no == -1) { | 761 | if (sch_no == -1) { |
762 | console_subchannel_in_use = 0; | 762 | console_subchannel_in_use = 0; |
763 | pr_warning("No CCW console was found\n"); | 763 | pr_warning("No CCW console was found\n"); |
764 | return ERR_PTR(-ENODEV); | 764 | return ERR_PTR(-ENODEV); |
765 | } | 765 | } |
766 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 766 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
767 | init_subchannel_id(&schid); | 767 | init_subchannel_id(&schid); |
768 | schid.sch_no = sch_no; | 768 | schid.sch_no = sch_no; |
769 | ret = cio_validate_subchannel(&console_subchannel, schid); | 769 | ret = cio_validate_subchannel(&console_subchannel, schid); |
770 | if (ret) { | 770 | if (ret) { |
771 | console_subchannel_in_use = 0; | 771 | console_subchannel_in_use = 0; |
772 | return ERR_PTR(-ENODEV); | 772 | return ERR_PTR(-ENODEV); |
773 | } | 773 | } |
774 | 774 | ||
775 | /* | 775 | /* |
776 | * enable console I/O-interrupt subclass | 776 | * enable console I/O-interrupt subclass |
777 | */ | 777 | */ |
778 | isc_register(CONSOLE_ISC); | 778 | isc_register(CONSOLE_ISC); |
779 | console_subchannel.config.isc = CONSOLE_ISC; | 779 | console_subchannel.config.isc = CONSOLE_ISC; |
780 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; | 780 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; |
781 | ret = cio_commit_config(&console_subchannel); | 781 | ret = cio_commit_config(&console_subchannel); |
782 | if (ret) { | 782 | if (ret) { |
783 | isc_unregister(CONSOLE_ISC); | 783 | isc_unregister(CONSOLE_ISC); |
784 | console_subchannel_in_use = 0; | 784 | console_subchannel_in_use = 0; |
785 | return ERR_PTR(ret); | 785 | return ERR_PTR(ret); |
786 | } | 786 | } |
787 | return &console_subchannel; | 787 | return &console_subchannel; |
788 | } | 788 | } |
789 | 789 | ||
790 | void | 790 | void |
791 | cio_release_console(void) | 791 | cio_release_console(void) |
792 | { | 792 | { |
793 | console_subchannel.config.intparm = 0; | 793 | console_subchannel.config.intparm = 0; |
794 | cio_commit_config(&console_subchannel); | 794 | cio_commit_config(&console_subchannel); |
795 | isc_unregister(CONSOLE_ISC); | 795 | isc_unregister(CONSOLE_ISC); |
796 | console_subchannel_in_use = 0; | 796 | console_subchannel_in_use = 0; |
797 | } | 797 | } |
798 | 798 | ||
799 | /* Bah... hack to catch console special sausages. */ | 799 | /* Bah... hack to catch console special sausages. */ |
800 | int | 800 | int |
801 | cio_is_console(struct subchannel_id schid) | 801 | cio_is_console(struct subchannel_id schid) |
802 | { | 802 | { |
803 | if (!console_subchannel_in_use) | 803 | if (!console_subchannel_in_use) |
804 | return 0; | 804 | return 0; |
805 | return schid_equal(&schid, &console_subchannel.schid); | 805 | return schid_equal(&schid, &console_subchannel.schid); |
806 | } | 806 | } |
807 | 807 | ||
808 | struct subchannel * | 808 | struct subchannel * |
809 | cio_get_console_subchannel(void) | 809 | cio_get_console_subchannel(void) |
810 | { | 810 | { |
811 | if (!console_subchannel_in_use) | 811 | if (!console_subchannel_in_use) |
812 | return NULL; | 812 | return NULL; |
813 | return &console_subchannel; | 813 | return &console_subchannel; |
814 | } | 814 | } |
815 | 815 | ||
816 | #endif | 816 | #endif |
817 | static int | 817 | static int |
818 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | 818 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) |
819 | { | 819 | { |
820 | int retry, cc; | 820 | int retry, cc; |
821 | 821 | ||
822 | cc = 0; | 822 | cc = 0; |
823 | for (retry=0;retry<3;retry++) { | 823 | for (retry=0;retry<3;retry++) { |
824 | schib->pmcw.ena = 0; | 824 | schib->pmcw.ena = 0; |
825 | cc = msch_err(schid, schib); | 825 | cc = msch_err(schid, schib); |
826 | if (cc) | 826 | if (cc) |
827 | return (cc==3?-ENODEV:-EBUSY); | 827 | return (cc==3?-ENODEV:-EBUSY); |
828 | if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) | 828 | if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) |
829 | return -ENODEV; | 829 | return -ENODEV; |
830 | if (!schib->pmcw.ena) | 830 | if (!schib->pmcw.ena) |
831 | return 0; | 831 | return 0; |
832 | } | 832 | } |
833 | return -EBUSY; /* uhm... */ | 833 | return -EBUSY; /* uhm... */ |
834 | } | 834 | } |
835 | 835 | ||
836 | static int | 836 | static int |
837 | __clear_io_subchannel_easy(struct subchannel_id schid) | 837 | __clear_io_subchannel_easy(struct subchannel_id schid) |
838 | { | 838 | { |
839 | int retry; | 839 | int retry; |
840 | 840 | ||
841 | if (csch(schid)) | 841 | if (csch(schid)) |
842 | return -ENODEV; | 842 | return -ENODEV; |
843 | for (retry=0;retry<20;retry++) { | 843 | for (retry=0;retry<20;retry++) { |
844 | struct tpi_info ti; | 844 | struct tpi_info ti; |
845 | 845 | ||
846 | if (tpi(&ti)) { | 846 | if (tpi(&ti)) { |
847 | tsch(ti.schid, (struct irb *)&S390_lowcore.irb); | 847 | tsch(ti.schid, (struct irb *)&S390_lowcore.irb); |
848 | if (schid_equal(&ti.schid, &schid)) | 848 | if (schid_equal(&ti.schid, &schid)) |
849 | return 0; | 849 | return 0; |
850 | } | 850 | } |
851 | udelay_simple(100); | 851 | udelay_simple(100); |
852 | } | 852 | } |
853 | return -EBUSY; | 853 | return -EBUSY; |
854 | } | 854 | } |
855 | 855 | ||
856 | static void __clear_chsc_subchannel_easy(void) | 856 | static void __clear_chsc_subchannel_easy(void) |
857 | { | 857 | { |
858 | /* It seems we can only wait for a bit here :/ */ | 858 | /* It seems we can only wait for a bit here :/ */ |
859 | udelay_simple(100); | 859 | udelay_simple(100); |
860 | } | 860 | } |
861 | 861 | ||
862 | static int pgm_check_occured; | 862 | static int pgm_check_occured; |
863 | 863 | ||
864 | static void cio_reset_pgm_check_handler(void) | 864 | static void cio_reset_pgm_check_handler(void) |
865 | { | 865 | { |
866 | pgm_check_occured = 1; | 866 | pgm_check_occured = 1; |
867 | } | 867 | } |
868 | 868 | ||
869 | static int stsch_reset(struct subchannel_id schid, struct schib *addr) | 869 | static int stsch_reset(struct subchannel_id schid, struct schib *addr) |
870 | { | 870 | { |
871 | int rc; | 871 | int rc; |
872 | 872 | ||
873 | pgm_check_occured = 0; | 873 | pgm_check_occured = 0; |
874 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; | 874 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
875 | rc = stsch_err(schid, addr); | 875 | rc = stsch_err(schid, addr); |
876 | s390_base_pgm_handler_fn = NULL; | 876 | s390_base_pgm_handler_fn = NULL; |
877 | 877 | ||
878 | /* The program check handler could have changed pgm_check_occured. */ | 878 | /* The program check handler could have changed pgm_check_occured. */ |
879 | barrier(); | 879 | barrier(); |
880 | 880 | ||
881 | if (pgm_check_occured) | 881 | if (pgm_check_occured) |
882 | return -EIO; | 882 | return -EIO; |
883 | else | 883 | else |
884 | return rc; | 884 | return rc; |
885 | } | 885 | } |
886 | 886 | ||
887 | static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | 887 | static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) |
888 | { | 888 | { |
889 | struct schib schib; | 889 | struct schib schib; |
890 | 890 | ||
891 | if (stsch_reset(schid, &schib)) | 891 | if (stsch_reset(schid, &schib)) |
892 | return -ENXIO; | 892 | return -ENXIO; |
893 | if (!schib.pmcw.ena) | 893 | if (!schib.pmcw.ena) |
894 | return 0; | 894 | return 0; |
895 | switch(__disable_subchannel_easy(schid, &schib)) { | 895 | switch(__disable_subchannel_easy(schid, &schib)) { |
896 | case 0: | 896 | case 0: |
897 | case -ENODEV: | 897 | case -ENODEV: |
898 | break; | 898 | break; |
899 | default: /* -EBUSY */ | 899 | default: /* -EBUSY */ |
900 | switch (schib.pmcw.st) { | 900 | switch (schib.pmcw.st) { |
901 | case SUBCHANNEL_TYPE_IO: | 901 | case SUBCHANNEL_TYPE_IO: |
902 | if (__clear_io_subchannel_easy(schid)) | 902 | if (__clear_io_subchannel_easy(schid)) |
903 | goto out; /* give up... */ | 903 | goto out; /* give up... */ |
904 | break; | 904 | break; |
905 | case SUBCHANNEL_TYPE_CHSC: | 905 | case SUBCHANNEL_TYPE_CHSC: |
906 | __clear_chsc_subchannel_easy(); | 906 | __clear_chsc_subchannel_easy(); |
907 | break; | 907 | break; |
908 | default: | 908 | default: |
909 | /* No default clear strategy */ | 909 | /* No default clear strategy */ |
910 | break; | 910 | break; |
911 | } | 911 | } |
912 | stsch_err(schid, &schib); | 912 | stsch_err(schid, &schib); |
913 | __disable_subchannel_easy(schid, &schib); | 913 | __disable_subchannel_easy(schid, &schib); |
914 | } | 914 | } |
915 | out: | 915 | out: |
916 | return 0; | 916 | return 0; |
917 | } | 917 | } |
918 | 918 | ||
919 | static atomic_t chpid_reset_count; | 919 | static atomic_t chpid_reset_count; |
920 | 920 | ||
921 | static void s390_reset_chpids_mcck_handler(void) | 921 | static void s390_reset_chpids_mcck_handler(void) |
922 | { | 922 | { |
923 | struct crw crw; | 923 | struct crw crw; |
924 | struct mci *mci; | 924 | struct mci *mci; |
925 | 925 | ||
926 | /* Check for pending channel report word. */ | 926 | /* Check for pending channel report word. */ |
927 | mci = (struct mci *)&S390_lowcore.mcck_interruption_code; | 927 | mci = (struct mci *)&S390_lowcore.mcck_interruption_code; |
928 | if (!mci->cp) | 928 | if (!mci->cp) |
929 | return; | 929 | return; |
930 | /* Process channel report words. */ | 930 | /* Process channel report words. */ |
931 | while (stcrw(&crw) == 0) { | 931 | while (stcrw(&crw) == 0) { |
932 | /* Check for responses to RCHP. */ | 932 | /* Check for responses to RCHP. */ |
933 | if (crw.slct && crw.rsc == CRW_RSC_CPATH) | 933 | if (crw.slct && crw.rsc == CRW_RSC_CPATH) |
934 | atomic_dec(&chpid_reset_count); | 934 | atomic_dec(&chpid_reset_count); |
935 | } | 935 | } |
936 | } | 936 | } |
937 | 937 | ||
938 | #define RCHP_TIMEOUT (30 * USEC_PER_SEC) | 938 | #define RCHP_TIMEOUT (30 * USEC_PER_SEC) |
939 | static void css_reset(void) | 939 | static void css_reset(void) |
940 | { | 940 | { |
941 | int i, ret; | 941 | int i, ret; |
942 | unsigned long long timeout; | 942 | unsigned long long timeout; |
943 | struct chp_id chpid; | 943 | struct chp_id chpid; |
944 | 944 | ||
945 | /* Reset subchannels. */ | 945 | /* Reset subchannels. */ |
946 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 946 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
947 | /* Reset channel paths. */ | 947 | /* Reset channel paths. */ |
948 | s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; | 948 | s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; |
949 | /* Enable channel report machine checks. */ | 949 | /* Enable channel report machine checks. */ |
950 | __ctl_set_bit(14, 28); | 950 | __ctl_set_bit(14, 28); |
951 | /* Temporarily reenable machine checks. */ | 951 | /* Temporarily reenable machine checks. */ |
952 | local_mcck_enable(); | 952 | local_mcck_enable(); |
953 | chp_id_init(&chpid); | 953 | chp_id_init(&chpid); |
954 | for (i = 0; i <= __MAX_CHPID; i++) { | 954 | for (i = 0; i <= __MAX_CHPID; i++) { |
955 | chpid.id = i; | 955 | chpid.id = i; |
956 | ret = rchp(chpid); | 956 | ret = rchp(chpid); |
957 | if ((ret == 0) || (ret == 2)) | 957 | if ((ret == 0) || (ret == 2)) |
958 | /* | 958 | /* |
959 | * rchp either succeeded, or another rchp is already | 959 | * rchp either succeeded, or another rchp is already |
960 | * in progress. In either case, we'll get a crw. | 960 | * in progress. In either case, we'll get a crw. |
961 | */ | 961 | */ |
962 | atomic_inc(&chpid_reset_count); | 962 | atomic_inc(&chpid_reset_count); |
963 | } | 963 | } |
964 | /* Wait for machine check for all channel paths. */ | 964 | /* Wait for machine check for all channel paths. */ |
965 | timeout = get_clock() + (RCHP_TIMEOUT << 12); | 965 | timeout = get_clock() + (RCHP_TIMEOUT << 12); |
966 | while (atomic_read(&chpid_reset_count) != 0) { | 966 | while (atomic_read(&chpid_reset_count) != 0) { |
967 | if (get_clock() > timeout) | 967 | if (get_clock() > timeout) |
968 | break; | 968 | break; |
969 | cpu_relax(); | 969 | cpu_relax(); |
970 | } | 970 | } |
971 | /* Disable machine checks again. */ | 971 | /* Disable machine checks again. */ |
972 | local_mcck_disable(); | 972 | local_mcck_disable(); |
973 | /* Disable channel report machine checks. */ | 973 | /* Disable channel report machine checks. */ |
974 | __ctl_clear_bit(14, 28); | 974 | __ctl_clear_bit(14, 28); |
975 | s390_base_mcck_handler_fn = NULL; | 975 | s390_base_mcck_handler_fn = NULL; |
976 | } | 976 | } |
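A note on the timeout arithmetic in css_reset() above (new line 965): the s390 TOD clock read by get_clock() ticks in units where bit 51 corresponds to one microsecond, so a microsecond count is converted to TOD-clock units by shifting it left by 12 bits. A minimal, self-contained sketch of the same conversion (the two constants are copied from the code above; the program itself is illustrative only):

	/* 1 microsecond == 1 << 12 TOD-clock units, since bit 51 of the
	 * s390 TOD clock increments once per microsecond. */
	#include <stdio.h>

	#define USEC_PER_SEC	1000000UL
	#define RCHP_TIMEOUT	(30 * USEC_PER_SEC)

	int main(void)
	{
		unsigned long long tod = (unsigned long long)RCHP_TIMEOUT << 12;

		printf("30 s == %llu TOD-clock units\n", tod);
		return 0;
	}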
977 | 977 | ||
978 | static struct reset_call css_reset_call = { | 978 | static struct reset_call css_reset_call = { |
979 | .fn = css_reset, | 979 | .fn = css_reset, |
980 | }; | 980 | }; |
981 | 981 | ||
982 | static int __init init_css_reset_call(void) | 982 | static int __init init_css_reset_call(void) |
983 | { | 983 | { |
984 | atomic_set(&chpid_reset_count, 0); | 984 | atomic_set(&chpid_reset_count, 0); |
985 | register_reset_call(&css_reset_call); | 985 | register_reset_call(&css_reset_call); |
986 | return 0; | 986 | return 0; |
987 | } | 987 | } |
988 | 988 | ||
989 | arch_initcall(init_css_reset_call); | 989 | arch_initcall(init_css_reset_call); |
990 | 990 | ||
991 | struct sch_match_id { | 991 | struct sch_match_id { |
992 | struct subchannel_id schid; | 992 | struct subchannel_id schid; |
993 | struct ccw_dev_id devid; | 993 | struct ccw_dev_id devid; |
994 | int rc; | 994 | int rc; |
995 | }; | 995 | }; |
996 | 996 | ||
997 | static int __reipl_subchannel_match(struct subchannel_id schid, void *data) | 997 | static int __reipl_subchannel_match(struct subchannel_id schid, void *data) |
998 | { | 998 | { |
999 | struct schib schib; | 999 | struct schib schib; |
1000 | struct sch_match_id *match_id = data; | 1000 | struct sch_match_id *match_id = data; |
1001 | 1001 | ||
1002 | if (stsch_reset(schid, &schib)) | 1002 | if (stsch_reset(schid, &schib)) |
1003 | return -ENXIO; | 1003 | return -ENXIO; |
1004 | if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && | 1004 | if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && |
1005 | (schib.pmcw.dev == match_id->devid.devno) && | 1005 | (schib.pmcw.dev == match_id->devid.devno) && |
1006 | (schid.ssid == match_id->devid.ssid)) { | 1006 | (schid.ssid == match_id->devid.ssid)) { |
1007 | match_id->schid = schid; | 1007 | match_id->schid = schid; |
1008 | match_id->rc = 0; | 1008 | match_id->rc = 0; |
1009 | return 1; | 1009 | return 1; |
1010 | } | 1010 | } |
1011 | return 0; | 1011 | return 0; |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | static int reipl_find_schid(struct ccw_dev_id *devid, | 1014 | static int reipl_find_schid(struct ccw_dev_id *devid, |
1015 | struct subchannel_id *schid) | 1015 | struct subchannel_id *schid) |
1016 | { | 1016 | { |
1017 | struct sch_match_id match_id; | 1017 | struct sch_match_id match_id; |
1018 | 1018 | ||
1019 | match_id.devid = *devid; | 1019 | match_id.devid = *devid; |
1020 | match_id.rc = -ENODEV; | 1020 | match_id.rc = -ENODEV; |
1021 | for_each_subchannel(__reipl_subchannel_match, &match_id); | 1021 | for_each_subchannel(__reipl_subchannel_match, &match_id); |
1022 | if (match_id.rc == 0) | 1022 | if (match_id.rc == 0) |
1023 | *schid = match_id.schid; | 1023 | *schid = match_id.schid; |
1024 | return match_id.rc; | 1024 | return match_id.rc; |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | extern void do_reipl_asm(__u32 schid); | 1027 | extern void do_reipl_asm(__u32 schid); |
1028 | 1028 | ||
1029 | /* Make sure all subchannels are quiet before we re-ipl an lpar. */ | 1029 | /* Make sure all subchannels are quiet before we re-ipl an lpar. */ |
1030 | void reipl_ccw_dev(struct ccw_dev_id *devid) | 1030 | void reipl_ccw_dev(struct ccw_dev_id *devid) |
1031 | { | 1031 | { |
1032 | struct subchannel_id schid; | 1032 | struct subchannel_id uninitialized_var(schid); |
1033 | 1033 | ||
1034 | s390_reset_system(NULL, NULL); | 1034 | s390_reset_system(NULL, NULL); |
1035 | if (reipl_find_schid(devid, &schid) != 0) | 1035 | if (reipl_find_schid(devid, &schid) != 0) |
1036 | panic("IPL Device not found\n"); | 1036 | panic("IPL Device not found\n"); |
1037 | do_reipl_asm(*((__u32*)&schid)); | 1037 | do_reipl_asm(*((__u32*)&schid)); |
1038 | } | 1038 | } |
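The hunk above contains this file's only change: schid in reipl_ccw_dev() (new line 1032) is now declared with uninitialized_var(). gcc cannot see that reipl_find_schid() writes schid on every path that returns 0, and that the caller panics otherwise, so it warned that schid may be used uninitialized. Below is a minimal, self-contained sketch of the same pattern, assuming the compiler-gcc.h definition of the macro from kernels of this vintage (a self-assignment, which generates no code but pacifies the warning); find_value() and the rest of the program are invented for illustration:

	#include <stdio.h>

	#define uninitialized_var(x) x = x	/* as in compiler-gcc.h */

	/* Writes *out only on success -- the same shape as reipl_find_schid(). */
	static int find_value(int key, int *out)
	{
		if (key != 42)
			return -1;	/* failure: *out left untouched */
		*out = 1234;
		return 0;
	}

	int main(void)
	{
		int uninitialized_var(val);	/* expands to: int val = val; */

		if (find_value(42, &val) != 0)
			return 1;	/* val is never read on this path */
		printf("%d\n", val);	/* only reached after val was written */
		return 0;
	}

The trade-off is that the annotation keeps suppressing the warning even if the code is later changed so the variable really can be read unset, which is why it is reserved for warnings the compiler provably gets wrong.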
1039 | 1039 | ||
1040 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | 1040 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) |
1041 | { | 1041 | { |
1042 | struct subchannel_id schid; | 1042 | struct subchannel_id schid; |
1043 | struct schib schib; | 1043 | struct schib schib; |
1044 | 1044 | ||
1045 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; | 1045 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
1046 | if (!schid.one) | 1046 | if (!schid.one) |
1047 | return -ENODEV; | 1047 | return -ENODEV; |
1048 | if (stsch_err(schid, &schib)) | 1048 | if (stsch_err(schid, &schib)) |
1049 | return -ENODEV; | 1049 | return -ENODEV; |
1050 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | 1050 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) |
1051 | return -ENODEV; | 1051 | return -ENODEV; |
1052 | if (!schib.pmcw.dnv) | 1052 | if (!schib.pmcw.dnv) |
1053 | return -ENODEV; | 1053 | return -ENODEV; |
1054 | iplinfo->devno = schib.pmcw.dev; | 1054 | iplinfo->devno = schib.pmcw.dev; |
1055 | iplinfo->is_qdio = schib.pmcw.qf; | 1055 | iplinfo->is_qdio = schib.pmcw.qf; |
1056 | return 0; | 1056 | return 0; |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | /** | 1059 | /** |
1060 | * cio_tm_start_key - perform start function | 1060 | * cio_tm_start_key - perform start function |
1061 | * @sch: subchannel on which to perform the start function | 1061 | * @sch: subchannel on which to perform the start function |
1062 | * @tcw: transport-command word to be started | 1062 | * @tcw: transport-command word to be started |
1063 | * @lpm: mask of paths to use | 1063 | * @lpm: mask of paths to use |
1064 | * @key: storage key to use for storage access | 1064 | * @key: storage key to use for storage access |
1065 | * | 1065 | * |
1066 | * Start the tcw on the given subchannel. Return zero on success, non-zero | 1066 | * Start the tcw on the given subchannel. Return zero on success, non-zero |
1067 | * otherwise. | 1067 | * otherwise. |
1068 | */ | 1068 | */ |
1069 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) | 1069 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) |
1070 | { | 1070 | { |
1071 | int cc; | 1071 | int cc; |
1072 | union orb *orb = &to_io_private(sch)->orb; | 1072 | union orb *orb = &to_io_private(sch)->orb; |
1073 | 1073 | ||
1074 | memset(orb, 0, sizeof(union orb)); | 1074 | memset(orb, 0, sizeof(union orb)); |
1075 | orb->tm.intparm = (u32) (addr_t) sch; | 1075 | orb->tm.intparm = (u32) (addr_t) sch; |
1076 | orb->tm.key = key >> 4; | 1076 | orb->tm.key = key >> 4; |
1077 | orb->tm.b = 1; | 1077 | orb->tm.b = 1; |
1078 | orb->tm.lpm = lpm ? lpm : sch->lpm; | 1078 | orb->tm.lpm = lpm ? lpm : sch->lpm; |
1079 | orb->tm.tcw = (u32) (addr_t) tcw; | 1079 | orb->tm.tcw = (u32) (addr_t) tcw; |
1080 | cc = ssch(sch->schid, orb); | 1080 | cc = ssch(sch->schid, orb); |
1081 | switch (cc) { | 1081 | switch (cc) { |
1082 | case 0: | 1082 | case 0: |
1083 | return 0; | 1083 | return 0; |
1084 | case 1: | 1084 | case 1: |
1085 | case 2: | 1085 | case 2: |
1086 | return -EBUSY; | 1086 | return -EBUSY; |
1087 | default: | 1087 | default: |
1088 | return cio_start_handle_notoper(sch, lpm); | 1088 | return cio_start_handle_notoper(sch, lpm); |
1089 | } | 1089 | } |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | /** | 1092 | /** |
1093 | * cio_tm_intrg - perform interrogate function | 1093 | * cio_tm_intrg - perform interrogate function |
1094 | * @sch: subchannel on which to perform the interrogate function | 1094 | * @sch: subchannel on which to perform the interrogate function |
1095 | * | 1095 | * |
1096 | * If the specified subchannel is running in transport-mode, perform the | 1096 | * If the specified subchannel is running in transport-mode, perform the |
1097 | * interrogate function. Return zero on success, non-zero otherwise. | 1097 | * interrogate function. Return zero on success, non-zero otherwise. |
1098 | */ | 1098 | */ |
1099 | int cio_tm_intrg(struct subchannel *sch) | 1099 | int cio_tm_intrg(struct subchannel *sch) |
1100 | { | 1100 | { |
1101 | int cc; | 1101 | int cc; |
1102 | 1102 | ||
1103 | if (!to_io_private(sch)->orb.tm.b) | 1103 | if (!to_io_private(sch)->orb.tm.b) |
1104 | return -EINVAL; | 1104 | return -EINVAL; |
1105 | cc = xsch(sch->schid); | 1105 | cc = xsch(sch->schid); |
1106 | switch (cc) { | 1106 | switch (cc) { |
1107 | case 0: | 1107 | case 0: |
1108 | case 2: | 1108 | case 2: |
1109 | return 0; | 1109 | return 0; |
1110 | case 1: | 1110 | case 1: |
1111 | return -EBUSY; | 1111 | return -EBUSY; |
1112 | default: | 1112 | default: |
1113 | return -ENODEV; | 1113 | return -ENODEV; |
1114 | } | 1114 | } |
1115 | } | 1115 | } |
1116 | 1116 |
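To tie the two transport-mode entry points together, here is a hypothetical caller sketch. Only cio_tm_start_key(), cio_tm_intrg(), and the return conventions visible in the code above are taken from this file; my_driver_start_tcw() is invented, and the tcw is assumed to have been built elsewhere, so treat this as a sketch rather than driver guidance:

	/* Hypothetical driver-side helper (not part of this commit). */
	static int my_driver_start_tcw(struct subchannel *sch, struct tcw *tcw)
	{
		int rc;

		/* lpm == 0 falls back to sch->lpm; key 0 selects storage key 0
		 * (cio_tm_start_key() shifts the key nibble into place). */
		rc = cio_tm_start_key(sch, tcw, 0, 0);
		if (rc)
			return rc;	/* -EBUSY on cc 1/2, notoper handling on cc 3 */

		/* If the start never completes, the interrogate function asks
		 * the subchannel for its current state. */
		return cio_tm_intrg(sch);	/* -EINVAL if not in transport mode */
	}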