Commit 851755871c1f3184f4124c466e85881f17fa3226
Committed by: Avi Kivity
1 parent: 6dbf79e716
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
KVM: s390: Sanitize fpc registers for KVM_SET_FPU
commit 7eef87dc99e419b1cc051e4417c37e4744d7b661 (KVM: s390: fix register
setting) added a load of the floating point control register to the
KVM_SET_FPU path. Let's make sure that the fpc is valid.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
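Context for the one-line fix below: KVM_SET_FPU previously copied the
userspace-supplied fpc into vcpu->arch.guest_fpregs.fpc unmasked, and
restore_fp_regs() then loads that value into the real floating point
control register, so reserved bits set by userspace could reach the
hardware. The patch masks the value with FPC_VALID_MASK first,
mirroring the sanitizing that kvm_arch_vcpu_load() already does. As a
minimal userspace sketch of how an invalid value reaches this path
(assuming the usual /dev/kvm -> KVM_CREATE_VM -> KVM_CREATE_VCPU setup;
set_bogus_fpc and vcpu_fd are illustrative names, not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hand KVM an fpc with every bit set. Before this patch the value was
 * written to the guest fpc unmasked; with it, reserved bits are
 * cleared by FPC_VALID_MASK before restore_fp_regs() runs. */
static int set_bogus_fpc(int vcpu_fd)
{
        struct kvm_fpu fpu;

        memset(&fpu, 0, sizeof(fpu));
        fpu.fpc = 0xffffffff;   /* includes reserved fpc bits */
        return ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
}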
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
arch/s390/kvm/kvm-s390.c
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+       vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
            && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);