Commit f5c9803173848864d0c56108b9e102db0bf601de

Authored by Takuya Yoshikawa
Committed by Avi Kivity
Parent: 197717d581

KVM: update gfn_to_hva() to use gfn_to_hva_memslot()

Marcelo introduced gfn_to_hva_memslot() when he implemented
gfn_to_pfn_memslot(). Let's use this for gfn_to_hva() too.

Note: this also removes the unneeded parentheses around the return value, as flagged by checkpatch.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 1 changed file with 6 additions and 6 deletions (inline diff)
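In short (a condensed view drawn from the diff below, not a separate change): gfn_to_hva_memslot() is now defined earlier in the file, and gfn_to_hva() calls it instead of open-coding the slot-to-host-address arithmetic:

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* host virtual address = slot's userspace base + page offset within the slot */
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return gfn_to_hva_memslot(slot, gfn);	/* was an open-coded computation */
}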

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only need to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smb_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);


	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * 	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
-	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+	return gfn_to_hva_memslot(slot, gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
-
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
-}

980 pfn_t gfn_to_pfn_memslot(struct kvm *kvm, 980 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
981 struct kvm_memory_slot *slot, gfn_t gfn) 981 struct kvm_memory_slot *slot, gfn_t gfn)
982 { 982 {
983 unsigned long addr = gfn_to_hva_memslot(slot, gfn); 983 unsigned long addr = gfn_to_hva_memslot(slot, gfn);
984 return hva_to_pfn(kvm, addr); 984 return hva_to_pfn(kvm, addr);
985 } 985 }
986 986
987 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 987 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
988 { 988 {
989 pfn_t pfn; 989 pfn_t pfn;
990 990
991 pfn = gfn_to_pfn(kvm, gfn); 991 pfn = gfn_to_pfn(kvm, gfn);
992 if (!kvm_is_mmio_pfn(pfn)) 992 if (!kvm_is_mmio_pfn(pfn))
993 return pfn_to_page(pfn); 993 return pfn_to_page(pfn);
994 994
995 WARN_ON(kvm_is_mmio_pfn(pfn)); 995 WARN_ON(kvm_is_mmio_pfn(pfn));
996 996
997 get_page(bad_page); 997 get_page(bad_page);
998 return bad_page; 998 return bad_page;
999 } 999 }
1000 1000
1001 EXPORT_SYMBOL_GPL(gfn_to_page); 1001 EXPORT_SYMBOL_GPL(gfn_to_page);
1002 1002
1003 void kvm_release_page_clean(struct page *page) 1003 void kvm_release_page_clean(struct page *page)
1004 { 1004 {
1005 kvm_release_pfn_clean(page_to_pfn(page)); 1005 kvm_release_pfn_clean(page_to_pfn(page));
1006 } 1006 }
1007 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1007 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1008 1008
1009 void kvm_release_pfn_clean(pfn_t pfn) 1009 void kvm_release_pfn_clean(pfn_t pfn)
1010 { 1010 {
1011 if (!kvm_is_mmio_pfn(pfn)) 1011 if (!kvm_is_mmio_pfn(pfn))
1012 put_page(pfn_to_page(pfn)); 1012 put_page(pfn_to_page(pfn));
1013 } 1013 }
1014 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1014 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1015 1015
1016 void kvm_release_page_dirty(struct page *page) 1016 void kvm_release_page_dirty(struct page *page)
1017 { 1017 {
1018 kvm_release_pfn_dirty(page_to_pfn(page)); 1018 kvm_release_pfn_dirty(page_to_pfn(page));
1019 } 1019 }
1020 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1020 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1021 1021
1022 void kvm_release_pfn_dirty(pfn_t pfn) 1022 void kvm_release_pfn_dirty(pfn_t pfn)
1023 { 1023 {
1024 kvm_set_pfn_dirty(pfn); 1024 kvm_set_pfn_dirty(pfn);
1025 kvm_release_pfn_clean(pfn); 1025 kvm_release_pfn_clean(pfn);
1026 } 1026 }
1027 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 1027 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1028 1028
1029 void kvm_set_page_dirty(struct page *page) 1029 void kvm_set_page_dirty(struct page *page)
1030 { 1030 {
1031 kvm_set_pfn_dirty(page_to_pfn(page)); 1031 kvm_set_pfn_dirty(page_to_pfn(page));
1032 } 1032 }
1033 EXPORT_SYMBOL_GPL(kvm_set_page_dirty); 1033 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1034 1034
1035 void kvm_set_pfn_dirty(pfn_t pfn) 1035 void kvm_set_pfn_dirty(pfn_t pfn)
1036 { 1036 {
1037 if (!kvm_is_mmio_pfn(pfn)) { 1037 if (!kvm_is_mmio_pfn(pfn)) {
1038 struct page *page = pfn_to_page(pfn); 1038 struct page *page = pfn_to_page(pfn);
1039 if (!PageReserved(page)) 1039 if (!PageReserved(page))
1040 SetPageDirty(page); 1040 SetPageDirty(page);
1041 } 1041 }
1042 } 1042 }
1043 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1043 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1044 1044
1045 void kvm_set_pfn_accessed(pfn_t pfn) 1045 void kvm_set_pfn_accessed(pfn_t pfn)
1046 { 1046 {
1047 if (!kvm_is_mmio_pfn(pfn)) 1047 if (!kvm_is_mmio_pfn(pfn))
1048 mark_page_accessed(pfn_to_page(pfn)); 1048 mark_page_accessed(pfn_to_page(pfn));
1049 } 1049 }
1050 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1050 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1051 1051
1052 void kvm_get_pfn(pfn_t pfn) 1052 void kvm_get_pfn(pfn_t pfn)
1053 { 1053 {
1054 if (!kvm_is_mmio_pfn(pfn)) 1054 if (!kvm_is_mmio_pfn(pfn))
1055 get_page(pfn_to_page(pfn)); 1055 get_page(pfn_to_page(pfn));
1056 } 1056 }
1057 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1057 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1058 1058
1059 static int next_segment(unsigned long len, int offset) 1059 static int next_segment(unsigned long len, int offset)
1060 { 1060 {
1061 if (len > PAGE_SIZE - offset) 1061 if (len > PAGE_SIZE - offset)
1062 return PAGE_SIZE - offset; 1062 return PAGE_SIZE - offset;
1063 else 1063 else
1064 return len; 1064 return len;
1065 } 1065 }

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
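
/*
 * Hypothetical caller sketch (the struct and gpa below are invented for
 * illustration): a guest-physical object that may straddle a page
 * boundary needs no alignment care from the caller, since the loop
 * above advances gfn by gfn:
 *
 *	struct my_desc desc;
 *
 *	if (kvm_read_guest(kvm, desc_gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 */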

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
		unsigned long *p = memslot->dirty_bitmap +
					rel_gfn / BITS_PER_LONG;
		int offset = rel_gfn % BITS_PER_LONG;

		/* avoid RMW */
		if (!generic_test_le_bit(offset, p))
			generic___set_le_bit(offset, p);
	}
}
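
/*
 * Example of the bitmap arithmetic above (illustrative): with
 * BITS_PER_LONG == 64, a gfn 200 pages into the slot sets bit
 * 200 % 64 == 8 of word 200 / 64 == 3. Testing the bit first skips
 * the write entirely when the page is already marked dirty.
 */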

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
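
/*
 * The loop above is the standard prepare_to_wait()/schedule()/
 * finish_wait() idiom: the wakeup conditions are re-checked only after
 * the task has been queued on vcpu->wq, so a wakeup arriving between
 * the check and schedule() is not lost.
 */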

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
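
/*
 * Hedged userspace sketch (standard KVM API usage, not part of this
 * file): the path above is reached via the KVM_CREATE_VCPU ioctl on a
 * VM fd, and the value returned to userspace is the new vcpu fd:
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */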

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/*
		 * Pass p, not &sigset: a NULL argp must reach the helper
		 * as a NULL sigset pointer so that it clears sigset_active,
		 * rather than installing an uninitialized mask.
		 */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif
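
/*
 * In the compat path above, only dirty_bitmap needs translating: 32-bit
 * userspace passes a compat_uptr_t, which compat_ptr() widens into a
 * full user pointer before the native handler runs; the other fields
 * are copied through unchanged.
 */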

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = kvm_vm_compat_ioctl,
#endif
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
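
/*
 * Hedged userspace sketch (standard KVM API usage, not part of this
 * file): creating a VM starts from the /dev/kvm character device and
 * lands in kvm_dev_ioctl_create_vm() above:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */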

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;		/* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;		/* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;		/* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
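
/*
 * Hedged userspace sketch: KVM_GET_VCPU_MMAP_SIZE reports how much of a
 * vcpu fd should be mapped (kvm_run plus the optional pio and coalesced
 * MMIO pages counted above):
 *
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */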

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}
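
/*
 * Registration follows the RCU copy/update pattern: a new bus array is
 * built aside, published with rcu_assign_pointer(), and the old one is
 * freed only after synchronize_srcu_expedited() guarantees that no
 * reader (kvm_io_bus_read/write above, under rcu_dereference()) can
 * still see it.
 */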

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
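
/*
 * Each debugfs stat file stores the byte offset of a u32 counter inside
 * struct kvm (or struct kvm_vcpu, below) as its private data; a read of
 * the file sums that counter across every VM on vm_list.
 */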

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM] = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();