Commit 73880c80aa9c8dc353cd0ad26579023213cd5314
Committed by Avi Kivity
1 parent 1ed0ce000a
Exists in master and in 7 other branches

KVM: Break dependency between vcpu index in vcpus array and vcpu_id.

Archs are free to use vcpu_id as they see fit. For x86 it is used as the
vcpu's apic id. A new ioctl is added to configure the boot vcpu id, which
until now was assumed to be 0.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
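The new VM ioctl the message refers to is KVM_SET_BOOT_CPU_ID, guarded by the KVM_CAP_SET_BOOT_CPU_ID capability; both live in the generic/x86 parts of this commit, which are not among the hunks shown below. A minimal userspace sketch (mine, not from the patch) of how a VMM could pick a non-zero boot vcpu:

```c
/* Sketch only: exercises the VM ioctl this commit introduces.
 * Assumes a kernel that advertises KVM_CAP_SET_BOOT_CPU_ID. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Capability check goes to the /dev/kvm fd, as usual for KVM of this era. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID) <= 0) {
		fprintf(stderr, "kernel predates this commit; boot vcpu stays 0\n");
		return 1;
	}

	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* Make vcpu_id 1 the BSP; this has to happen before any KVM_CREATE_VCPU. */
	if (ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, 1ul) < 0)
		perror("KVM_SET_BOOT_CPU_ID");

	return 0;
}
```

On x86 the vcpu_id doubles as the apic id, so this is what lets userspace choose which apic id boots first while keeping the vcpus[] array index independent of it.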
Showing 9 changed files with 55 additions and 33 deletions
arch/ia64/include/asm/kvm_host.h
1 | /* | 1 | /* |
2 | * kvm_host.h: used for kvm module, and hold ia64-specific sections. | 2 | * kvm_host.h: used for kvm module, and hold ia64-specific sections. |
3 | * | 3 | * |
4 | * Copyright (C) 2007, Intel Corporation. | 4 | * Copyright (C) 2007, Intel Corporation. |
5 | * | 5 | * |
6 | * Xiantao Zhang <xiantao.zhang@intel.com> | 6 | * Xiantao Zhang <xiantao.zhang@intel.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms and conditions of the GNU General Public License, | 9 | * under the terms and conditions of the GNU General Public License, |
10 | * version 2, as published by the Free Software Foundation. | 10 | * version 2, as published by the Free Software Foundation. |
11 | * | 11 | * |
12 | * This program is distributed in the hope it will be useful, but WITHOUT | 12 | * This program is distributed in the hope it will be useful, but WITHOUT |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
15 | * more details. | 15 | * more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License along with | 17 | * You should have received a copy of the GNU General Public License along with |
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 19 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifndef __ASM_KVM_HOST_H | 23 | #ifndef __ASM_KVM_HOST_H |
24 | #define __ASM_KVM_HOST_H | 24 | #define __ASM_KVM_HOST_H |
25 | 25 | ||
26 | #define KVM_MEMORY_SLOTS 32 | 26 | #define KVM_MEMORY_SLOTS 32 |
27 | /* memory slots that does not exposed to userspace */ | 27 | /* memory slots that does not exposed to userspace */ |
28 | #define KVM_PRIVATE_MEM_SLOTS 4 | 28 | #define KVM_PRIVATE_MEM_SLOTS 4 |
29 | 29 | ||
30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
31 | 31 | ||
32 | /* define exit reasons from vmm to kvm*/ | 32 | /* define exit reasons from vmm to kvm*/ |
33 | #define EXIT_REASON_VM_PANIC 0 | 33 | #define EXIT_REASON_VM_PANIC 0 |
34 | #define EXIT_REASON_MMIO_INSTRUCTION 1 | 34 | #define EXIT_REASON_MMIO_INSTRUCTION 1 |
35 | #define EXIT_REASON_PAL_CALL 2 | 35 | #define EXIT_REASON_PAL_CALL 2 |
36 | #define EXIT_REASON_SAL_CALL 3 | 36 | #define EXIT_REASON_SAL_CALL 3 |
37 | #define EXIT_REASON_SWITCH_RR6 4 | 37 | #define EXIT_REASON_SWITCH_RR6 4 |
38 | #define EXIT_REASON_VM_DESTROY 5 | 38 | #define EXIT_REASON_VM_DESTROY 5 |
39 | #define EXIT_REASON_EXTERNAL_INTERRUPT 6 | 39 | #define EXIT_REASON_EXTERNAL_INTERRUPT 6 |
40 | #define EXIT_REASON_IPI 7 | 40 | #define EXIT_REASON_IPI 7 |
41 | #define EXIT_REASON_PTC_G 8 | 41 | #define EXIT_REASON_PTC_G 8 |
42 | #define EXIT_REASON_DEBUG 20 | 42 | #define EXIT_REASON_DEBUG 20 |
43 | 43 | ||
44 | /*Define vmm address space and vm data space.*/ | 44 | /*Define vmm address space and vm data space.*/ |
45 | #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20) | 45 | #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20) |
46 | #define KVM_VMM_SHIFT 24 | 46 | #define KVM_VMM_SHIFT 24 |
47 | #define KVM_VMM_BASE 0xD000000000000000 | 47 | #define KVM_VMM_BASE 0xD000000000000000 |
48 | #define VMM_SIZE (__IA64_UL_CONST(8)<<20) | 48 | #define VMM_SIZE (__IA64_UL_CONST(8)<<20) |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * Define vm_buffer, used by PAL Services, base address. | 51 | * Define vm_buffer, used by PAL Services, base address. |
52 | * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M | 52 | * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M |
53 | */ | 53 | */ |
54 | #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) | 54 | #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) |
55 | #define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20) | 55 | #define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20) |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * kvm guest's data area looks as follow: | 58 | * kvm guest's data area looks as follow: |
59 | * | 59 | * |
60 | * +----------------------+ ------- KVM_VM_DATA_SIZE | 60 | * +----------------------+ ------- KVM_VM_DATA_SIZE |
61 | * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET | 61 | * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET |
62 | * | | | / | | 62 | * | | | / | |
63 | * | .......... | | /vcpu's struct&stack | | 63 | * | .......... | | /vcpu's struct&stack | |
64 | * | .......... | | /---------------------|---- 0 | 64 | * | .......... | | /---------------------|---- 0 |
65 | * | vcpu[5]'s data | | / vpd | | 65 | * | vcpu[5]'s data | | / vpd | |
66 | * | vcpu[4]'s data | |/-----------------------| | 66 | * | vcpu[4]'s data | |/-----------------------| |
67 | * | vcpu[3]'s data | / vtlb | | 67 | * | vcpu[3]'s data | / vtlb | |
68 | * | vcpu[2]'s data | /|------------------------| | 68 | * | vcpu[2]'s data | /|------------------------| |
69 | * | vcpu[1]'s data |/ | vhpt | | 69 | * | vcpu[1]'s data |/ | vhpt | |
70 | * | vcpu[0]'s data |____________________________| | 70 | * | vcpu[0]'s data |____________________________| |
71 | * +----------------------+ | | 71 | * +----------------------+ | |
72 | * | memory dirty log | | | 72 | * | memory dirty log | | |
73 | * +----------------------+ | | 73 | * +----------------------+ | |
74 | * | vm's data struct | | | 74 | * | vm's data struct | | |
75 | * +----------------------+ | | 75 | * +----------------------+ | |
76 | * | | | | 76 | * | | | |
77 | * | | | | 77 | * | | | |
78 | * | | | | 78 | * | | | |
79 | * | | | | 79 | * | | | |
80 | * | | | | 80 | * | | | |
81 | * | | | | 81 | * | | | |
82 | * | | | | 82 | * | | | |
83 | * | vm's p2m table | | | 83 | * | vm's p2m table | | |
84 | * | | | | 84 | * | | | |
85 | * | | | | 85 | * | | | |
86 | * | | | | | 86 | * | | | | |
87 | * vm's data->| | | | | 87 | * vm's data->| | | | |
88 | * +----------------------+ ------- 0 | 88 | * +----------------------+ ------- 0 |
89 | * To support large memory, needs to increase the size of p2m. | 89 | * To support large memory, needs to increase the size of p2m. |
90 | * To support more vcpus, needs to ensure it has enough space to | 90 | * To support more vcpus, needs to ensure it has enough space to |
91 | * hold vcpus' data. | 91 | * hold vcpus' data. |
92 | */ | 92 | */ |
93 | 93 | ||
94 | #define KVM_VM_DATA_SHIFT 26 | 94 | #define KVM_VM_DATA_SHIFT 26 |
95 | #define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT) | 95 | #define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT) |
96 | #define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE) | 96 | #define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE) |
97 | 97 | ||
98 | #define KVM_P2M_BASE KVM_VM_DATA_BASE | 98 | #define KVM_P2M_BASE KVM_VM_DATA_BASE |
99 | #define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20) | 99 | #define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20) |
100 | 100 | ||
101 | #define VHPT_SHIFT 16 | 101 | #define VHPT_SHIFT 16 |
102 | #define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT) | 102 | #define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT) |
103 | #define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5)) | 103 | #define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5)) |
104 | 104 | ||
105 | #define VTLB_SHIFT 16 | 105 | #define VTLB_SHIFT 16 |
106 | #define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT) | 106 | #define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT) |
107 | #define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5)) | 107 | #define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5)) |
108 | 108 | ||
109 | #define VPD_SHIFT 16 | 109 | #define VPD_SHIFT 16 |
110 | #define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT) | 110 | #define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT) |
111 | 111 | ||
112 | #define VCPU_STRUCT_SHIFT 16 | 112 | #define VCPU_STRUCT_SHIFT 16 |
113 | #define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) | 113 | #define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h | 116 | * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h |
117 | */ | 117 | */ |
118 | #define KVM_STK_SHIFT 16 | 118 | #define KVM_STK_SHIFT 16 |
119 | #define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT) | 119 | #define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT) |
120 | 120 | ||
121 | #define KVM_VM_STRUCT_SHIFT 19 | 121 | #define KVM_VM_STRUCT_SHIFT 19 |
122 | #define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) | 122 | #define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) |
123 | 123 | ||
124 | #define KVM_MEM_DIRY_LOG_SHIFT 19 | 124 | #define KVM_MEM_DIRY_LOG_SHIFT 19 |
125 | #define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT) | 125 | #define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT) |
126 | 126 | ||
127 | #ifndef __ASSEMBLY__ | 127 | #ifndef __ASSEMBLY__ |
128 | 128 | ||
129 | /*Define the max vcpus and memory for Guests.*/ | 129 | /*Define the max vcpus and memory for Guests.*/ |
130 | #define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\ | 130 | #define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\ |
131 | KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data) | 131 | KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data) |
132 | #define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT) | 132 | #define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT) |
133 | 133 | ||
134 | #define VMM_LOG_LEN 256 | 134 | #define VMM_LOG_LEN 256 |
135 | 135 | ||
136 | #include <linux/types.h> | 136 | #include <linux/types.h> |
137 | #include <linux/mm.h> | 137 | #include <linux/mm.h> |
138 | #include <linux/kvm.h> | 138 | #include <linux/kvm.h> |
139 | #include <linux/kvm_para.h> | 139 | #include <linux/kvm_para.h> |
140 | #include <linux/kvm_types.h> | 140 | #include <linux/kvm_types.h> |
141 | 141 | ||
142 | #include <asm/pal.h> | 142 | #include <asm/pal.h> |
143 | #include <asm/sal.h> | 143 | #include <asm/sal.h> |
144 | #include <asm/page.h> | 144 | #include <asm/page.h> |
145 | 145 | ||
146 | struct kvm_vcpu_data { | 146 | struct kvm_vcpu_data { |
147 | char vcpu_vhpt[VHPT_SIZE]; | 147 | char vcpu_vhpt[VHPT_SIZE]; |
148 | char vcpu_vtlb[VTLB_SIZE]; | 148 | char vcpu_vtlb[VTLB_SIZE]; |
149 | char vcpu_vpd[VPD_SIZE]; | 149 | char vcpu_vpd[VPD_SIZE]; |
150 | char vcpu_struct[VCPU_STRUCT_SIZE]; | 150 | char vcpu_struct[VCPU_STRUCT_SIZE]; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kvm_vm_data { | 153 | struct kvm_vm_data { |
154 | char kvm_p2m[KVM_P2M_SIZE]; | 154 | char kvm_p2m[KVM_P2M_SIZE]; |
155 | char kvm_vm_struct[KVM_VM_STRUCT_SIZE]; | 155 | char kvm_vm_struct[KVM_VM_STRUCT_SIZE]; |
156 | char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; | 156 | char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; |
157 | struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; | 157 | struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | #define VCPU_BASE(n) (KVM_VM_DATA_BASE + \ | 160 | #define VCPU_BASE(n) (KVM_VM_DATA_BASE + \ |
161 | offsetof(struct kvm_vm_data, vcpu_data[n])) | 161 | offsetof(struct kvm_vm_data, vcpu_data[n])) |
162 | #define KVM_VM_BASE (KVM_VM_DATA_BASE + \ | 162 | #define KVM_VM_BASE (KVM_VM_DATA_BASE + \ |
163 | offsetof(struct kvm_vm_data, kvm_vm_struct)) | 163 | offsetof(struct kvm_vm_data, kvm_vm_struct)) |
164 | #define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ | 164 | #define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ |
165 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log) | 165 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log) |
166 | 166 | ||
167 | #define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt)) | 167 | #define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt)) |
168 | #define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb)) | 168 | #define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb)) |
169 | #define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd)) | 169 | #define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd)) |
170 | #define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \ | 170 | #define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \ |
171 | offsetof(struct kvm_vcpu_data, vcpu_struct)) | 171 | offsetof(struct kvm_vcpu_data, vcpu_struct)) |
172 | 172 | ||
173 | /*IO section definitions*/ | 173 | /*IO section definitions*/ |
174 | #define IOREQ_READ 1 | 174 | #define IOREQ_READ 1 |
175 | #define IOREQ_WRITE 0 | 175 | #define IOREQ_WRITE 0 |
176 | 176 | ||
177 | #define STATE_IOREQ_NONE 0 | 177 | #define STATE_IOREQ_NONE 0 |
178 | #define STATE_IOREQ_READY 1 | 178 | #define STATE_IOREQ_READY 1 |
179 | #define STATE_IOREQ_INPROCESS 2 | 179 | #define STATE_IOREQ_INPROCESS 2 |
180 | #define STATE_IORESP_READY 3 | 180 | #define STATE_IORESP_READY 3 |
181 | 181 | ||
182 | /*Guest Physical address layout.*/ | 182 | /*Guest Physical address layout.*/ |
183 | #define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ | 183 | #define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ |
184 | #define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ | 184 | #define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ |
185 | #define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ | 185 | #define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ |
186 | #define GPFN_PIB (3UL << 60) /* PIB base */ | 186 | #define GPFN_PIB (3UL << 60) /* PIB base */ |
187 | #define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ | 187 | #define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ |
188 | #define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ | 188 | #define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ |
189 | #define GPFN_GFW (6UL << 60) /* Guest Firmware */ | 189 | #define GPFN_GFW (6UL << 60) /* Guest Firmware */ |
190 | #define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ | 190 | #define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ |
191 | 191 | ||
192 | #define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ | 192 | #define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ |
193 | #define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ | 193 | #define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ |
194 | #define INVALID_MFN (~0UL) | 194 | #define INVALID_MFN (~0UL) |
195 | #define MEM_G (1UL << 30) | 195 | #define MEM_G (1UL << 30) |
196 | #define MEM_M (1UL << 20) | 196 | #define MEM_M (1UL << 20) |
197 | #define MMIO_START (3 * MEM_G) | 197 | #define MMIO_START (3 * MEM_G) |
198 | #define MMIO_SIZE (512 * MEM_M) | 198 | #define MMIO_SIZE (512 * MEM_M) |
199 | #define VGA_IO_START 0xA0000UL | 199 | #define VGA_IO_START 0xA0000UL |
200 | #define VGA_IO_SIZE 0x20000 | 200 | #define VGA_IO_SIZE 0x20000 |
201 | #define LEGACY_IO_START (MMIO_START + MMIO_SIZE) | 201 | #define LEGACY_IO_START (MMIO_START + MMIO_SIZE) |
202 | #define LEGACY_IO_SIZE (64 * MEM_M) | 202 | #define LEGACY_IO_SIZE (64 * MEM_M) |
203 | #define IO_SAPIC_START 0xfec00000UL | 203 | #define IO_SAPIC_START 0xfec00000UL |
204 | #define IO_SAPIC_SIZE 0x100000 | 204 | #define IO_SAPIC_SIZE 0x100000 |
205 | #define PIB_START 0xfee00000UL | 205 | #define PIB_START 0xfee00000UL |
206 | #define PIB_SIZE 0x200000 | 206 | #define PIB_SIZE 0x200000 |
207 | #define GFW_START (4 * MEM_G - 16 * MEM_M) | 207 | #define GFW_START (4 * MEM_G - 16 * MEM_M) |
208 | #define GFW_SIZE (16 * MEM_M) | 208 | #define GFW_SIZE (16 * MEM_M) |
209 | 209 | ||
210 | /*Deliver mode, defined for ioapic.c*/ | 210 | /*Deliver mode, defined for ioapic.c*/ |
211 | #define dest_Fixed IOSAPIC_FIXED | 211 | #define dest_Fixed IOSAPIC_FIXED |
212 | #define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY | 212 | #define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY |
213 | 213 | ||
214 | #define NMI_VECTOR 2 | 214 | #define NMI_VECTOR 2 |
215 | #define ExtINT_VECTOR 0 | 215 | #define ExtINT_VECTOR 0 |
216 | #define NULL_VECTOR (-1) | 216 | #define NULL_VECTOR (-1) |
217 | #define IA64_SPURIOUS_INT_VECTOR 0x0f | 217 | #define IA64_SPURIOUS_INT_VECTOR 0x0f |
218 | 218 | ||
219 | #define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24) | 219 | #define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24) |
220 | 220 | ||
221 | /* | 221 | /* |
222 | *Delivery mode | 222 | *Delivery mode |
223 | */ | 223 | */ |
224 | #define SAPIC_DELIV_SHIFT 8 | 224 | #define SAPIC_DELIV_SHIFT 8 |
225 | #define SAPIC_FIXED 0x0 | 225 | #define SAPIC_FIXED 0x0 |
226 | #define SAPIC_LOWEST_PRIORITY 0x1 | 226 | #define SAPIC_LOWEST_PRIORITY 0x1 |
227 | #define SAPIC_PMI 0x2 | 227 | #define SAPIC_PMI 0x2 |
228 | #define SAPIC_NMI 0x4 | 228 | #define SAPIC_NMI 0x4 |
229 | #define SAPIC_INIT 0x5 | 229 | #define SAPIC_INIT 0x5 |
230 | #define SAPIC_EXTINT 0x7 | 230 | #define SAPIC_EXTINT 0x7 |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * vcpu->requests bit members for arch | 233 | * vcpu->requests bit members for arch |
234 | */ | 234 | */ |
235 | #define KVM_REQ_PTC_G 32 | 235 | #define KVM_REQ_PTC_G 32 |
236 | #define KVM_REQ_RESUME 33 | 236 | #define KVM_REQ_RESUME 33 |
237 | 237 | ||
238 | #define KVM_PAGES_PER_HPAGE 1 | 238 | #define KVM_PAGES_PER_HPAGE 1 |
239 | 239 | ||
240 | struct kvm; | 240 | struct kvm; |
241 | struct kvm_vcpu; | 241 | struct kvm_vcpu; |
242 | 242 | ||
243 | struct kvm_mmio_req { | 243 | struct kvm_mmio_req { |
244 | uint64_t addr; /* physical address */ | 244 | uint64_t addr; /* physical address */ |
245 | uint64_t size; /* size in bytes */ | 245 | uint64_t size; /* size in bytes */ |
246 | uint64_t data; /* data (or paddr of data) */ | 246 | uint64_t data; /* data (or paddr of data) */ |
247 | uint8_t state:4; | 247 | uint8_t state:4; |
248 | uint8_t dir:1; /* 1=read, 0=write */ | 248 | uint8_t dir:1; /* 1=read, 0=write */ |
249 | }; | 249 | }; |
250 | 250 | ||
251 | /*Pal data struct */ | 251 | /*Pal data struct */ |
252 | struct kvm_pal_call{ | 252 | struct kvm_pal_call{ |
253 | /*In area*/ | 253 | /*In area*/ |
254 | uint64_t gr28; | 254 | uint64_t gr28; |
255 | uint64_t gr29; | 255 | uint64_t gr29; |
256 | uint64_t gr30; | 256 | uint64_t gr30; |
257 | uint64_t gr31; | 257 | uint64_t gr31; |
258 | /*Out area*/ | 258 | /*Out area*/ |
259 | struct ia64_pal_retval ret; | 259 | struct ia64_pal_retval ret; |
260 | }; | 260 | }; |
261 | 261 | ||
262 | /* Sal data structure */ | 262 | /* Sal data structure */ |
263 | struct kvm_sal_call{ | 263 | struct kvm_sal_call{ |
264 | /*In area*/ | 264 | /*In area*/ |
265 | uint64_t in0; | 265 | uint64_t in0; |
266 | uint64_t in1; | 266 | uint64_t in1; |
267 | uint64_t in2; | 267 | uint64_t in2; |
268 | uint64_t in3; | 268 | uint64_t in3; |
269 | uint64_t in4; | 269 | uint64_t in4; |
270 | uint64_t in5; | 270 | uint64_t in5; |
271 | uint64_t in6; | 271 | uint64_t in6; |
272 | uint64_t in7; | 272 | uint64_t in7; |
273 | struct sal_ret_values ret; | 273 | struct sal_ret_values ret; |
274 | }; | 274 | }; |
275 | 275 | ||
276 | /*Guest change rr6*/ | 276 | /*Guest change rr6*/ |
277 | struct kvm_switch_rr6 { | 277 | struct kvm_switch_rr6 { |
278 | uint64_t old_rr; | 278 | uint64_t old_rr; |
279 | uint64_t new_rr; | 279 | uint64_t new_rr; |
280 | }; | 280 | }; |
281 | 281 | ||
282 | union ia64_ipi_a{ | 282 | union ia64_ipi_a{ |
283 | unsigned long val; | 283 | unsigned long val; |
284 | struct { | 284 | struct { |
285 | unsigned long rv : 3; | 285 | unsigned long rv : 3; |
286 | unsigned long ir : 1; | 286 | unsigned long ir : 1; |
287 | unsigned long eid : 8; | 287 | unsigned long eid : 8; |
288 | unsigned long id : 8; | 288 | unsigned long id : 8; |
289 | unsigned long ib_base : 44; | 289 | unsigned long ib_base : 44; |
290 | }; | 290 | }; |
291 | }; | 291 | }; |
292 | 292 | ||
293 | union ia64_ipi_d { | 293 | union ia64_ipi_d { |
294 | unsigned long val; | 294 | unsigned long val; |
295 | struct { | 295 | struct { |
296 | unsigned long vector : 8; | 296 | unsigned long vector : 8; |
297 | unsigned long dm : 3; | 297 | unsigned long dm : 3; |
298 | unsigned long ig : 53; | 298 | unsigned long ig : 53; |
299 | }; | 299 | }; |
300 | }; | 300 | }; |
301 | 301 | ||
302 | /*ipi check exit data*/ | 302 | /*ipi check exit data*/ |
303 | struct kvm_ipi_data{ | 303 | struct kvm_ipi_data{ |
304 | union ia64_ipi_a addr; | 304 | union ia64_ipi_a addr; |
305 | union ia64_ipi_d data; | 305 | union ia64_ipi_d data; |
306 | }; | 306 | }; |
307 | 307 | ||
308 | /*global purge data*/ | 308 | /*global purge data*/ |
309 | struct kvm_ptc_g { | 309 | struct kvm_ptc_g { |
310 | unsigned long vaddr; | 310 | unsigned long vaddr; |
311 | unsigned long rr; | 311 | unsigned long rr; |
312 | unsigned long ps; | 312 | unsigned long ps; |
313 | struct kvm_vcpu *vcpu; | 313 | struct kvm_vcpu *vcpu; |
314 | }; | 314 | }; |
315 | 315 | ||
316 | /*Exit control data */ | 316 | /*Exit control data */ |
317 | struct exit_ctl_data{ | 317 | struct exit_ctl_data{ |
318 | uint32_t exit_reason; | 318 | uint32_t exit_reason; |
319 | uint32_t vm_status; | 319 | uint32_t vm_status; |
320 | union { | 320 | union { |
321 | struct kvm_mmio_req ioreq; | 321 | struct kvm_mmio_req ioreq; |
322 | struct kvm_pal_call pal_data; | 322 | struct kvm_pal_call pal_data; |
323 | struct kvm_sal_call sal_data; | 323 | struct kvm_sal_call sal_data; |
324 | struct kvm_switch_rr6 rr_data; | 324 | struct kvm_switch_rr6 rr_data; |
325 | struct kvm_ipi_data ipi_data; | 325 | struct kvm_ipi_data ipi_data; |
326 | struct kvm_ptc_g ptc_g_data; | 326 | struct kvm_ptc_g ptc_g_data; |
327 | } u; | 327 | } u; |
328 | }; | 328 | }; |
329 | 329 | ||
330 | union pte_flags { | 330 | union pte_flags { |
331 | unsigned long val; | 331 | unsigned long val; |
332 | struct { | 332 | struct { |
333 | unsigned long p : 1; /*0 */ | 333 | unsigned long p : 1; /*0 */ |
334 | unsigned long : 1; /* 1 */ | 334 | unsigned long : 1; /* 1 */ |
335 | unsigned long ma : 3; /* 2-4 */ | 335 | unsigned long ma : 3; /* 2-4 */ |
336 | unsigned long a : 1; /* 5 */ | 336 | unsigned long a : 1; /* 5 */ |
337 | unsigned long d : 1; /* 6 */ | 337 | unsigned long d : 1; /* 6 */ |
338 | unsigned long pl : 2; /* 7-8 */ | 338 | unsigned long pl : 2; /* 7-8 */ |
339 | unsigned long ar : 3; /* 9-11 */ | 339 | unsigned long ar : 3; /* 9-11 */ |
340 | unsigned long ppn : 38; /* 12-49 */ | 340 | unsigned long ppn : 38; /* 12-49 */ |
341 | unsigned long : 2; /* 50-51 */ | 341 | unsigned long : 2; /* 50-51 */ |
342 | unsigned long ed : 1; /* 52 */ | 342 | unsigned long ed : 1; /* 52 */ |
343 | }; | 343 | }; |
344 | }; | 344 | }; |
345 | 345 | ||
346 | union ia64_pta { | 346 | union ia64_pta { |
347 | unsigned long val; | 347 | unsigned long val; |
348 | struct { | 348 | struct { |
349 | unsigned long ve : 1; | 349 | unsigned long ve : 1; |
350 | unsigned long reserved0 : 1; | 350 | unsigned long reserved0 : 1; |
351 | unsigned long size : 6; | 351 | unsigned long size : 6; |
352 | unsigned long vf : 1; | 352 | unsigned long vf : 1; |
353 | unsigned long reserved1 : 6; | 353 | unsigned long reserved1 : 6; |
354 | unsigned long base : 49; | 354 | unsigned long base : 49; |
355 | }; | 355 | }; |
356 | }; | 356 | }; |
357 | 357 | ||
358 | struct thash_cb { | 358 | struct thash_cb { |
359 | /* THASH base information */ | 359 | /* THASH base information */ |
360 | struct thash_data *hash; /* hash table pointer */ | 360 | struct thash_data *hash; /* hash table pointer */ |
361 | union ia64_pta pta; | 361 | union ia64_pta pta; |
362 | int num; | 362 | int num; |
363 | }; | 363 | }; |
364 | 364 | ||
365 | struct kvm_vcpu_stat { | 365 | struct kvm_vcpu_stat { |
366 | }; | 366 | }; |
367 | 367 | ||
368 | struct kvm_vcpu_arch { | 368 | struct kvm_vcpu_arch { |
369 | int launched; | 369 | int launched; |
370 | int last_exit; | 370 | int last_exit; |
371 | int last_run_cpu; | 371 | int last_run_cpu; |
372 | int vmm_tr_slot; | 372 | int vmm_tr_slot; |
373 | int vm_tr_slot; | 373 | int vm_tr_slot; |
374 | int sn_rtc_tr_slot; | 374 | int sn_rtc_tr_slot; |
375 | 375 | ||
376 | #define KVM_MP_STATE_RUNNABLE 0 | 376 | #define KVM_MP_STATE_RUNNABLE 0 |
377 | #define KVM_MP_STATE_UNINITIALIZED 1 | 377 | #define KVM_MP_STATE_UNINITIALIZED 1 |
378 | #define KVM_MP_STATE_INIT_RECEIVED 2 | 378 | #define KVM_MP_STATE_INIT_RECEIVED 2 |
379 | #define KVM_MP_STATE_HALTED 3 | 379 | #define KVM_MP_STATE_HALTED 3 |
380 | int mp_state; | 380 | int mp_state; |
381 | 381 | ||
382 | #define MAX_PTC_G_NUM 3 | 382 | #define MAX_PTC_G_NUM 3 |
383 | int ptc_g_count; | 383 | int ptc_g_count; |
384 | struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM]; | 384 | struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM]; |
385 | 385 | ||
386 | /*halt timer to wake up sleepy vcpus*/ | 386 | /*halt timer to wake up sleepy vcpus*/ |
387 | struct hrtimer hlt_timer; | 387 | struct hrtimer hlt_timer; |
388 | long ht_active; | 388 | long ht_active; |
389 | 389 | ||
390 | struct kvm_lapic *apic; /* kernel irqchip context */ | 390 | struct kvm_lapic *apic; /* kernel irqchip context */ |
391 | struct vpd *vpd; | 391 | struct vpd *vpd; |
392 | 392 | ||
393 | /* Exit data for vmm_transition*/ | 393 | /* Exit data for vmm_transition*/ |
394 | struct exit_ctl_data exit_data; | 394 | struct exit_ctl_data exit_data; |
395 | 395 | ||
396 | cpumask_t cache_coherent_map; | 396 | cpumask_t cache_coherent_map; |
397 | 397 | ||
398 | unsigned long vmm_rr; | 398 | unsigned long vmm_rr; |
399 | unsigned long host_rr6; | 399 | unsigned long host_rr6; |
400 | unsigned long psbits[8]; | 400 | unsigned long psbits[8]; |
401 | unsigned long cr_iipa; | 401 | unsigned long cr_iipa; |
402 | unsigned long cr_isr; | 402 | unsigned long cr_isr; |
403 | unsigned long vsa_base; | 403 | unsigned long vsa_base; |
404 | unsigned long dirty_log_lock_pa; | 404 | unsigned long dirty_log_lock_pa; |
405 | unsigned long __gp; | 405 | unsigned long __gp; |
406 | /* TR and TC. */ | 406 | /* TR and TC. */ |
407 | struct thash_data itrs[NITRS]; | 407 | struct thash_data itrs[NITRS]; |
408 | struct thash_data dtrs[NDTRS]; | 408 | struct thash_data dtrs[NDTRS]; |
409 | /* Bit is set if there is a tr/tc for the region. */ | 409 | /* Bit is set if there is a tr/tc for the region. */ |
410 | unsigned char itr_regions; | 410 | unsigned char itr_regions; |
411 | unsigned char dtr_regions; | 411 | unsigned char dtr_regions; |
412 | unsigned char tc_regions; | 412 | unsigned char tc_regions; |
413 | /* purge all */ | 413 | /* purge all */ |
414 | unsigned long ptce_base; | 414 | unsigned long ptce_base; |
415 | unsigned long ptce_count[2]; | 415 | unsigned long ptce_count[2]; |
416 | unsigned long ptce_stride[2]; | 416 | unsigned long ptce_stride[2]; |
417 | /* itc/itm */ | 417 | /* itc/itm */ |
418 | unsigned long last_itc; | 418 | unsigned long last_itc; |
419 | long itc_offset; | 419 | long itc_offset; |
420 | unsigned long itc_check; | 420 | unsigned long itc_check; |
421 | unsigned long timer_check; | 421 | unsigned long timer_check; |
422 | unsigned int timer_pending; | 422 | unsigned int timer_pending; |
423 | unsigned int timer_fired; | 423 | unsigned int timer_fired; |
424 | 424 | ||
425 | unsigned long vrr[8]; | 425 | unsigned long vrr[8]; |
426 | unsigned long ibr[8]; | 426 | unsigned long ibr[8]; |
427 | unsigned long dbr[8]; | 427 | unsigned long dbr[8]; |
428 | unsigned long insvc[4]; /* Interrupt in service. */ | 428 | unsigned long insvc[4]; /* Interrupt in service. */ |
429 | unsigned long xtp; | 429 | unsigned long xtp; |
430 | 430 | ||
431 | unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ | 431 | unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ |
432 | unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ | 432 | unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ |
433 | unsigned long metaphysical_saved_rr0; /* from kvm_arch */ | 433 | unsigned long metaphysical_saved_rr0; /* from kvm_arch */ |
434 | unsigned long metaphysical_saved_rr4; /* from kvm_arch */ | 434 | unsigned long metaphysical_saved_rr4; /* from kvm_arch */ |
435 | unsigned long fp_psr; /*used for lazy float register */ | 435 | unsigned long fp_psr; /*used for lazy float register */ |
436 | unsigned long saved_gp; | 436 | unsigned long saved_gp; |
437 | /*for phycial emulation */ | 437 | /*for phycial emulation */ |
438 | int mode_flags; | 438 | int mode_flags; |
439 | struct thash_cb vtlb; | 439 | struct thash_cb vtlb; |
440 | struct thash_cb vhpt; | 440 | struct thash_cb vhpt; |
441 | char irq_check; | 441 | char irq_check; |
442 | char irq_new_pending; | 442 | char irq_new_pending; |
443 | 443 | ||
444 | unsigned long opcode; | 444 | unsigned long opcode; |
445 | unsigned long cause; | 445 | unsigned long cause; |
446 | char log_buf[VMM_LOG_LEN]; | 446 | char log_buf[VMM_LOG_LEN]; |
447 | union context host; | 447 | union context host; |
448 | union context guest; | 448 | union context guest; |
449 | }; | 449 | }; |
450 | 450 | ||
451 | struct kvm_vm_stat { | 451 | struct kvm_vm_stat { |
452 | u64 remote_tlb_flush; | 452 | u64 remote_tlb_flush; |
453 | }; | 453 | }; |
454 | 454 | ||
455 | struct kvm_sal_data { | 455 | struct kvm_sal_data { |
456 | unsigned long boot_ip; | 456 | unsigned long boot_ip; |
457 | unsigned long boot_gp; | 457 | unsigned long boot_gp; |
458 | }; | 458 | }; |
459 | 459 | ||
460 | struct kvm_arch { | 460 | struct kvm_arch { |
461 | spinlock_t dirty_log_lock; | 461 | spinlock_t dirty_log_lock; |
462 | 462 | ||
463 | unsigned long vm_base; | 463 | unsigned long vm_base; |
464 | unsigned long metaphysical_rr0; | 464 | unsigned long metaphysical_rr0; |
465 | unsigned long metaphysical_rr4; | 465 | unsigned long metaphysical_rr4; |
466 | unsigned long vmm_init_rr; | 466 | unsigned long vmm_init_rr; |
467 | 467 | ||
468 | int online_vcpus; | ||
469 | int is_sn2; | 468 | int is_sn2; |
470 | 469 | ||
471 | struct kvm_ioapic *vioapic; | 470 | struct kvm_ioapic *vioapic; |
472 | struct kvm_vm_stat stat; | 471 | struct kvm_vm_stat stat; |
473 | struct kvm_sal_data rdv_sal_data; | 472 | struct kvm_sal_data rdv_sal_data; |
474 | 473 | ||
475 | struct list_head assigned_dev_head; | 474 | struct list_head assigned_dev_head; |
476 | struct iommu_domain *iommu_domain; | 475 | struct iommu_domain *iommu_domain; |
477 | int iommu_flags; | 476 | int iommu_flags; |
478 | struct hlist_head irq_ack_notifier_list; | 477 | struct hlist_head irq_ack_notifier_list; |
479 | 478 | ||
480 | unsigned long irq_sources_bitmap; | 479 | unsigned long irq_sources_bitmap; |
481 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | 480 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; |
482 | }; | 481 | }; |
483 | 482 | ||
484 | union cpuid3_t { | 483 | union cpuid3_t { |
485 | u64 value; | 484 | u64 value; |
486 | struct { | 485 | struct { |
487 | u64 number : 8; | 486 | u64 number : 8; |
488 | u64 revision : 8; | 487 | u64 revision : 8; |
489 | u64 model : 8; | 488 | u64 model : 8; |
490 | u64 family : 8; | 489 | u64 family : 8; |
491 | u64 archrev : 8; | 490 | u64 archrev : 8; |
492 | u64 rv : 24; | 491 | u64 rv : 24; |
493 | }; | 492 | }; |
494 | }; | 493 | }; |
495 | 494 | ||
496 | struct kvm_pt_regs { | 495 | struct kvm_pt_regs { |
497 | /* The following registers are saved by SAVE_MIN: */ | 496 | /* The following registers are saved by SAVE_MIN: */ |
498 | unsigned long b6; /* scratch */ | 497 | unsigned long b6; /* scratch */ |
499 | unsigned long b7; /* scratch */ | 498 | unsigned long b7; /* scratch */ |
500 | 499 | ||
501 | unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ | 500 | unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ |
502 | unsigned long ar_ssd; /* reserved for future use (scratch) */ | 501 | unsigned long ar_ssd; /* reserved for future use (scratch) */ |
503 | 502 | ||
504 | unsigned long r8; /* scratch (return value register 0) */ | 503 | unsigned long r8; /* scratch (return value register 0) */ |
505 | unsigned long r9; /* scratch (return value register 1) */ | 504 | unsigned long r9; /* scratch (return value register 1) */ |
506 | unsigned long r10; /* scratch (return value register 2) */ | 505 | unsigned long r10; /* scratch (return value register 2) */ |
507 | unsigned long r11; /* scratch (return value register 3) */ | 506 | unsigned long r11; /* scratch (return value register 3) */ |
508 | 507 | ||
509 | unsigned long cr_ipsr; /* interrupted task's psr */ | 508 | unsigned long cr_ipsr; /* interrupted task's psr */ |
510 | unsigned long cr_iip; /* interrupted task's instruction pointer */ | 509 | unsigned long cr_iip; /* interrupted task's instruction pointer */ |
511 | unsigned long cr_ifs; /* interrupted task's function state */ | 510 | unsigned long cr_ifs; /* interrupted task's function state */ |
512 | 511 | ||
513 | unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ | 512 | unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ |
514 | unsigned long ar_pfs; /* prev function state */ | 513 | unsigned long ar_pfs; /* prev function state */ |
515 | unsigned long ar_rsc; /* RSE configuration */ | 514 | unsigned long ar_rsc; /* RSE configuration */ |
516 | /* The following two are valid only if cr_ipsr.cpl > 0: */ | 515 | /* The following two are valid only if cr_ipsr.cpl > 0: */ |
517 | unsigned long ar_rnat; /* RSE NaT */ | 516 | unsigned long ar_rnat; /* RSE NaT */ |
518 | unsigned long ar_bspstore; /* RSE bspstore */ | 517 | unsigned long ar_bspstore; /* RSE bspstore */ |
519 | 518 | ||
520 | unsigned long pr; /* 64 predicate registers (1 bit each) */ | 519 | unsigned long pr; /* 64 predicate registers (1 bit each) */ |
521 | unsigned long b0; /* return pointer (bp) */ | 520 | unsigned long b0; /* return pointer (bp) */ |
522 | unsigned long loadrs; /* size of dirty partition << 16 */ | 521 | unsigned long loadrs; /* size of dirty partition << 16 */ |
523 | 522 | ||
524 | unsigned long r1; /* the gp pointer */ | 523 | unsigned long r1; /* the gp pointer */ |
525 | unsigned long r12; /* interrupted task's memory stack pointer */ | 524 | unsigned long r12; /* interrupted task's memory stack pointer */ |
526 | unsigned long r13; /* thread pointer */ | 525 | unsigned long r13; /* thread pointer */ |
527 | 526 | ||
528 | unsigned long ar_fpsr; /* floating point status (preserved) */ | 527 | unsigned long ar_fpsr; /* floating point status (preserved) */ |
529 | unsigned long r15; /* scratch */ | 528 | unsigned long r15; /* scratch */ |
530 | 529 | ||
531 | /* The remaining registers are NOT saved for system calls. */ | 530 | /* The remaining registers are NOT saved for system calls. */ |
532 | unsigned long r14; /* scratch */ | 531 | unsigned long r14; /* scratch */ |
533 | unsigned long r2; /* scratch */ | 532 | unsigned long r2; /* scratch */ |
534 | unsigned long r3; /* scratch */ | 533 | unsigned long r3; /* scratch */ |
535 | unsigned long r16; /* scratch */ | 534 | unsigned long r16; /* scratch */ |
536 | unsigned long r17; /* scratch */ | 535 | unsigned long r17; /* scratch */ |
537 | unsigned long r18; /* scratch */ | 536 | unsigned long r18; /* scratch */ |
538 | unsigned long r19; /* scratch */ | 537 | unsigned long r19; /* scratch */ |
539 | unsigned long r20; /* scratch */ | 538 | unsigned long r20; /* scratch */ |
540 | unsigned long r21; /* scratch */ | 539 | unsigned long r21; /* scratch */ |
541 | unsigned long r22; /* scratch */ | 540 | unsigned long r22; /* scratch */ |
542 | unsigned long r23; /* scratch */ | 541 | unsigned long r23; /* scratch */ |
543 | unsigned long r24; /* scratch */ | 542 | unsigned long r24; /* scratch */ |
544 | unsigned long r25; /* scratch */ | 543 | unsigned long r25; /* scratch */ |
545 | unsigned long r26; /* scratch */ | 544 | unsigned long r26; /* scratch */ |
546 | unsigned long r27; /* scratch */ | 545 | unsigned long r27; /* scratch */ |
547 | unsigned long r28; /* scratch */ | 546 | unsigned long r28; /* scratch */ |
548 | unsigned long r29; /* scratch */ | 547 | unsigned long r29; /* scratch */ |
549 | unsigned long r30; /* scratch */ | 548 | unsigned long r30; /* scratch */ |
550 | unsigned long r31; /* scratch */ | 549 | unsigned long r31; /* scratch */ |
551 | unsigned long ar_ccv; /* compare/exchange value (scratch) */ | 550 | unsigned long ar_ccv; /* compare/exchange value (scratch) */ |
552 | 551 | ||
553 | /* | 552 | /* |
554 | * Floating point registers that the kernel considers scratch: | 553 | * Floating point registers that the kernel considers scratch: |
555 | */ | 554 | */ |
556 | struct ia64_fpreg f6; /* scratch */ | 555 | struct ia64_fpreg f6; /* scratch */ |
557 | struct ia64_fpreg f7; /* scratch */ | 556 | struct ia64_fpreg f7; /* scratch */ |
558 | struct ia64_fpreg f8; /* scratch */ | 557 | struct ia64_fpreg f8; /* scratch */ |
559 | struct ia64_fpreg f9; /* scratch */ | 558 | struct ia64_fpreg f9; /* scratch */ |
560 | struct ia64_fpreg f10; /* scratch */ | 559 | struct ia64_fpreg f10; /* scratch */ |
561 | struct ia64_fpreg f11; /* scratch */ | 560 | struct ia64_fpreg f11; /* scratch */ |
562 | 561 | ||
563 | unsigned long r4; /* preserved */ | 562 | unsigned long r4; /* preserved */ |
564 | unsigned long r5; /* preserved */ | 563 | unsigned long r5; /* preserved */ |
565 | unsigned long r6; /* preserved */ | 564 | unsigned long r6; /* preserved */ |
566 | unsigned long r7; /* preserved */ | 565 | unsigned long r7; /* preserved */ |
567 | unsigned long eml_unat; /* used for emulating instruction */ | 566 | unsigned long eml_unat; /* used for emulating instruction */ |
568 | unsigned long pad0; /* alignment pad */ | 567 | unsigned long pad0; /* alignment pad */ |
569 | }; | 568 | }; |
570 | 569 | ||
571 | static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) | 570 | static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) |
572 | { | 571 | { |
573 | return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1; | 572 | return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1; |
574 | } | 573 | } |
575 | 574 | ||
576 | typedef int kvm_vmm_entry(void); | 575 | typedef int kvm_vmm_entry(void); |
577 | typedef void kvm_tramp_entry(union context *host, union context *guest); | 576 | typedef void kvm_tramp_entry(union context *host, union context *guest); |
578 | 577 | ||
579 | struct kvm_vmm_info{ | 578 | struct kvm_vmm_info{ |
580 | struct module *module; | 579 | struct module *module; |
581 | kvm_vmm_entry *vmm_entry; | 580 | kvm_vmm_entry *vmm_entry; |
582 | kvm_tramp_entry *tramp_entry; | 581 | kvm_tramp_entry *tramp_entry; |
583 | unsigned long vmm_ivt; | 582 | unsigned long vmm_ivt; |
584 | unsigned long patch_mov_ar; | 583 | unsigned long patch_mov_ar; |
585 | unsigned long patch_mov_ar_sn2; | 584 | unsigned long patch_mov_ar_sn2; |
586 | }; | 585 | }; |
587 | 586 | ||
588 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu); | 587 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu); |
589 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); | 588 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); |
590 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); | 589 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
591 | void kvm_sal_emul(struct kvm_vcpu *vcpu); | 590 | void kvm_sal_emul(struct kvm_vcpu *vcpu); |
592 | 591 | ||
593 | #endif /* __ASSEMBLY__*/ | 592 | #endif /* __ASSEMBLY__*/ |
594 | 593 | ||
595 | #endif | 594 | #endif |
596 | 595 |
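Worth noting from this header: KVM_MAX_VCPUS on ia64 is not hard-coded, it falls out of the per-VM data layout above. A small standalone sketch (not part of the kernel tree) that plugs in the constants from this file:

```c
/* Back-of-the-envelope check of KVM_MAX_VCPUS using the constants
 * defined in arch/ia64/include/asm/kvm_host.h above. Standalone sketch. */
#include <stdio.h>

int main(void)
{
	unsigned long vm_data   = 1UL << 26;   /* KVM_VM_DATA_SIZE, 64 MiB       */
	unsigned long p2m       = 24UL << 20;  /* KVM_P2M_SIZE, 24 MiB           */
	unsigned long vm_struct = 1UL << 19;   /* KVM_VM_STRUCT_SIZE, 512 KiB    */
	unsigned long dirty_log = 1UL << 19;   /* KVM_MEM_DIRTY_LOG_SIZE, 512 KiB*/
	/* struct kvm_vcpu_data = vhpt + vtlb + vpd + vcpu_struct, 4 * 64 KiB */
	unsigned long vcpu_data = 4UL << 16;

	printf("KVM_MAX_VCPUS = %lu\n",
	       (vm_data - p2m - vm_struct - dirty_log) / vcpu_data);
	return 0;
}
```

With a 64 MiB per-VM area, a 24 MiB p2m table and 256 KiB of data per vcpu, that works out to 156 vcpus.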
arch/ia64/kvm/Kconfig
1 | # | 1 | # |
2 | # KVM configuration | 2 | # KVM configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | source "virt/kvm/Kconfig" | 5 | source "virt/kvm/Kconfig" |
6 | 6 | ||
7 | menuconfig VIRTUALIZATION | 7 | menuconfig VIRTUALIZATION |
8 | bool "Virtualization" | 8 | bool "Virtualization" |
9 | depends on HAVE_KVM || IA64 | 9 | depends on HAVE_KVM || IA64 |
10 | default y | 10 | default y |
11 | ---help--- | 11 | ---help--- |
12 | Say Y here to get to see options for using your Linux host to run other | 12 | Say Y here to get to see options for using your Linux host to run other |
13 | operating systems inside virtual machines (guests). | 13 | operating systems inside virtual machines (guests). |
14 | This option alone does not add any kernel code. | 14 | This option alone does not add any kernel code. |
15 | 15 | ||
16 | If you say N, all options in this submenu will be skipped and disabled. | 16 | If you say N, all options in this submenu will be skipped and disabled. |
17 | 17 | ||
18 | if VIRTUALIZATION | 18 | if VIRTUALIZATION |
19 | 19 | ||
20 | config KVM | 20 | config KVM |
21 | tristate "Kernel-based Virtual Machine (KVM) support" | 21 | tristate "Kernel-based Virtual Machine (KVM) support" |
22 | depends on HAVE_KVM && MODULES && EXPERIMENTAL | 22 | depends on HAVE_KVM && MODULES && EXPERIMENTAL |
23 | # for device assignment: | 23 | # for device assignment: |
24 | depends on PCI | 24 | depends on PCI |
25 | select PREEMPT_NOTIFIERS | 25 | select PREEMPT_NOTIFIERS |
26 | select ANON_INODES | 26 | select ANON_INODES |
27 | select HAVE_KVM_IRQCHIP | 27 | select HAVE_KVM_IRQCHIP |
| | 28 | select KVM_APIC_ARCHITECTURE |
28 | ---help--- | 29 | ---help--- |
29 | Support hosting fully virtualized guest machines using hardware | 30 | Support hosting fully virtualized guest machines using hardware |
30 | virtualization extensions. You will need a fairly recent | 31 | virtualization extensions. You will need a fairly recent |
31 | processor equipped with virtualization extensions. You will also | 32 | processor equipped with virtualization extensions. You will also |
32 | need to select one or more of the processor modules below. | 33 | need to select one or more of the processor modules below. |
33 | 34 | ||
34 | This module provides access to the hardware capabilities through | 35 | This module provides access to the hardware capabilities through |
35 | a character device node named /dev/kvm. | 36 | a character device node named /dev/kvm. |
36 | 37 | ||
37 | To compile this as a module, choose M here: the module | 38 | To compile this as a module, choose M here: the module |
38 | will be called kvm. | 39 | will be called kvm. |
39 | 40 | ||
40 | If unsure, say N. | 41 | If unsure, say N. |
41 | 42 | ||
42 | config KVM_INTEL | 43 | config KVM_INTEL |
43 | tristate "KVM for Intel Itanium 2 processors support" | 44 | tristate "KVM for Intel Itanium 2 processors support" |
44 | depends on KVM && m | 45 | depends on KVM && m |
45 | ---help--- | 46 | ---help--- |
46 | Provides support for KVM on Itanium 2 processors equipped with the VT | 47 | Provides support for KVM on Itanium 2 processors equipped with the VT |
47 | extensions. | 48 | extensions. |
48 | 49 | ||
49 | config KVM_TRACE | 50 | config KVM_TRACE |
50 | bool | 51 | bool |
51 | 52 | ||
52 | source drivers/virtio/Kconfig | 53 | source drivers/virtio/Kconfig |
53 | 54 | ||
54 | endif # VIRTUALIZATION | 55 | endif # VIRTUALIZATION |
55 | 56 |
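The one-line addition here, `select KVM_APIC_ARCHITECTURE`, opts ia64 into the boot-vcpu (BSP) machinery this commit adds to the generic code. The generic helper ends up looking roughly like the sketch below; the exact struct kvm field names are not visible in the hunks shown on this page, so treat them as assumptions rather than a verbatim quote:

```c
/* Illustrative sketch of the common-code helper gated by
 * CONFIG_KVM_APIC_ARCHITECTURE; field names are assumptions,
 * not copied from the diff shown here. */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	/* The BSP is whichever vcpu_id was configured through the new
	 * boot-cpu ioctl (default 0), no longer "index 0 in vcpus[]". */
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif
```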
arch/ia64/kvm/kvm-ia64.c
1 | /* | 1 | /* |
2 | * kvm_ia64.c: Basic KVM suppport On Itanium series processors | 2 | * kvm_ia64.c: Basic KVM suppport On Itanium series processors |
3 | * | 3 | * |
4 | * | 4 | * |
5 | * Copyright (C) 2007, Intel Corporation. | 5 | * Copyright (C) 2007, Intel Corporation. |
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | 6 | * Xiantao Zhang (xiantao.zhang@intel.com) |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms and conditions of the GNU General Public License, | 9 | * under the terms and conditions of the GNU General Public License, |
10 | * version 2, as published by the Free Software Foundation. | 10 | * version 2, as published by the Free Software Foundation. |
11 | * | 11 | * |
12 | * This program is distributed in the hope it will be useful, but WITHOUT | 12 | * This program is distributed in the hope it will be useful, but WITHOUT |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
15 | * more details. | 15 | * more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License along with | 17 | * You should have received a copy of the GNU General Public License along with |
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 19 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
26 | #include <linux/gfp.h> | 26 | #include <linux/gfp.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/smp.h> | 28 | #include <linux/smp.h> |
29 | #include <linux/kvm_host.h> | 29 | #include <linux/kvm_host.h> |
30 | #include <linux/kvm.h> | 30 | #include <linux/kvm.h> |
31 | #include <linux/bitops.h> | 31 | #include <linux/bitops.h> |
32 | #include <linux/hrtimer.h> | 32 | #include <linux/hrtimer.h> |
33 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
34 | #include <linux/iommu.h> | 34 | #include <linux/iommu.h> |
35 | #include <linux/intel-iommu.h> | 35 | #include <linux/intel-iommu.h> |
36 | 36 | ||
37 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
38 | #include <asm/gcc_intrin.h> | 38 | #include <asm/gcc_intrin.h> |
39 | #include <asm/pal.h> | 39 | #include <asm/pal.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/div64.h> | 41 | #include <asm/div64.h> |
42 | #include <asm/tlb.h> | 42 | #include <asm/tlb.h> |
43 | #include <asm/elf.h> | 43 | #include <asm/elf.h> |
44 | #include <asm/sn/addrs.h> | 44 | #include <asm/sn/addrs.h> |
45 | #include <asm/sn/clksupport.h> | 45 | #include <asm/sn/clksupport.h> |
46 | #include <asm/sn/shub_mmr.h> | 46 | #include <asm/sn/shub_mmr.h> |
47 | 47 | ||
48 | #include "misc.h" | 48 | #include "misc.h" |
49 | #include "vti.h" | 49 | #include "vti.h" |
50 | #include "iodev.h" | 50 | #include "iodev.h" |
51 | #include "ioapic.h" | 51 | #include "ioapic.h" |
52 | #include "lapic.h" | 52 | #include "lapic.h" |
53 | #include "irq.h" | 53 | #include "irq.h" |
54 | 54 | ||
55 | static unsigned long kvm_vmm_base; | 55 | static unsigned long kvm_vmm_base; |
56 | static unsigned long kvm_vsa_base; | 56 | static unsigned long kvm_vsa_base; |
57 | static unsigned long kvm_vm_buffer; | 57 | static unsigned long kvm_vm_buffer; |
58 | static unsigned long kvm_vm_buffer_size; | 58 | static unsigned long kvm_vm_buffer_size; |
59 | unsigned long kvm_vmm_gp; | 59 | unsigned long kvm_vmm_gp; |
60 | 60 | ||
61 | static long vp_env_info; | 61 | static long vp_env_info; |
62 | 62 | ||
63 | static struct kvm_vmm_info *kvm_vmm_info; | 63 | static struct kvm_vmm_info *kvm_vmm_info; |
64 | 64 | ||
65 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); | 65 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); |
66 | 66 | ||
67 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 67 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
68 | { NULL } | 68 | { NULL } |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) | 71 | static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) |
72 | { | 72 | { |
73 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | 73 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) |
74 | if (vcpu->kvm->arch.is_sn2) | 74 | if (vcpu->kvm->arch.is_sn2) |
75 | return rtc_time(); | 75 | return rtc_time(); |
76 | else | 76 | else |
77 | #endif | 77 | #endif |
78 | return ia64_getreg(_IA64_REG_AR_ITC); | 78 | return ia64_getreg(_IA64_REG_AR_ITC); |
79 | } | 79 | } |
80 | 80 | ||
81 | static void kvm_flush_icache(unsigned long start, unsigned long len) | 81 | static void kvm_flush_icache(unsigned long start, unsigned long len) |
82 | { | 82 | { |
83 | int l; | 83 | int l; |
84 | 84 | ||
85 | for (l = 0; l < (len + 32); l += 32) | 85 | for (l = 0; l < (len + 32); l += 32) |
86 | ia64_fc((void *)(start + l)); | 86 | ia64_fc((void *)(start + l)); |
87 | 87 | ||
88 | ia64_sync_i(); | 88 | ia64_sync_i(); |
89 | ia64_srlz_i(); | 89 | ia64_srlz_i(); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void kvm_flush_tlb_all(void) | 92 | static void kvm_flush_tlb_all(void) |
93 | { | 93 | { |
94 | unsigned long i, j, count0, count1, stride0, stride1, addr; | 94 | unsigned long i, j, count0, count1, stride0, stride1, addr; |
95 | long flags; | 95 | long flags; |
96 | 96 | ||
97 | addr = local_cpu_data->ptce_base; | 97 | addr = local_cpu_data->ptce_base; |
98 | count0 = local_cpu_data->ptce_count[0]; | 98 | count0 = local_cpu_data->ptce_count[0]; |
99 | count1 = local_cpu_data->ptce_count[1]; | 99 | count1 = local_cpu_data->ptce_count[1]; |
100 | stride0 = local_cpu_data->ptce_stride[0]; | 100 | stride0 = local_cpu_data->ptce_stride[0]; |
101 | stride1 = local_cpu_data->ptce_stride[1]; | 101 | stride1 = local_cpu_data->ptce_stride[1]; |
102 | 102 | ||
103 | local_irq_save(flags); | 103 | local_irq_save(flags); |
104 | for (i = 0; i < count0; ++i) { | 104 | for (i = 0; i < count0; ++i) { |
105 | for (j = 0; j < count1; ++j) { | 105 | for (j = 0; j < count1; ++j) { |
106 | ia64_ptce(addr); | 106 | ia64_ptce(addr); |
107 | addr += stride1; | 107 | addr += stride1; |
108 | } | 108 | } |
109 | addr += stride0; | 109 | addr += stride0; |
110 | } | 110 | } |
111 | local_irq_restore(flags); | 111 | local_irq_restore(flags); |
112 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | 112 | ia64_srlz_i(); /* srlz.i implies srlz.d */ |
113 | } | 113 | } |
114 | 114 | ||
115 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | 115 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) |
116 | { | 116 | { |
117 | struct ia64_pal_retval iprv; | 117 | struct ia64_pal_retval iprv; |
118 | 118 | ||
119 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, | 119 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, |
120 | (u64)opt_handler); | 120 | (u64)opt_handler); |
121 | 121 | ||
122 | return iprv.status; | 122 | return iprv.status; |
123 | } | 123 | } |
124 | 124 | ||
125 | static DEFINE_SPINLOCK(vp_lock); | 125 | static DEFINE_SPINLOCK(vp_lock); |
126 | 126 | ||
127 | void kvm_arch_hardware_enable(void *garbage) | 127 | void kvm_arch_hardware_enable(void *garbage) |
128 | { | 128 | { |
129 | long status; | 129 | long status; |
130 | long tmp_base; | 130 | long tmp_base; |
131 | unsigned long pte; | 131 | unsigned long pte; |
132 | unsigned long saved_psr; | 132 | unsigned long saved_psr; |
133 | int slot; | 133 | int slot; |
134 | 134 | ||
135 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | 135 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); |
136 | local_irq_save(saved_psr); | 136 | local_irq_save(saved_psr); |
137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
138 | local_irq_restore(saved_psr); | 138 | local_irq_restore(saved_psr); |
139 | if (slot < 0) | 139 | if (slot < 0) |
140 | return; | 140 | return; |
141 | 141 | ||
142 | spin_lock(&vp_lock); | 142 | spin_lock(&vp_lock); |
143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | 143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? |
144 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, | 144 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, |
145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | 145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); |
146 | if (status != 0) { | 146 | if (status != 0) { |
147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | 147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); |
148 | return ; | 148 | return ; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (!kvm_vsa_base) { | 151 | if (!kvm_vsa_base) { |
152 | kvm_vsa_base = tmp_base; | 152 | kvm_vsa_base = tmp_base; |
153 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); | 153 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); |
154 | } | 154 | } |
155 | spin_unlock(&vp_lock); | 155 | spin_unlock(&vp_lock); |
156 | ia64_ptr_entry(0x3, slot); | 156 | ia64_ptr_entry(0x3, slot); |
157 | } | 157 | } |
158 | 158 | ||
159 | void kvm_arch_hardware_disable(void *garbage) | 159 | void kvm_arch_hardware_disable(void *garbage) |
160 | { | 160 | { |
161 | 161 | ||
162 | long status; | 162 | long status; |
163 | int slot; | 163 | int slot; |
164 | unsigned long pte; | 164 | unsigned long pte; |
165 | unsigned long saved_psr; | 165 | unsigned long saved_psr; |
166 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); | 166 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); |
167 | 167 | ||
168 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | 168 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), |
169 | PAGE_KERNEL)); | 169 | PAGE_KERNEL)); |
170 | 170 | ||
171 | local_irq_save(saved_psr); | 171 | local_irq_save(saved_psr); |
172 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 172 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
173 | local_irq_restore(saved_psr); | 173 | local_irq_restore(saved_psr); |
174 | if (slot < 0) | 174 | if (slot < 0) |
175 | return; | 175 | return; |
176 | 176 | ||
177 | status = ia64_pal_vp_exit_env(host_iva); | 177 | status = ia64_pal_vp_exit_env(host_iva); |
178 | if (status) | 178 | if (status) |
179 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", | 179 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", |
180 | status); | 180 | status); |
181 | ia64_ptr_entry(0x3, slot); | 181 | ia64_ptr_entry(0x3, slot); |
182 | } | 182 | } |
183 | 183 | ||
184 | void kvm_arch_check_processor_compat(void *rtn) | 184 | void kvm_arch_check_processor_compat(void *rtn) |
185 | { | 185 | { |
186 | *(int *)rtn = 0; | 186 | *(int *)rtn = 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | int kvm_dev_ioctl_check_extension(long ext) | 189 | int kvm_dev_ioctl_check_extension(long ext) |
190 | { | 190 | { |
191 | 191 | ||
192 | int r; | 192 | int r; |
193 | 193 | ||
194 | switch (ext) { | 194 | switch (ext) { |
195 | case KVM_CAP_IRQCHIP: | 195 | case KVM_CAP_IRQCHIP: |
196 | case KVM_CAP_MP_STATE: | 196 | case KVM_CAP_MP_STATE: |
197 | case KVM_CAP_IRQ_INJECT_STATUS: | 197 | case KVM_CAP_IRQ_INJECT_STATUS: |
198 | r = 1; | 198 | r = 1; |
199 | break; | 199 | break; |
200 | case KVM_CAP_COALESCED_MMIO: | 200 | case KVM_CAP_COALESCED_MMIO: |
201 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 201 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
202 | break; | 202 | break; |
203 | case KVM_CAP_IOMMU: | 203 | case KVM_CAP_IOMMU: |
204 | r = iommu_found(); | 204 | r = iommu_found(); |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | r = 0; | 207 | r = 0; |
208 | } | 208 | } |
209 | return r; | 209 | return r; |
210 | 210 | ||
211 | } | 211 | } |
212 | 212 | ||
213 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | 213 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, |
214 | gpa_t addr, int len, int is_write) | 214 | gpa_t addr, int len, int is_write) |
215 | { | 215 | { |
216 | struct kvm_io_device *dev; | 216 | struct kvm_io_device *dev; |
217 | 217 | ||
218 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write); | 218 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write); |
219 | 219 | ||
220 | return dev; | 220 | return dev; |
221 | } | 221 | } |
222 | 222 | ||
223 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 223 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
224 | { | 224 | { |
225 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 225 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; |
226 | kvm_run->hw.hardware_exit_reason = 1; | 226 | kvm_run->hw.hardware_exit_reason = 1; |
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 230 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
231 | { | 231 | { |
232 | struct kvm_mmio_req *p; | 232 | struct kvm_mmio_req *p; |
233 | struct kvm_io_device *mmio_dev; | 233 | struct kvm_io_device *mmio_dev; |
234 | 234 | ||
235 | p = kvm_get_vcpu_ioreq(vcpu); | 235 | p = kvm_get_vcpu_ioreq(vcpu); |
236 | 236 | ||
237 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) | 237 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) |
238 | goto mmio; | 238 | goto mmio; |
239 | vcpu->mmio_needed = 1; | 239 | vcpu->mmio_needed = 1; |
240 | vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; | 240 | vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; |
241 | vcpu->mmio_size = kvm_run->mmio.len = p->size; | 241 | vcpu->mmio_size = kvm_run->mmio.len = p->size; |
242 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; | 242 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; |
243 | 243 | ||
244 | if (vcpu->mmio_is_write) | 244 | if (vcpu->mmio_is_write) |
245 | memcpy(vcpu->mmio_data, &p->data, p->size); | 245 | memcpy(vcpu->mmio_data, &p->data, p->size); |
246 | memcpy(kvm_run->mmio.data, &p->data, p->size); | 246 | memcpy(kvm_run->mmio.data, &p->data, p->size); |
247 | kvm_run->exit_reason = KVM_EXIT_MMIO; | 247 | kvm_run->exit_reason = KVM_EXIT_MMIO; |
248 | return 0; | 248 | return 0; |
249 | mmio: | 249 | mmio: |
250 | mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir); | 250 | mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir); |
251 | if (mmio_dev) { | 251 | if (mmio_dev) { |
252 | if (!p->dir) | 252 | if (!p->dir) |
253 | kvm_iodevice_write(mmio_dev, p->addr, p->size, | 253 | kvm_iodevice_write(mmio_dev, p->addr, p->size, |
254 | &p->data); | 254 | &p->data); |
255 | else | 255 | else |
256 | kvm_iodevice_read(mmio_dev, p->addr, p->size, | 256 | kvm_iodevice_read(mmio_dev, p->addr, p->size, |
257 | &p->data); | 257 | &p->data); |
258 | 258 | ||
259 | } else | 259 | } else |
260 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); | 260 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); |
261 | p->state = STATE_IORESP_READY; | 261 | p->state = STATE_IORESP_READY; |
262 | 262 | ||
263 | return 1; | 263 | return 1; |
264 | } | 264 | } |
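Accesses that do not hit the in-kernel IOAPIC are bounced to userspace as KVM_EXIT_MMIO through the vcpu's shared kvm_run page; the kernel fills mmio.phys_addr/len/is_write (and data for writes) and expects userspace to fill data for reads before the next KVM_RUN. A hedged sketch of that userspace side, where handle_device_read()/handle_device_write() stand in for a hypothetical device model:

        #include <linux/kvm.h>

        /* hypothetical device-model hooks, not part of this commit */
        void handle_device_read(unsigned long addr, void *data, unsigned int len);
        void handle_device_write(unsigned long addr, const void *data, unsigned int len);

        static void handle_mmio_exit(struct kvm_run *run)
        {
                if (run->mmio.is_write)
                        handle_device_write(run->mmio.phys_addr, run->mmio.data,
                                            run->mmio.len);
                else
                        /* data is copied back into the guest on the next KVM_RUN,
                         * see kvm_set_mmio_data() further down in this file */
                        handle_device_read(run->mmio.phys_addr, run->mmio.data,
                                           run->mmio.len);
        }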
265 | 265 | ||
266 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 266 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
267 | { | 267 | { |
268 | struct exit_ctl_data *p; | 268 | struct exit_ctl_data *p; |
269 | 269 | ||
270 | p = kvm_get_exit_data(vcpu); | 270 | p = kvm_get_exit_data(vcpu); |
271 | 271 | ||
272 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | 272 | if (p->exit_reason == EXIT_REASON_PAL_CALL) |
273 | return kvm_pal_emul(vcpu, kvm_run); | 273 | return kvm_pal_emul(vcpu, kvm_run); |
274 | else { | 274 | else { |
275 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 275 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; |
276 | kvm_run->hw.hardware_exit_reason = 2; | 276 | kvm_run->hw.hardware_exit_reason = 2; |
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | } | 279 | } |
280 | 280 | ||
281 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 281 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
282 | { | 282 | { |
283 | struct exit_ctl_data *p; | 283 | struct exit_ctl_data *p; |
284 | 284 | ||
285 | p = kvm_get_exit_data(vcpu); | 285 | p = kvm_get_exit_data(vcpu); |
286 | 286 | ||
287 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | 287 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { |
288 | kvm_sal_emul(vcpu); | 288 | kvm_sal_emul(vcpu); |
289 | return 1; | 289 | return 1; |
290 | } else { | 290 | } else { |
291 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 291 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; |
292 | kvm_run->hw.hardware_exit_reason = 3; | 292 | kvm_run->hw.hardware_exit_reason = 3; |
293 | return 0; | 293 | return 0; |
294 | } | 294 | } |
295 | 295 | ||
296 | } | 296 | } |
297 | 297 | ||
298 | static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) | 298 | static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) |
299 | { | 299 | { |
300 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 300 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
301 | 301 | ||
302 | if (!test_and_set_bit(vector, &vpd->irr[0])) { | 302 | if (!test_and_set_bit(vector, &vpd->irr[0])) { |
303 | vcpu->arch.irq_new_pending = 1; | 303 | vcpu->arch.irq_new_pending = 1; |
304 | kvm_vcpu_kick(vcpu); | 304 | kvm_vcpu_kick(vcpu); |
305 | return 1; | 305 | return 1; |
306 | } | 306 | } |
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | /* | 310 | /* |
311 | * offset: address offset to IPI space. | 311 | * offset: address offset to IPI space. |
312 | * value: deliver value. | 312 | * value: deliver value. |
313 | */ | 313 | */ |
314 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, | 314 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, |
315 | uint64_t vector) | 315 | uint64_t vector) |
316 | { | 316 | { |
317 | switch (dm) { | 317 | switch (dm) { |
318 | case SAPIC_FIXED: | 318 | case SAPIC_FIXED: |
319 | break; | 319 | break; |
320 | case SAPIC_NMI: | 320 | case SAPIC_NMI: |
321 | vector = 2; | 321 | vector = 2; |
322 | break; | 322 | break; |
323 | case SAPIC_EXTINT: | 323 | case SAPIC_EXTINT: |
324 | vector = 0; | 324 | vector = 0; |
325 | break; | 325 | break; |
326 | case SAPIC_INIT: | 326 | case SAPIC_INIT: |
327 | case SAPIC_PMI: | 327 | case SAPIC_PMI: |
328 | default: | 328 | default: |
329 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); | 329 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); |
330 | return; | 330 | return; |
331 | } | 331 | } |
332 | __apic_accept_irq(vcpu, vector); | 332 | __apic_accept_irq(vcpu, vector); |
333 | } | 333 | } |
334 | 334 | ||
335 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | 335 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, |
336 | unsigned long eid) | 336 | unsigned long eid) |
337 | { | 337 | { |
338 | union ia64_lid lid; | 338 | union ia64_lid lid; |
339 | int i; | 339 | int i; |
340 | 340 | ||
341 | for (i = 0; i < kvm->arch.online_vcpus; i++) { | 341 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { |
342 | if (kvm->vcpus[i]) { | 342 | if (kvm->vcpus[i]) { |
343 | lid.val = VCPU_LID(kvm->vcpus[i]); | 343 | lid.val = VCPU_LID(kvm->vcpus[i]); |
344 | if (lid.id == id && lid.eid == eid) | 344 | if (lid.id == id && lid.eid == eid) |
345 | return kvm->vcpus[i]; | 345 | return kvm->vcpus[i]; |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
349 | return NULL; | 349 | return NULL; |
350 | } | 350 | } |
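This loop is one of the two in this file that the commit retargets: the bound now comes from the generic atomic_t kvm->online_vcpus instead of the arch-private counter whose initialisation is dropped from kvm_arch_create_vm() below. A minimal sketch of the resulting pattern, assuming (as the generic code arranges) that vcpus[] is filled densely from index 0 and the counter is only incremented after a slot is populated:

        static struct kvm_vcpu *find_vcpu_by_id(struct kvm *kvm, int vcpu_id)
        {
                int i;

                for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
                        struct kvm_vcpu *v = kvm->vcpus[i];

                        if (v && v->vcpu_id == vcpu_id)
                                return v;
                }
                return NULL;
        }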
351 | 351 | ||
352 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 352 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
353 | { | 353 | { |
354 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | 354 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); |
355 | struct kvm_vcpu *target_vcpu; | 355 | struct kvm_vcpu *target_vcpu; |
356 | struct kvm_pt_regs *regs; | 356 | struct kvm_pt_regs *regs; |
357 | union ia64_ipi_a addr = p->u.ipi_data.addr; | 357 | union ia64_ipi_a addr = p->u.ipi_data.addr; |
358 | union ia64_ipi_d data = p->u.ipi_data.data; | 358 | union ia64_ipi_d data = p->u.ipi_data.data; |
359 | 359 | ||
360 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); | 360 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); |
361 | if (!target_vcpu) | 361 | if (!target_vcpu) |
362 | return handle_vm_error(vcpu, kvm_run); | 362 | return handle_vm_error(vcpu, kvm_run); |
363 | 363 | ||
364 | if (!target_vcpu->arch.launched) { | 364 | if (!target_vcpu->arch.launched) { |
365 | regs = vcpu_regs(target_vcpu); | 365 | regs = vcpu_regs(target_vcpu); |
366 | 366 | ||
367 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; | 367 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; |
368 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; | 368 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; |
369 | 369 | ||
370 | target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 370 | target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
371 | if (waitqueue_active(&target_vcpu->wq)) | 371 | if (waitqueue_active(&target_vcpu->wq)) |
372 | wake_up_interruptible(&target_vcpu->wq); | 372 | wake_up_interruptible(&target_vcpu->wq); |
373 | } else { | 373 | } else { |
374 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); | 374 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); |
375 | if (target_vcpu != vcpu) | 375 | if (target_vcpu != vcpu) |
376 | kvm_vcpu_kick(target_vcpu); | 376 | kvm_vcpu_kick(target_vcpu); |
377 | } | 377 | } |
378 | 378 | ||
379 | return 1; | 379 | return 1; |
380 | } | 380 | } |
381 | 381 | ||
382 | struct call_data { | 382 | struct call_data { |
383 | struct kvm_ptc_g ptc_g_data; | 383 | struct kvm_ptc_g ptc_g_data; |
384 | struct kvm_vcpu *vcpu; | 384 | struct kvm_vcpu *vcpu; |
385 | }; | 385 | }; |
386 | 386 | ||
387 | static void vcpu_global_purge(void *info) | 387 | static void vcpu_global_purge(void *info) |
388 | { | 388 | { |
389 | struct call_data *p = (struct call_data *)info; | 389 | struct call_data *p = (struct call_data *)info; |
390 | struct kvm_vcpu *vcpu = p->vcpu; | 390 | struct kvm_vcpu *vcpu = p->vcpu; |
391 | 391 | ||
392 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | 392 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) |
393 | return; | 393 | return; |
394 | 394 | ||
395 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); | 395 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); |
396 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { | 396 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { |
397 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = | 397 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = |
398 | p->ptc_g_data; | 398 | p->ptc_g_data; |
399 | } else { | 399 | } else { |
400 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); | 400 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); |
401 | vcpu->arch.ptc_g_count = 0; | 401 | vcpu->arch.ptc_g_count = 0; |
402 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); | 402 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); |
403 | } | 403 | } |
404 | } | 404 | } |
405 | 405 | ||
406 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 406 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
407 | { | 407 | { |
408 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | 408 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); |
409 | struct kvm *kvm = vcpu->kvm; | 409 | struct kvm *kvm = vcpu->kvm; |
410 | struct call_data call_data; | 410 | struct call_data call_data; |
411 | int i; | 411 | int i; |
412 | 412 | ||
413 | call_data.ptc_g_data = p->u.ptc_g_data; | 413 | call_data.ptc_g_data = p->u.ptc_g_data; |
414 | 414 | ||
415 | for (i = 0; i < kvm->arch.online_vcpus; i++) { | 415 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { |
416 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | 416 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == |
417 | KVM_MP_STATE_UNINITIALIZED || | 417 | KVM_MP_STATE_UNINITIALIZED || |
418 | vcpu == kvm->vcpus[i]) | 418 | vcpu == kvm->vcpus[i]) |
419 | continue; | 419 | continue; |
420 | 420 | ||
421 | if (waitqueue_active(&kvm->vcpus[i]->wq)) | 421 | if (waitqueue_active(&kvm->vcpus[i]->wq)) |
422 | wake_up_interruptible(&kvm->vcpus[i]->wq); | 422 | wake_up_interruptible(&kvm->vcpus[i]->wq); |
423 | 423 | ||
424 | if (kvm->vcpus[i]->cpu != -1) { | 424 | if (kvm->vcpus[i]->cpu != -1) { |
425 | call_data.vcpu = kvm->vcpus[i]; | 425 | call_data.vcpu = kvm->vcpus[i]; |
426 | smp_call_function_single(kvm->vcpus[i]->cpu, | 426 | smp_call_function_single(kvm->vcpus[i]->cpu, |
427 | vcpu_global_purge, &call_data, 1); | 427 | vcpu_global_purge, &call_data, 1); |
428 | } else | 428 | } else |
429 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); | 429 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); |
430 | 430 | ||
431 | } | 431 | } |
432 | return 1; | 432 | return 1; |
433 | } | 433 | } |
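handle_global_purge() only queues work: each remote vcpu gets the purge buffered in its arch.ptc_g_data[] array by vcpu_global_purge(), and replays it when it next enters the guest; on overflow (more than MAX_PTC_G_NUM pending entries) the request degrades to a full TLB flush. A hedged sketch of what the consumer side looks like before guest entry (kvm_replay_ptc_g() is a hypothetical name for the code that walks the buffered entries):

        static void kvm_process_purge_requests(struct kvm_vcpu *vcpu)
        {
                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        kvm_flush_tlb_all();            /* overflow fallback */
                else if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests))
                        kvm_replay_ptc_g(vcpu);         /* hypothetical helper */
        }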
434 | 434 | ||
435 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 435 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
436 | { | 436 | { |
437 | return 1; | 437 | return 1; |
438 | } | 438 | } |
439 | 439 | ||
440 | static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) | 440 | static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) |
441 | { | 441 | { |
442 | unsigned long pte, rtc_phys_addr, map_addr; | 442 | unsigned long pte, rtc_phys_addr, map_addr; |
443 | int slot; | 443 | int slot; |
444 | 444 | ||
445 | map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); | 445 | map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); |
446 | rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; | 446 | rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; |
447 | pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); | 447 | pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); |
448 | slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); | 448 | slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); |
449 | vcpu->arch.sn_rtc_tr_slot = slot; | 449 | vcpu->arch.sn_rtc_tr_slot = slot; |
450 | if (slot < 0) { | 450 | if (slot < 0) { |
451 | printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n"); | 451 | printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n"); |
452 | slot = 0; | 452 | slot = 0; |
453 | } | 453 | } |
454 | return slot; | 454 | return slot; |
455 | } | 455 | } |
456 | 456 | ||
457 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | 457 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) |
458 | { | 458 | { |
459 | 459 | ||
460 | ktime_t kt; | 460 | ktime_t kt; |
461 | long itc_diff; | 461 | long itc_diff; |
462 | unsigned long vcpu_now_itc; | 462 | unsigned long vcpu_now_itc; |
463 | unsigned long expires; | 463 | unsigned long expires; |
464 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | 464 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; |
465 | unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; | 465 | unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; |
466 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 466 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
467 | 467 | ||
468 | if (irqchip_in_kernel(vcpu->kvm)) { | 468 | if (irqchip_in_kernel(vcpu->kvm)) { |
469 | 469 | ||
470 | vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; | 470 | vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; |
471 | 471 | ||
472 | if (time_after(vcpu_now_itc, vpd->itm)) { | 472 | if (time_after(vcpu_now_itc, vpd->itm)) { |
473 | vcpu->arch.timer_check = 1; | 473 | vcpu->arch.timer_check = 1; |
474 | return 1; | 474 | return 1; |
475 | } | 475 | } |
476 | itc_diff = vpd->itm - vcpu_now_itc; | 476 | itc_diff = vpd->itm - vcpu_now_itc; |
477 | if (itc_diff < 0) | 477 | if (itc_diff < 0) |
478 | itc_diff = -itc_diff; | 478 | itc_diff = -itc_diff; |
479 | 479 | ||
480 | expires = div64_u64(itc_diff, cyc_per_usec); | 480 | expires = div64_u64(itc_diff, cyc_per_usec); |
481 | kt = ktime_set(0, 1000 * expires); | 481 | kt = ktime_set(0, 1000 * expires); |
482 | 482 | ||
483 | vcpu->arch.ht_active = 1; | 483 | vcpu->arch.ht_active = 1; |
484 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); | 484 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); |
485 | 485 | ||
486 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; | 486 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; |
487 | kvm_vcpu_block(vcpu); | 487 | kvm_vcpu_block(vcpu); |
488 | hrtimer_cancel(p_ht); | 488 | hrtimer_cancel(p_ht); |
489 | vcpu->arch.ht_active = 0; | 489 | vcpu->arch.ht_active = 0; |
490 | 490 | ||
491 | if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || | 491 | if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || |
492 | kvm_cpu_has_pending_timer(vcpu)) | 492 | kvm_cpu_has_pending_timer(vcpu)) |
493 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) | 493 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) |
494 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 494 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
495 | 495 | ||
496 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) | 496 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) |
497 | return -EINTR; | 497 | return -EINTR; |
498 | return 1; | 498 | return 1; |
499 | } else { | 499 | } else { |
500 | printk(KERN_ERR"kvm: Unsupported userspace halt!"); | 500 | printk(KERN_ERR"kvm: Unsupported userspace halt!"); |
501 | return 0; | 501 | return 0; |
502 | } | 502 | } |
503 | } | 503 | } |
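A worked example of the timer arithmetic above (values are illustrative): with cyc_per_usec = 1600, i.e. a 1.6 GHz ITC, and itc_diff = 3,200,000 cycles until the guest's next timer match, expires = div64_u64(3200000, 1600) = 2000 microseconds, so kt = ktime_set(0, 1000 * 2000) arms the hlt hrtimer 2,000,000 ns (2 ms) out, and the vcpu blocks until either that timer fires or an interrupt/unhalt request wakes it.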
504 | 504 | ||
505 | static int handle_vm_shutdown(struct kvm_vcpu *vcpu, | 505 | static int handle_vm_shutdown(struct kvm_vcpu *vcpu, |
506 | struct kvm_run *kvm_run) | 506 | struct kvm_run *kvm_run) |
507 | { | 507 | { |
508 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | 508 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; |
509 | return 0; | 509 | return 0; |
510 | } | 510 | } |
511 | 511 | ||
512 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | 512 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, |
513 | struct kvm_run *kvm_run) | 513 | struct kvm_run *kvm_run) |
514 | { | 514 | { |
515 | return 1; | 515 | return 1; |
516 | } | 516 | } |
517 | 517 | ||
518 | static int handle_vcpu_debug(struct kvm_vcpu *vcpu, | 518 | static int handle_vcpu_debug(struct kvm_vcpu *vcpu, |
519 | struct kvm_run *kvm_run) | 519 | struct kvm_run *kvm_run) |
520 | { | 520 | { |
521 | printk("VMM: %s", vcpu->arch.log_buf); | 521 | printk("VMM: %s", vcpu->arch.log_buf); |
522 | return 1; | 522 | return 1; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | 525 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, |
526 | struct kvm_run *kvm_run) = { | 526 | struct kvm_run *kvm_run) = { |
527 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | 527 | [EXIT_REASON_VM_PANIC] = handle_vm_error, |
528 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, | 528 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, |
529 | [EXIT_REASON_PAL_CALL] = handle_pal_call, | 529 | [EXIT_REASON_PAL_CALL] = handle_pal_call, |
530 | [EXIT_REASON_SAL_CALL] = handle_sal_call, | 530 | [EXIT_REASON_SAL_CALL] = handle_sal_call, |
531 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, | 531 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, |
532 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, | 532 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, |
533 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 533 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
534 | [EXIT_REASON_IPI] = handle_ipi, | 534 | [EXIT_REASON_IPI] = handle_ipi, |
535 | [EXIT_REASON_PTC_G] = handle_global_purge, | 535 | [EXIT_REASON_PTC_G] = handle_global_purge, |
536 | [EXIT_REASON_DEBUG] = handle_vcpu_debug, | 536 | [EXIT_REASON_DEBUG] = handle_vcpu_debug, |
537 | 537 | ||
538 | }; | 538 | }; |
539 | 539 | ||
540 | static const int kvm_vti_max_exit_handlers = | 540 | static const int kvm_vti_max_exit_handlers = |
541 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); | 541 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); |
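The open-coded sizeof()/sizeof(*) division is simply the element count of the handler table; an equivalent formulation using the kernel's helper from <linux/kernel.h> would be:

        static const int kvm_vti_max_exit_handlers = ARRAY_SIZE(kvm_vti_exit_handlers);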
542 | 542 | ||
543 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) | 543 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) |
544 | { | 544 | { |
545 | struct exit_ctl_data *p_exit_data; | 545 | struct exit_ctl_data *p_exit_data; |
546 | 546 | ||
547 | p_exit_data = kvm_get_exit_data(vcpu); | 547 | p_exit_data = kvm_get_exit_data(vcpu); |
548 | return p_exit_data->exit_reason; | 548 | return p_exit_data->exit_reason; |
549 | } | 549 | } |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * The guest has exited. See if we can fix it or if we need userspace | 552 | * The guest has exited. See if we can fix it or if we need userspace |
553 | * assistance. | 553 | * assistance. |
554 | */ | 554 | */ |
555 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 555 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
556 | { | 556 | { |
557 | u32 exit_reason = kvm_get_exit_reason(vcpu); | 557 | u32 exit_reason = kvm_get_exit_reason(vcpu); |
558 | vcpu->arch.last_exit = exit_reason; | 558 | vcpu->arch.last_exit = exit_reason; |
559 | 559 | ||
560 | if (exit_reason < kvm_vti_max_exit_handlers | 560 | if (exit_reason < kvm_vti_max_exit_handlers |
561 | && kvm_vti_exit_handlers[exit_reason]) | 561 | && kvm_vti_exit_handlers[exit_reason]) |
562 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); | 562 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); |
563 | else { | 563 | else { |
564 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 564 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; |
565 | kvm_run->hw.hardware_exit_reason = exit_reason; | 565 | kvm_run->hw.hardware_exit_reason = exit_reason; |
566 | } | 566 | } |
567 | return 0; | 567 | return 0; |
568 | } | 568 | } |
569 | 569 | ||
570 | static inline void vti_set_rr6(unsigned long rr6) | 570 | static inline void vti_set_rr6(unsigned long rr6) |
571 | { | 571 | { |
572 | ia64_set_rr(RR6, rr6); | 572 | ia64_set_rr(RR6, rr6); |
573 | ia64_srlz_i(); | 573 | ia64_srlz_i(); |
574 | } | 574 | } |
575 | 575 | ||
576 | static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) | 576 | static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) |
577 | { | 577 | { |
578 | unsigned long pte; | 578 | unsigned long pte; |
579 | struct kvm *kvm = vcpu->kvm; | 579 | struct kvm *kvm = vcpu->kvm; |
580 | int r; | 580 | int r; |
581 | 581 | ||
582 | /*Insert a pair of tr to map vmm*/ | 582 | /*Insert a pair of tr to map vmm*/ |
583 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | 583 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); |
584 | r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 584 | r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
585 | if (r < 0) | 585 | if (r < 0) |
586 | goto out; | 586 | goto out; |
587 | vcpu->arch.vmm_tr_slot = r; | 587 | vcpu->arch.vmm_tr_slot = r; |
588 | /*Insert a pair of tr to map data of vm*/ | 588 | /*Insert a pair of tr to map data of vm*/ |
589 | pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); | 589 | pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); |
590 | r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, | 590 | r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, |
591 | pte, KVM_VM_DATA_SHIFT); | 591 | pte, KVM_VM_DATA_SHIFT); |
592 | if (r < 0) | 592 | if (r < 0) |
593 | goto out; | 593 | goto out; |
594 | vcpu->arch.vm_tr_slot = r; | 594 | vcpu->arch.vm_tr_slot = r; |
595 | 595 | ||
596 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | 596 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) |
597 | if (kvm->arch.is_sn2) { | 597 | if (kvm->arch.is_sn2) { |
598 | r = kvm_sn2_setup_mappings(vcpu); | 598 | r = kvm_sn2_setup_mappings(vcpu); |
599 | if (r < 0) | 599 | if (r < 0) |
600 | goto out; | 600 | goto out; |
601 | } | 601 | } |
602 | #endif | 602 | #endif |
603 | 603 | ||
604 | r = 0; | 604 | r = 0; |
605 | out: | 605 | out: |
606 | return r; | 606 | return r; |
607 | } | 607 | } |
608 | 608 | ||
609 | static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) | 609 | static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) |
610 | { | 610 | { |
611 | struct kvm *kvm = vcpu->kvm; | 611 | struct kvm *kvm = vcpu->kvm; |
612 | ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); | 612 | ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); |
613 | ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); | 613 | ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); |
614 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | 614 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) |
615 | if (kvm->arch.is_sn2) | 615 | if (kvm->arch.is_sn2) |
616 | ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); | 616 | ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); |
617 | #endif | 617 | #endif |
618 | } | 618 | } |
619 | 619 | ||
620 | static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) | 620 | static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) |
621 | { | 621 | { |
622 | unsigned long psr; | 622 | unsigned long psr; |
623 | int r; | 623 | int r; |
624 | int cpu = smp_processor_id(); | 624 | int cpu = smp_processor_id(); |
625 | 625 | ||
626 | if (vcpu->arch.last_run_cpu != cpu || | 626 | if (vcpu->arch.last_run_cpu != cpu || |
627 | per_cpu(last_vcpu, cpu) != vcpu) { | 627 | per_cpu(last_vcpu, cpu) != vcpu) { |
628 | per_cpu(last_vcpu, cpu) = vcpu; | 628 | per_cpu(last_vcpu, cpu) = vcpu; |
629 | vcpu->arch.last_run_cpu = cpu; | 629 | vcpu->arch.last_run_cpu = cpu; |
630 | kvm_flush_tlb_all(); | 630 | kvm_flush_tlb_all(); |
631 | } | 631 | } |
632 | 632 | ||
633 | vcpu->arch.host_rr6 = ia64_get_rr(RR6); | 633 | vcpu->arch.host_rr6 = ia64_get_rr(RR6); |
634 | vti_set_rr6(vcpu->arch.vmm_rr); | 634 | vti_set_rr6(vcpu->arch.vmm_rr); |
635 | local_irq_save(psr); | 635 | local_irq_save(psr); |
636 | r = kvm_insert_vmm_mapping(vcpu); | 636 | r = kvm_insert_vmm_mapping(vcpu); |
637 | local_irq_restore(psr); | 637 | local_irq_restore(psr); |
638 | return r; | 638 | return r; |
639 | } | 639 | } |
640 | 640 | ||
641 | static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) | 641 | static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) |
642 | { | 642 | { |
643 | kvm_purge_vmm_mapping(vcpu); | 643 | kvm_purge_vmm_mapping(vcpu); |
644 | vti_set_rr6(vcpu->arch.host_rr6); | 644 | vti_set_rr6(vcpu->arch.host_rr6); |
645 | } | 645 | } |
646 | 646 | ||
647 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 647 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
648 | { | 648 | { |
649 | union context *host_ctx, *guest_ctx; | 649 | union context *host_ctx, *guest_ctx; |
650 | int r; | 650 | int r; |
651 | 651 | ||
652 | /* | 652 | /* |
653 | * down_read() may sleep and return with interrupts enabled | 653 | * down_read() may sleep and return with interrupts enabled |
654 | */ | 654 | */ |
655 | down_read(&vcpu->kvm->slots_lock); | 655 | down_read(&vcpu->kvm->slots_lock); |
656 | 656 | ||
657 | again: | 657 | again: |
658 | if (signal_pending(current)) { | 658 | if (signal_pending(current)) { |
659 | r = -EINTR; | 659 | r = -EINTR; |
660 | kvm_run->exit_reason = KVM_EXIT_INTR; | 660 | kvm_run->exit_reason = KVM_EXIT_INTR; |
661 | goto out; | 661 | goto out; |
662 | } | 662 | } |
663 | 663 | ||
664 | preempt_disable(); | 664 | preempt_disable(); |
665 | local_irq_disable(); | 665 | local_irq_disable(); |
666 | 666 | ||
667 | /*Get host and guest context with guest address space.*/ | 667 | /*Get host and guest context with guest address space.*/ |
668 | host_ctx = kvm_get_host_context(vcpu); | 668 | host_ctx = kvm_get_host_context(vcpu); |
669 | guest_ctx = kvm_get_guest_context(vcpu); | 669 | guest_ctx = kvm_get_guest_context(vcpu); |
670 | 670 | ||
671 | clear_bit(KVM_REQ_KICK, &vcpu->requests); | 671 | clear_bit(KVM_REQ_KICK, &vcpu->requests); |
672 | 672 | ||
673 | r = kvm_vcpu_pre_transition(vcpu); | 673 | r = kvm_vcpu_pre_transition(vcpu); |
674 | if (r < 0) | 674 | if (r < 0) |
675 | goto vcpu_run_fail; | 675 | goto vcpu_run_fail; |
676 | 676 | ||
677 | up_read(&vcpu->kvm->slots_lock); | 677 | up_read(&vcpu->kvm->slots_lock); |
678 | kvm_guest_enter(); | 678 | kvm_guest_enter(); |
679 | 679 | ||
680 | /* | 680 | /* |
681 | * Transition to the guest | 681 | * Transition to the guest |
682 | */ | 682 | */ |
683 | kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); | 683 | kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); |
684 | 684 | ||
685 | kvm_vcpu_post_transition(vcpu); | 685 | kvm_vcpu_post_transition(vcpu); |
686 | 686 | ||
687 | vcpu->arch.launched = 1; | 687 | vcpu->arch.launched = 1; |
688 | set_bit(KVM_REQ_KICK, &vcpu->requests); | 688 | set_bit(KVM_REQ_KICK, &vcpu->requests); |
689 | local_irq_enable(); | 689 | local_irq_enable(); |
690 | 690 | ||
691 | /* | 691 | /* |
692 | * We must have an instruction between local_irq_enable() and | 692 | * We must have an instruction between local_irq_enable() and |
693 | * kvm_guest_exit(), so the timer interrupt isn't delayed by | 693 | * kvm_guest_exit(), so the timer interrupt isn't delayed by |
694 | * the interrupt shadow. The stat.exits increment will do nicely. | 694 | * the interrupt shadow. The stat.exits increment will do nicely. |
695 | * But we need to prevent reordering, hence this barrier(): | 695 | * But we need to prevent reordering, hence this barrier(): |
696 | */ | 696 | */ |
697 | barrier(); | 697 | barrier(); |
698 | kvm_guest_exit(); | 698 | kvm_guest_exit(); |
699 | preempt_enable(); | 699 | preempt_enable(); |
700 | 700 | ||
701 | down_read(&vcpu->kvm->slots_lock); | 701 | down_read(&vcpu->kvm->slots_lock); |
702 | 702 | ||
703 | r = kvm_handle_exit(kvm_run, vcpu); | 703 | r = kvm_handle_exit(kvm_run, vcpu); |
704 | 704 | ||
705 | if (r > 0) { | 705 | if (r > 0) { |
706 | if (!need_resched()) | 706 | if (!need_resched()) |
707 | goto again; | 707 | goto again; |
708 | } | 708 | } |
709 | 709 | ||
710 | out: | 710 | out: |
711 | up_read(&vcpu->kvm->slots_lock); | 711 | up_read(&vcpu->kvm->slots_lock); |
712 | if (r > 0) { | 712 | if (r > 0) { |
713 | kvm_resched(vcpu); | 713 | kvm_resched(vcpu); |
714 | down_read(&vcpu->kvm->slots_lock); | 714 | down_read(&vcpu->kvm->slots_lock); |
715 | goto again; | 715 | goto again; |
716 | } | 716 | } |
717 | 717 | ||
718 | return r; | 718 | return r; |
719 | 719 | ||
720 | vcpu_run_fail: | 720 | vcpu_run_fail: |
721 | local_irq_enable(); | 721 | local_irq_enable(); |
722 | preempt_enable(); | 722 | preempt_enable(); |
723 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 723 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
724 | goto out; | 724 | goto out; |
725 | } | 725 | } |
726 | 726 | ||
727 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) | 727 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) |
728 | { | 728 | { |
729 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); | 729 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); |
730 | 730 | ||
731 | if (!vcpu->mmio_is_write) | 731 | if (!vcpu->mmio_is_write) |
732 | memcpy(&p->data, vcpu->mmio_data, 8); | 732 | memcpy(&p->data, vcpu->mmio_data, 8); |
733 | p->state = STATE_IORESP_READY; | 733 | p->state = STATE_IORESP_READY; |
734 | } | 734 | } |
735 | 735 | ||
736 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 736 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
737 | { | 737 | { |
738 | int r; | 738 | int r; |
739 | sigset_t sigsaved; | 739 | sigset_t sigsaved; |
740 | 740 | ||
741 | vcpu_load(vcpu); | 741 | vcpu_load(vcpu); |
742 | 742 | ||
743 | if (vcpu->sigset_active) | 743 | if (vcpu->sigset_active) |
744 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 744 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
745 | 745 | ||
746 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { | 746 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
747 | kvm_vcpu_block(vcpu); | 747 | kvm_vcpu_block(vcpu); |
748 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 748 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
749 | r = -EAGAIN; | 749 | r = -EAGAIN; |
750 | goto out; | 750 | goto out; |
751 | } | 751 | } |
752 | 752 | ||
753 | if (vcpu->mmio_needed) { | 753 | if (vcpu->mmio_needed) { |
754 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); | 754 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); |
755 | kvm_set_mmio_data(vcpu); | 755 | kvm_set_mmio_data(vcpu); |
756 | vcpu->mmio_read_completed = 1; | 756 | vcpu->mmio_read_completed = 1; |
757 | vcpu->mmio_needed = 0; | 757 | vcpu->mmio_needed = 0; |
758 | } | 758 | } |
759 | r = __vcpu_run(vcpu, kvm_run); | 759 | r = __vcpu_run(vcpu, kvm_run); |
760 | out: | 760 | out: |
761 | if (vcpu->sigset_active) | 761 | if (vcpu->sigset_active) |
762 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 762 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
763 | 763 | ||
764 | vcpu_put(vcpu); | 764 | vcpu_put(vcpu); |
765 | return r; | 765 | return r; |
766 | } | 766 | } |
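kvm_arch_vcpu_ioctl_run() is the ia64 backend of the KVM_RUN vcpu ioctl: it re-injects completed MMIO data, then loops in __vcpu_run() until an exit needs userspace. A hedged sketch of the matching userspace loop (vcpu_fd and the mmap'ed kvm_run region are assumed to exist; handle_mmio_exit() is the hypothetical helper sketched after handle_mmio() above):

        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        void handle_mmio_exit(struct kvm_run *run);     /* sketched earlier */

        static void run_vcpu(int vcpu_fd, struct kvm_run *run)
        {
                for (;;) {
                        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                                return;                 /* -EINTR, -EAGAIN, ... */

                        switch (run->exit_reason) {
                        case KVM_EXIT_MMIO:
                                handle_mmio_exit(run);
                                break;
                        case KVM_EXIT_SHUTDOWN:
                        case KVM_EXIT_FAIL_ENTRY:
                        default:
                                return;                 /* hand back to the VMM */
                        }
                }
        }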
767 | 767 | ||
768 | static struct kvm *kvm_alloc_kvm(void) | 768 | static struct kvm *kvm_alloc_kvm(void) |
769 | { | 769 | { |
770 | 770 | ||
771 | struct kvm *kvm; | 771 | struct kvm *kvm; |
772 | uint64_t vm_base; | 772 | uint64_t vm_base; |
773 | 773 | ||
774 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); | 774 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); |
775 | 775 | ||
776 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | 776 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); |
777 | 777 | ||
778 | if (!vm_base) | 778 | if (!vm_base) |
779 | return ERR_PTR(-ENOMEM); | 779 | return ERR_PTR(-ENOMEM); |
780 | 780 | ||
781 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | 781 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); |
782 | kvm = (struct kvm *)(vm_base + | 782 | kvm = (struct kvm *)(vm_base + |
783 | offsetof(struct kvm_vm_data, kvm_vm_struct)); | 783 | offsetof(struct kvm_vm_data, kvm_vm_struct)); |
784 | kvm->arch.vm_base = vm_base; | 784 | kvm->arch.vm_base = vm_base; |
785 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); | 785 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); |
786 | 786 | ||
787 | return kvm; | 787 | return kvm; |
788 | } | 788 | } |
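Note that struct kvm is not allocated on its own here: the whole VM data area is one physically contiguous, page-aligned block (so the VMM can map it with a single translation register), and struct kvm lives at a fixed offset inside it. The placement can be inverted, which is all kvm->arch.vm_base records:

        /* illustration only, mirroring the arithmetic above */
        unsigned long vm_base = (unsigned long)kvm -
                                offsetof(struct kvm_vm_data, kvm_vm_struct);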
789 | 789 | ||
790 | struct kvm_io_range { | 790 | struct kvm_io_range { |
791 | unsigned long start; | 791 | unsigned long start; |
792 | unsigned long size; | 792 | unsigned long size; |
793 | unsigned long type; | 793 | unsigned long type; |
794 | }; | 794 | }; |
795 | 795 | ||
796 | static const struct kvm_io_range io_ranges[] = { | 796 | static const struct kvm_io_range io_ranges[] = { |
797 | {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, | 797 | {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, |
798 | {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, | 798 | {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, |
799 | {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, | 799 | {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, |
800 | {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, | 800 | {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, |
801 | {PIB_START, PIB_SIZE, GPFN_PIB}, | 801 | {PIB_START, PIB_SIZE, GPFN_PIB}, |
802 | }; | 802 | }; |
803 | 803 | ||
804 | static void kvm_build_io_pmt(struct kvm *kvm) | 804 | static void kvm_build_io_pmt(struct kvm *kvm) |
805 | { | 805 | { |
806 | unsigned long i, j; | 806 | unsigned long i, j; |
807 | 807 | ||
808 | /* Mark I/O ranges */ | 808 | /* Mark I/O ranges */ |
809 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); | 809 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); |
810 | i++) { | 810 | i++) { |
811 | for (j = io_ranges[i].start; | 811 | for (j = io_ranges[i].start; |
812 | j < io_ranges[i].start + io_ranges[i].size; | 812 | j < io_ranges[i].start + io_ranges[i].size; |
813 | j += PAGE_SIZE) | 813 | j += PAGE_SIZE) |
814 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, | 814 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, |
815 | io_ranges[i].type, 0); | 815 | io_ranges[i].type, 0); |
816 | } | 816 | } |
817 | 817 | ||
818 | } | 818 | } |
819 | 819 | ||
820 | /*Use unused rids to virtualize guest rid.*/ | 820 | /*Use unused rids to virtualize guest rid.*/ |
821 | #define GUEST_PHYSICAL_RR0 0x1739 | 821 | #define GUEST_PHYSICAL_RR0 0x1739 |
822 | #define GUEST_PHYSICAL_RR4 0x2739 | 822 | #define GUEST_PHYSICAL_RR4 0x2739 |
823 | #define VMM_INIT_RR 0x1660 | 823 | #define VMM_INIT_RR 0x1660 |
824 | 824 | ||
825 | static void kvm_init_vm(struct kvm *kvm) | 825 | static void kvm_init_vm(struct kvm *kvm) |
826 | { | 826 | { |
827 | BUG_ON(!kvm); | 827 | BUG_ON(!kvm); |
828 | 828 | ||
829 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | 829 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; |
830 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | 830 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; |
831 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | 831 | kvm->arch.vmm_init_rr = VMM_INIT_RR; |
832 | 832 | ||
833 | /* | 833 | /* |
834 | *Fill P2M entries for MMIO/IO ranges | 834 | *Fill P2M entries for MMIO/IO ranges |
835 | */ | 835 | */ |
836 | kvm_build_io_pmt(kvm); | 836 | kvm_build_io_pmt(kvm); |
837 | 837 | ||
838 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | 838 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
839 | 839 | ||
840 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | 840 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ |
841 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); | 841 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); |
842 | } | 842 | } |
843 | 843 | ||
844 | struct kvm *kvm_arch_create_vm(void) | 844 | struct kvm *kvm_arch_create_vm(void) |
845 | { | 845 | { |
846 | struct kvm *kvm = kvm_alloc_kvm(); | 846 | struct kvm *kvm = kvm_alloc_kvm(); |
847 | 847 | ||
848 | if (IS_ERR(kvm)) | 848 | if (IS_ERR(kvm)) |
849 | return ERR_PTR(-ENOMEM); | 849 | return ERR_PTR(-ENOMEM); |
850 | 850 | ||
851 | kvm->arch.is_sn2 = ia64_platform_is("sn2"); | 851 | kvm->arch.is_sn2 = ia64_platform_is("sn2"); |
852 | 852 | ||
853 | kvm_init_vm(kvm); | 853 | kvm_init_vm(kvm); |
854 | 854 | ||
855 | kvm->arch.online_vcpus = 0; | ||
856 | |||
857 | return kvm; | 855 | return kvm; |
858 | 856 | ||
859 | } | 857 | } |
860 | 858 | ||
861 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | 859 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, |
862 | struct kvm_irqchip *chip) | 860 | struct kvm_irqchip *chip) |
863 | { | 861 | { |
864 | int r; | 862 | int r; |
865 | 863 | ||
866 | r = 0; | 864 | r = 0; |
867 | switch (chip->chip_id) { | 865 | switch (chip->chip_id) { |
868 | case KVM_IRQCHIP_IOAPIC: | 866 | case KVM_IRQCHIP_IOAPIC: |
869 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | 867 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), |
870 | sizeof(struct kvm_ioapic_state)); | 868 | sizeof(struct kvm_ioapic_state)); |
871 | break; | 869 | break; |
872 | default: | 870 | default: |
873 | r = -EINVAL; | 871 | r = -EINVAL; |
874 | break; | 872 | break; |
875 | } | 873 | } |
876 | return r; | 874 | return r; |
877 | } | 875 | } |
878 | 876 | ||
879 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | 877 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) |
880 | { | 878 | { |
881 | int r; | 879 | int r; |
882 | 880 | ||
883 | r = 0; | 881 | r = 0; |
884 | switch (chip->chip_id) { | 882 | switch (chip->chip_id) { |
885 | case KVM_IRQCHIP_IOAPIC: | 883 | case KVM_IRQCHIP_IOAPIC: |
886 | memcpy(ioapic_irqchip(kvm), | 884 | memcpy(ioapic_irqchip(kvm), |
887 | &chip->chip.ioapic, | 885 | &chip->chip.ioapic, |
888 | sizeof(struct kvm_ioapic_state)); | 886 | sizeof(struct kvm_ioapic_state)); |
889 | break; | 887 | break; |
890 | default: | 888 | default: |
891 | r = -EINVAL; | 889 | r = -EINVAL; |
892 | break; | 890 | break; |
893 | } | 891 | } |
894 | return r; | 892 | return r; |
895 | } | 893 | } |
896 | 894 | ||
897 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x | 895 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x |
898 | 896 | ||
899 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 897 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
900 | { | 898 | { |
901 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 899 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
902 | int i; | 900 | int i; |
903 | 901 | ||
904 | vcpu_load(vcpu); | 902 | vcpu_load(vcpu); |
905 | 903 | ||
906 | for (i = 0; i < 16; i++) { | 904 | for (i = 0; i < 16; i++) { |
907 | vpd->vgr[i] = regs->vpd.vgr[i]; | 905 | vpd->vgr[i] = regs->vpd.vgr[i]; |
908 | vpd->vbgr[i] = regs->vpd.vbgr[i]; | 906 | vpd->vbgr[i] = regs->vpd.vbgr[i]; |
909 | } | 907 | } |
910 | for (i = 0; i < 128; i++) | 908 | for (i = 0; i < 128; i++) |
911 | vpd->vcr[i] = regs->vpd.vcr[i]; | 909 | vpd->vcr[i] = regs->vpd.vcr[i]; |
912 | vpd->vhpi = regs->vpd.vhpi; | 910 | vpd->vhpi = regs->vpd.vhpi; |
913 | vpd->vnat = regs->vpd.vnat; | 911 | vpd->vnat = regs->vpd.vnat; |
914 | vpd->vbnat = regs->vpd.vbnat; | 912 | vpd->vbnat = regs->vpd.vbnat; |
915 | vpd->vpsr = regs->vpd.vpsr; | 913 | vpd->vpsr = regs->vpd.vpsr; |
916 | 914 | ||
917 | vpd->vpr = regs->vpd.vpr; | 915 | vpd->vpr = regs->vpd.vpr; |
918 | 916 | ||
919 | memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); | 917 | memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); |
920 | 918 | ||
921 | RESTORE_REGS(mp_state); | 919 | RESTORE_REGS(mp_state); |
922 | RESTORE_REGS(vmm_rr); | 920 | RESTORE_REGS(vmm_rr); |
923 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); | 921 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); |
924 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); | 922 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); |
925 | RESTORE_REGS(itr_regions); | 923 | RESTORE_REGS(itr_regions); |
926 | RESTORE_REGS(dtr_regions); | 924 | RESTORE_REGS(dtr_regions); |
927 | RESTORE_REGS(tc_regions); | 925 | RESTORE_REGS(tc_regions); |
928 | RESTORE_REGS(irq_check); | 926 | RESTORE_REGS(irq_check); |
929 | RESTORE_REGS(itc_check); | 927 | RESTORE_REGS(itc_check); |
930 | RESTORE_REGS(timer_check); | 928 | RESTORE_REGS(timer_check); |
931 | RESTORE_REGS(timer_pending); | 929 | RESTORE_REGS(timer_pending); |
932 | RESTORE_REGS(last_itc); | 930 | RESTORE_REGS(last_itc); |
933 | for (i = 0; i < 8; i++) { | 931 | for (i = 0; i < 8; i++) { |
934 | vcpu->arch.vrr[i] = regs->vrr[i]; | 932 | vcpu->arch.vrr[i] = regs->vrr[i]; |
935 | vcpu->arch.ibr[i] = regs->ibr[i]; | 933 | vcpu->arch.ibr[i] = regs->ibr[i]; |
936 | vcpu->arch.dbr[i] = regs->dbr[i]; | 934 | vcpu->arch.dbr[i] = regs->dbr[i]; |
937 | } | 935 | } |
938 | for (i = 0; i < 4; i++) | 936 | for (i = 0; i < 4; i++) |
939 | vcpu->arch.insvc[i] = regs->insvc[i]; | 937 | vcpu->arch.insvc[i] = regs->insvc[i]; |
940 | RESTORE_REGS(xtp); | 938 | RESTORE_REGS(xtp); |
941 | RESTORE_REGS(metaphysical_rr0); | 939 | RESTORE_REGS(metaphysical_rr0); |
942 | RESTORE_REGS(metaphysical_rr4); | 940 | RESTORE_REGS(metaphysical_rr4); |
943 | RESTORE_REGS(metaphysical_saved_rr0); | 941 | RESTORE_REGS(metaphysical_saved_rr0); |
944 | RESTORE_REGS(metaphysical_saved_rr4); | 942 | RESTORE_REGS(metaphysical_saved_rr4); |
945 | RESTORE_REGS(fp_psr); | 943 | RESTORE_REGS(fp_psr); |
946 | RESTORE_REGS(saved_gp); | 944 | RESTORE_REGS(saved_gp); |
947 | 945 | ||
948 | vcpu->arch.irq_new_pending = 1; | 946 | vcpu->arch.irq_new_pending = 1; |
949 | vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); | 947 | vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); |
950 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | 948 | set_bit(KVM_REQ_RESUME, &vcpu->requests); |
951 | 949 | ||
952 | vcpu_put(vcpu); | 950 | vcpu_put(vcpu); |
953 | 951 | ||
954 | return 0; | 952 | return 0; |
955 | } | 953 | } |
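The RESTORE_REGS() helper defined just above this function is plain textual shorthand; for instance RESTORE_REGS(mp_state) expands to:

        vcpu->arch.mp_state = regs->mp_state;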
956 | 954 | ||
957 | long kvm_arch_vm_ioctl(struct file *filp, | 955 | long kvm_arch_vm_ioctl(struct file *filp, |
958 | unsigned int ioctl, unsigned long arg) | 956 | unsigned int ioctl, unsigned long arg) |
959 | { | 957 | { |
960 | struct kvm *kvm = filp->private_data; | 958 | struct kvm *kvm = filp->private_data; |
961 | void __user *argp = (void __user *)arg; | 959 | void __user *argp = (void __user *)arg; |
962 | int r = -EINVAL; | 960 | int r = -EINVAL; |
963 | 961 | ||
964 | switch (ioctl) { | 962 | switch (ioctl) { |
965 | case KVM_SET_MEMORY_REGION: { | 963 | case KVM_SET_MEMORY_REGION: { |
966 | struct kvm_memory_region kvm_mem; | 964 | struct kvm_memory_region kvm_mem; |
967 | struct kvm_userspace_memory_region kvm_userspace_mem; | 965 | struct kvm_userspace_memory_region kvm_userspace_mem; |
968 | 966 | ||
969 | r = -EFAULT; | 967 | r = -EFAULT; |
970 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) | 968 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) |
971 | goto out; | 969 | goto out; |
972 | kvm_userspace_mem.slot = kvm_mem.slot; | 970 | kvm_userspace_mem.slot = kvm_mem.slot; |
973 | kvm_userspace_mem.flags = kvm_mem.flags; | 971 | kvm_userspace_mem.flags = kvm_mem.flags; |
974 | kvm_userspace_mem.guest_phys_addr = | 972 | kvm_userspace_mem.guest_phys_addr = |
975 | kvm_mem.guest_phys_addr; | 973 | kvm_mem.guest_phys_addr; |
976 | kvm_userspace_mem.memory_size = kvm_mem.memory_size; | 974 | kvm_userspace_mem.memory_size = kvm_mem.memory_size; |
977 | r = kvm_vm_ioctl_set_memory_region(kvm, | 975 | r = kvm_vm_ioctl_set_memory_region(kvm, |
978 | &kvm_userspace_mem, 0); | 976 | &kvm_userspace_mem, 0); |
979 | if (r) | 977 | if (r) |
980 | goto out; | 978 | goto out; |
981 | break; | 979 | break; |
982 | } | 980 | } |
983 | case KVM_CREATE_IRQCHIP: | 981 | case KVM_CREATE_IRQCHIP: |
984 | r = -EFAULT; | 982 | r = -EFAULT; |
985 | r = kvm_ioapic_init(kvm); | 983 | r = kvm_ioapic_init(kvm); |
986 | if (r) | 984 | if (r) |
987 | goto out; | 985 | goto out; |
988 | r = kvm_setup_default_irq_routing(kvm); | 986 | r = kvm_setup_default_irq_routing(kvm); |
989 | if (r) { | 987 | if (r) { |
990 | kfree(kvm->arch.vioapic); | 988 | kfree(kvm->arch.vioapic); |
991 | goto out; | 989 | goto out; |
992 | } | 990 | } |
993 | break; | 991 | break; |
994 | case KVM_IRQ_LINE_STATUS: | 992 | case KVM_IRQ_LINE_STATUS: |
995 | case KVM_IRQ_LINE: { | 993 | case KVM_IRQ_LINE: { |
996 | struct kvm_irq_level irq_event; | 994 | struct kvm_irq_level irq_event; |
997 | 995 | ||
998 | r = -EFAULT; | 996 | r = -EFAULT; |
999 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | 997 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) |
1000 | goto out; | 998 | goto out; |
1001 | if (irqchip_in_kernel(kvm)) { | 999 | if (irqchip_in_kernel(kvm)) { |
1002 | __s32 status; | 1000 | __s32 status; |
1003 | mutex_lock(&kvm->irq_lock); | 1001 | mutex_lock(&kvm->irq_lock); |
1004 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 1002 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
1005 | irq_event.irq, irq_event.level); | 1003 | irq_event.irq, irq_event.level); |
1006 | mutex_unlock(&kvm->irq_lock); | 1004 | mutex_unlock(&kvm->irq_lock); |
1007 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 1005 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
1008 | irq_event.status = status; | 1006 | irq_event.status = status; |
1009 | if (copy_to_user(argp, &irq_event, | 1007 | if (copy_to_user(argp, &irq_event, |
1010 | sizeof irq_event)) | 1008 | sizeof irq_event)) |
1011 | goto out; | 1009 | goto out; |
1012 | } | 1010 | } |
1013 | r = 0; | 1011 | r = 0; |
1014 | } | 1012 | } |
1015 | break; | 1013 | break; |
1016 | } | 1014 | } |
1017 | case KVM_GET_IRQCHIP: { | 1015 | case KVM_GET_IRQCHIP: { |
1018 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | 1016 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ |
1019 | struct kvm_irqchip chip; | 1017 | struct kvm_irqchip chip; |
1020 | 1018 | ||
1021 | r = -EFAULT; | 1019 | r = -EFAULT; |
1022 | if (copy_from_user(&chip, argp, sizeof chip)) | 1020 | if (copy_from_user(&chip, argp, sizeof chip)) |
1023 | goto out; | 1021 | goto out; |
1024 | r = -ENXIO; | 1022 | r = -ENXIO; |
1025 | if (!irqchip_in_kernel(kvm)) | 1023 | if (!irqchip_in_kernel(kvm)) |
1026 | goto out; | 1024 | goto out; |
1027 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); | 1025 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); |
1028 | if (r) | 1026 | if (r) |
1029 | goto out; | 1027 | goto out; |
1030 | r = -EFAULT; | 1028 | r = -EFAULT; |
1031 | if (copy_to_user(argp, &chip, sizeof chip)) | 1029 | if (copy_to_user(argp, &chip, sizeof chip)) |
1032 | goto out; | 1030 | goto out; |
1033 | r = 0; | 1031 | r = 0; |
1034 | break; | 1032 | break; |
1035 | } | 1033 | } |
1036 | case KVM_SET_IRQCHIP: { | 1034 | case KVM_SET_IRQCHIP: { |
1037 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | 1035 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ |
1038 | struct kvm_irqchip chip; | 1036 | struct kvm_irqchip chip; |
1039 | 1037 | ||
1040 | r = -EFAULT; | 1038 | r = -EFAULT; |
1041 | if (copy_from_user(&chip, argp, sizeof chip)) | 1039 | if (copy_from_user(&chip, argp, sizeof chip)) |
1042 | goto out; | 1040 | goto out; |
1043 | r = -ENXIO; | 1041 | r = -ENXIO; |
1044 | if (!irqchip_in_kernel(kvm)) | 1042 | if (!irqchip_in_kernel(kvm)) |
1045 | goto out; | 1043 | goto out; |
1046 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); | 1044 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); |
1047 | if (r) | 1045 | if (r) |
1048 | goto out; | 1046 | goto out; |
1049 | r = 0; | 1047 | r = 0; |
1050 | break; | 1048 | break; |
1051 | } | 1049 | } |
1052 | default: | 1050 | default: |
1053 | ; | 1051 | ; |
1054 | } | 1052 | } |
1055 | out: | 1053 | out: |
1056 | return r; | 1054 | return r; |
1057 | } | 1055 | } |
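The irqchip-related cases above are driven from userspace with VM ioctls; a hedged sketch of the caller side, assuming vm_fd is a KVM VM file descriptor and the in-kernel IOAPIC has already been created with ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0):

        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        /* Raise or lower a guest interrupt line through the in-kernel IOAPIC. */
        static int set_guest_irq(int vm_fd, unsigned int gsi, int level)
        {
                struct kvm_irq_level irq = {
                        .irq   = gsi,
                        .level = level,
                };

                return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
        }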
1058 | 1056 | ||
1059 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 1057 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
1060 | struct kvm_sregs *sregs) | 1058 | struct kvm_sregs *sregs) |
1061 | { | 1059 | { |
1062 | return -EINVAL; | 1060 | return -EINVAL; |
1063 | } | 1061 | } |
1064 | 1062 | ||
1065 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 1063 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1066 | struct kvm_sregs *sregs) | 1064 | struct kvm_sregs *sregs) |
1067 | { | 1065 | { |
1068 | return -EINVAL; | 1066 | return -EINVAL; |
1069 | 1067 | ||
1070 | } | 1068 | } |
1071 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 1069 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1072 | struct kvm_translation *tr) | 1070 | struct kvm_translation *tr) |
1073 | { | 1071 | { |
1074 | 1072 | ||
1075 | return -EINVAL; | 1073 | return -EINVAL; |
1076 | } | 1074 | } |
1077 | 1075 | ||
1078 | static int kvm_alloc_vmm_area(void) | 1076 | static int kvm_alloc_vmm_area(void) |
1079 | { | 1077 | { |
1080 | if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { | 1078 | if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { |
1081 | kvm_vmm_base = __get_free_pages(GFP_KERNEL, | 1079 | kvm_vmm_base = __get_free_pages(GFP_KERNEL, |
1082 | get_order(KVM_VMM_SIZE)); | 1080 | get_order(KVM_VMM_SIZE)); |
1083 | if (!kvm_vmm_base) | 1081 | if (!kvm_vmm_base) |
1084 | return -ENOMEM; | 1082 | return -ENOMEM; |
1085 | 1083 | ||
1086 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | 1084 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); |
1087 | kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; | 1085 | kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; |
1088 | 1086 | ||
1089 | printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", | 1087 | printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", |
1090 | kvm_vmm_base, kvm_vm_buffer); | 1088 | kvm_vmm_base, kvm_vm_buffer); |
1091 | } | 1089 | } |
1092 | 1090 | ||
1093 | return 0; | 1091 | return 0; |
1094 | } | 1092 | } |
1095 | 1093 | ||
1096 | static void kvm_free_vmm_area(void) | 1094 | static void kvm_free_vmm_area(void) |
1097 | { | 1095 | { |
1098 | if (kvm_vmm_base) { | 1096 | if (kvm_vmm_base) { |
1099 | /*Zero this area before free to avoid bits leak!!*/ | 1097 | /*Zero this area before free to avoid bits leak!!*/ |
1100 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | 1098 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); |
1101 | free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); | 1099 | free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); |
1102 | kvm_vmm_base = 0; | 1100 | kvm_vmm_base = 0; |
1103 | kvm_vm_buffer = 0; | 1101 | kvm_vm_buffer = 0; |
1104 | kvm_vsa_base = 0; | 1102 | kvm_vsa_base = 0; |
1105 | } | 1103 | } |
1106 | } | 1104 | } |
1107 | 1105 | ||
1108 | static int vti_init_vpd(struct kvm_vcpu *vcpu) | 1106 | static int vti_init_vpd(struct kvm_vcpu *vcpu) |
1109 | { | 1107 | { |
1110 | int i; | 1108 | int i; |
1111 | union cpuid3_t cpuid3; | 1109 | union cpuid3_t cpuid3; |
1112 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 1110 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
1113 | 1111 | ||
1114 | if (IS_ERR(vpd)) | 1112 | if (IS_ERR(vpd)) |
1115 | return PTR_ERR(vpd); | 1113 | return PTR_ERR(vpd); |
1116 | 1114 | ||
1117 | /* CPUID init */ | 1115 | /* CPUID init */ |
1118 | for (i = 0; i < 5; i++) | 1116 | for (i = 0; i < 5; i++) |
1119 | vpd->vcpuid[i] = ia64_get_cpuid(i); | 1117 | vpd->vcpuid[i] = ia64_get_cpuid(i); |
1120 | 1118 | ||
1121 | /* Limit the CPUID number to 5 */ | 1119 | /* Limit the CPUID number to 5 */ |
1122 | cpuid3.value = vpd->vcpuid[3]; | 1120 | cpuid3.value = vpd->vcpuid[3]; |
1123 | cpuid3.number = 4; /* 5 - 1 */ | 1121 | cpuid3.number = 4; /* 5 - 1 */ |
1124 | vpd->vcpuid[3] = cpuid3.value; | 1122 | vpd->vcpuid[3] = cpuid3.value; |
1125 | 1123 | ||
1126 | /*Set vac and vdc fields*/ | 1124 | /*Set vac and vdc fields*/ |
1127 | vpd->vac.a_from_int_cr = 1; | 1125 | vpd->vac.a_from_int_cr = 1; |
1128 | vpd->vac.a_to_int_cr = 1; | 1126 | vpd->vac.a_to_int_cr = 1; |
1129 | vpd->vac.a_from_psr = 1; | 1127 | vpd->vac.a_from_psr = 1; |
1130 | vpd->vac.a_from_cpuid = 1; | 1128 | vpd->vac.a_from_cpuid = 1; |
1131 | vpd->vac.a_cover = 1; | 1129 | vpd->vac.a_cover = 1; |
1132 | vpd->vac.a_bsw = 1; | 1130 | vpd->vac.a_bsw = 1; |
1133 | vpd->vac.a_int = 1; | 1131 | vpd->vac.a_int = 1; |
1134 | vpd->vdc.d_vmsw = 1; | 1132 | vpd->vdc.d_vmsw = 1; |
1135 | 1133 | ||
1136 | /*Set virtual buffer*/ | 1134 | /*Set virtual buffer*/ |
1137 | vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; | 1135 | vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; |
1138 | 1136 | ||
1139 | return 0; | 1137 | return 0; |
1140 | } | 1138 | } |
1141 | 1139 | ||
1142 | static int vti_create_vp(struct kvm_vcpu *vcpu) | 1140 | static int vti_create_vp(struct kvm_vcpu *vcpu) |
1143 | { | 1141 | { |
1144 | long ret; | 1142 | long ret; |
1145 | struct vpd *vpd = vcpu->arch.vpd; | 1143 | struct vpd *vpd = vcpu->arch.vpd; |
1146 | unsigned long vmm_ivt; | 1144 | unsigned long vmm_ivt; |
1147 | 1145 | ||
1148 | vmm_ivt = kvm_vmm_info->vmm_ivt; | 1146 | vmm_ivt = kvm_vmm_info->vmm_ivt; |
1149 | 1147 | ||
1150 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); | 1148 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); |
1151 | 1149 | ||
1152 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); | 1150 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); |
1153 | 1151 | ||
1154 | if (ret) { | 1152 | if (ret) { |
1155 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); | 1153 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); |
1156 | return -EINVAL; | 1154 | return -EINVAL; |
1157 | } | 1155 | } |
1158 | return 0; | 1156 | return 0; |
1159 | } | 1157 | } |
1160 | 1158 | ||
1161 | static void init_ptce_info(struct kvm_vcpu *vcpu) | 1159 | static void init_ptce_info(struct kvm_vcpu *vcpu) |
1162 | { | 1160 | { |
1163 | ia64_ptce_info_t ptce = {0}; | 1161 | ia64_ptce_info_t ptce = {0}; |
1164 | 1162 | ||
1165 | ia64_get_ptce(&ptce); | 1163 | ia64_get_ptce(&ptce); |
1166 | vcpu->arch.ptce_base = ptce.base; | 1164 | vcpu->arch.ptce_base = ptce.base; |
1167 | vcpu->arch.ptce_count[0] = ptce.count[0]; | 1165 | vcpu->arch.ptce_count[0] = ptce.count[0]; |
1168 | vcpu->arch.ptce_count[1] = ptce.count[1]; | 1166 | vcpu->arch.ptce_count[1] = ptce.count[1]; |
1169 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; | 1167 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; |
1170 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; | 1168 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; |
1171 | } | 1169 | } |
1172 | 1170 | ||
1173 | static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) | 1171 | static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) |
1174 | { | 1172 | { |
1175 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | 1173 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; |
1176 | 1174 | ||
1177 | if (hrtimer_cancel(p_ht)) | 1175 | if (hrtimer_cancel(p_ht)) |
1178 | hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); | 1176 | hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); |
1179 | } | 1177 | } |
1180 | 1178 | ||
1181 | static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) | 1179 | static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) |
1182 | { | 1180 | { |
1183 | struct kvm_vcpu *vcpu; | 1181 | struct kvm_vcpu *vcpu; |
1184 | wait_queue_head_t *q; | 1182 | wait_queue_head_t *q; |
1185 | 1183 | ||
1186 | vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); | 1184 | vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); |
1187 | q = &vcpu->wq; | 1185 | q = &vcpu->wq; |
1188 | 1186 | ||
1189 | if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) | 1187 | if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) |
1190 | goto out; | 1188 | goto out; |
1191 | 1189 | ||
1192 | if (waitqueue_active(q)) | 1190 | if (waitqueue_active(q)) |
1193 | wake_up_interruptible(q); | 1191 | wake_up_interruptible(q); |
1194 | 1192 | ||
1195 | out: | 1193 | out: |
1196 | vcpu->arch.timer_fired = 1; | 1194 | vcpu->arch.timer_fired = 1; |
1197 | vcpu->arch.timer_check = 1; | 1195 | vcpu->arch.timer_check = 1; |
1198 | return HRTIMER_NORESTART; | 1196 | return HRTIMER_NORESTART; |
1199 | } | 1197 | } |
1200 | 1198 | ||
1201 | #define PALE_RESET_ENTRY 0x80000000ffffffb0UL | 1199 | #define PALE_RESET_ENTRY 0x80000000ffffffb0UL |
1202 | 1200 | ||
1203 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 1201 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
1204 | { | 1202 | { |
1205 | struct kvm_vcpu *v; | 1203 | struct kvm_vcpu *v; |
1206 | int r; | 1204 | int r; |
1207 | int i; | 1205 | int i; |
1208 | long itc_offset; | 1206 | long itc_offset; |
1209 | struct kvm *kvm = vcpu->kvm; | 1207 | struct kvm *kvm = vcpu->kvm; |
1210 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1208 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1211 | 1209 | ||
1212 | union context *p_ctx = &vcpu->arch.guest; | 1210 | union context *p_ctx = &vcpu->arch.guest; |
1213 | struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); | 1211 | struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); |
1214 | 1212 | ||
1215 | /*Init vcpu context for first run.*/ | 1213 | /*Init vcpu context for first run.*/ |
1216 | if (IS_ERR(vmm_vcpu)) | 1214 | if (IS_ERR(vmm_vcpu)) |
1217 | return PTR_ERR(vmm_vcpu); | 1215 | return PTR_ERR(vmm_vcpu); |
1218 | 1216 | ||
1219 | if (kvm_vcpu_is_bsp(vcpu)) { | 1217 | if (kvm_vcpu_is_bsp(vcpu)) { |
1220 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 1218 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
1221 | 1219 | ||
1222 | /*Set entry address for first run.*/ | 1220 | /*Set entry address for first run.*/ |
1223 | regs->cr_iip = PALE_RESET_ENTRY; | 1221 | regs->cr_iip = PALE_RESET_ENTRY; |
1224 | 1222 | ||
1225 | /*Initialize itc offset for vcpus*/ | 1223 | /*Initialize itc offset for vcpus*/ |
1226 | itc_offset = 0UL - kvm_get_itc(vcpu); | 1224 | itc_offset = 0UL - kvm_get_itc(vcpu); |
1227 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | 1225 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
1228 | v = (struct kvm_vcpu *)((char *)vcpu + | 1226 | v = (struct kvm_vcpu *)((char *)vcpu + |
1229 | sizeof(struct kvm_vcpu_data) * i); | 1227 | sizeof(struct kvm_vcpu_data) * i); |
1230 | v->arch.itc_offset = itc_offset; | 1228 | v->arch.itc_offset = itc_offset; |
1231 | v->arch.last_itc = 0; | 1229 | v->arch.last_itc = 0; |
1232 | } | 1230 | } |
1233 | } else | 1231 | } else |
1234 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; | 1232 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; |
1235 | 1233 | ||
1236 | r = -ENOMEM; | 1234 | r = -ENOMEM; |
1237 | vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); | 1235 | vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); |
1238 | if (!vcpu->arch.apic) | 1236 | if (!vcpu->arch.apic) |
1239 | goto out; | 1237 | goto out; |
1240 | vcpu->arch.apic->vcpu = vcpu; | 1238 | vcpu->arch.apic->vcpu = vcpu; |
1241 | 1239 | ||
1242 | p_ctx->gr[1] = 0; | 1240 | p_ctx->gr[1] = 0; |
1243 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); | 1241 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); |
1244 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; | 1242 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; |
1245 | p_ctx->psr = 0x1008522000UL; | 1243 | p_ctx->psr = 0x1008522000UL; |
1246 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | 1244 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ |
1247 | p_ctx->caller_unat = 0; | 1245 | p_ctx->caller_unat = 0; |
1248 | p_ctx->pr = 0x0; | 1246 | p_ctx->pr = 0x0; |
1249 | p_ctx->ar[36] = 0x0; /*unat*/ | 1247 | p_ctx->ar[36] = 0x0; /*unat*/ |
1250 | p_ctx->ar[19] = 0x0; /*rnat*/ | 1248 | p_ctx->ar[19] = 0x0; /*rnat*/ |
1251 | p_ctx->ar[18] = (unsigned long)vmm_vcpu + | 1249 | p_ctx->ar[18] = (unsigned long)vmm_vcpu + |
1252 | ((sizeof(struct kvm_vcpu)+15) & ~15); | 1250 | ((sizeof(struct kvm_vcpu)+15) & ~15); |
1253 | p_ctx->ar[64] = 0x0; /*pfs*/ | 1251 | p_ctx->ar[64] = 0x0; /*pfs*/ |
1254 | p_ctx->cr[0] = 0x7e04UL; | 1252 | p_ctx->cr[0] = 0x7e04UL; |
1255 | p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; | 1253 | p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; |
1256 | p_ctx->cr[8] = 0x3c; | 1254 | p_ctx->cr[8] = 0x3c; |
1257 | 1255 | ||
1258 | /*Initialize region register*/ | 1256 | /*Initialize region register*/ |
1259 | p_ctx->rr[0] = 0x30; | 1257 | p_ctx->rr[0] = 0x30; |
1260 | p_ctx->rr[1] = 0x30; | 1258 | p_ctx->rr[1] = 0x30; |
1261 | p_ctx->rr[2] = 0x30; | 1259 | p_ctx->rr[2] = 0x30; |
1262 | p_ctx->rr[3] = 0x30; | 1260 | p_ctx->rr[3] = 0x30; |
1263 | p_ctx->rr[4] = 0x30; | 1261 | p_ctx->rr[4] = 0x30; |
1264 | p_ctx->rr[5] = 0x30; | 1262 | p_ctx->rr[5] = 0x30; |
1265 | p_ctx->rr[7] = 0x30; | 1263 | p_ctx->rr[7] = 0x30; |
1266 | 1264 | ||
1267 | /*Initialize branch register 0*/ | 1265 | /*Initialize branch register 0*/ |
1268 | p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; | 1266 | p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; |
1269 | 1267 | ||
1270 | vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; | 1268 | vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; |
1271 | vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; | 1269 | vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; |
1272 | vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; | 1270 | vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; |
1273 | 1271 | ||
1274 | hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 1272 | hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
1275 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | 1273 | vcpu->arch.hlt_timer.function = hlt_timer_fn; |
1276 | 1274 | ||
1277 | vcpu->arch.last_run_cpu = -1; | 1275 | vcpu->arch.last_run_cpu = -1; |
1278 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); | 1276 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); |
1279 | vcpu->arch.vsa_base = kvm_vsa_base; | 1277 | vcpu->arch.vsa_base = kvm_vsa_base; |
1280 | vcpu->arch.__gp = kvm_vmm_gp; | 1278 | vcpu->arch.__gp = kvm_vmm_gp; |
1281 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | 1279 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); |
1282 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); | 1280 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); |
1283 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); | 1281 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); |
1284 | init_ptce_info(vcpu); | 1282 | init_ptce_info(vcpu); |
1285 | 1283 | ||
1286 | r = 0; | 1284 | r = 0; |
1287 | out: | 1285 | out: |
1288 | return r; | 1286 | return r; |
1289 | } | 1287 | } |
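The guard in kvm_arch_vcpu_init() keys the one-time reset work (the RUNNABLE state, PALE_RESET_ENTRY, the per-vcpu ITC offsets) off kvm_vcpu_is_bsp() rather than off a hard-coded vcpu id. As a hedged sketch of what such a helper reduces to (assuming struct kvm carries a configurable bsp_vcpu_id field, which this hunk does not show), it is a single comparison:

	/* Sketch only; the real helper lives in the generic KVM headers. */
	static inline int kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
	{
		return vcpu->vcpu_id == vcpu->kvm->bsp_vcpu_id;
	}

Any vcpu whose id matches the configured boot id takes the BSP path; every other vcpu starts in KVM_MP_STATE_UNINITIALIZED and waits to be brought up.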
1290 | 1288 | ||
1291 | static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) | 1289 | static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) |
1292 | { | 1290 | { |
1293 | unsigned long psr; | 1291 | unsigned long psr; |
1294 | int r; | 1292 | int r; |
1295 | 1293 | ||
1296 | local_irq_save(psr); | 1294 | local_irq_save(psr); |
1297 | r = kvm_insert_vmm_mapping(vcpu); | 1295 | r = kvm_insert_vmm_mapping(vcpu); |
1298 | local_irq_restore(psr); | 1296 | local_irq_restore(psr); |
1299 | if (r) | 1297 | if (r) |
1300 | goto fail; | 1298 | goto fail; |
1301 | r = kvm_vcpu_init(vcpu, vcpu->kvm, id); | 1299 | r = kvm_vcpu_init(vcpu, vcpu->kvm, id); |
1302 | if (r) | 1300 | if (r) |
1303 | goto fail; | 1301 | goto fail; |
1304 | 1302 | ||
1305 | r = vti_init_vpd(vcpu); | 1303 | r = vti_init_vpd(vcpu); |
1306 | if (r) { | 1304 | if (r) { |
1307 | printk(KERN_DEBUG"kvm: vpd init error!!\n"); | 1305 | printk(KERN_DEBUG"kvm: vpd init error!!\n"); |
1308 | goto uninit; | 1306 | goto uninit; |
1309 | } | 1307 | } |
1310 | 1308 | ||
1311 | r = vti_create_vp(vcpu); | 1309 | r = vti_create_vp(vcpu); |
1312 | if (r) | 1310 | if (r) |
1313 | goto uninit; | 1311 | goto uninit; |
1314 | 1312 | ||
1315 | kvm_purge_vmm_mapping(vcpu); | 1313 | kvm_purge_vmm_mapping(vcpu); |
1316 | 1314 | ||
1317 | return 0; | 1315 | return 0; |
1318 | uninit: | 1316 | uninit: |
1319 | kvm_vcpu_uninit(vcpu); | 1317 | kvm_vcpu_uninit(vcpu); |
1320 | fail: | 1318 | fail: |
1321 | return r; | 1319 | return r; |
1322 | } | 1320 | } |
1323 | 1321 | ||
1324 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | 1322 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
1325 | unsigned int id) | 1323 | unsigned int id) |
1326 | { | 1324 | { |
1327 | struct kvm_vcpu *vcpu; | 1325 | struct kvm_vcpu *vcpu; |
1328 | unsigned long vm_base = kvm->arch.vm_base; | 1326 | unsigned long vm_base = kvm->arch.vm_base; |
1329 | int r; | 1327 | int r; |
1330 | int cpu; | 1328 | int cpu; |
1331 | 1329 | ||
1332 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); | 1330 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); |
1333 | 1331 | ||
1334 | r = -EINVAL; | 1332 | r = -EINVAL; |
1335 | if (id >= KVM_MAX_VCPUS) { | 1333 | if (id >= KVM_MAX_VCPUS) { |
1336 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld", | 1334 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld", |
1337 | KVM_MAX_VCPUS); | 1335 | KVM_MAX_VCPUS); |
1338 | goto fail; | 1336 | goto fail; |
1339 | } | 1337 | } |
1340 | 1338 | ||
1341 | r = -ENOMEM; | 1339 | r = -ENOMEM; |
1342 | if (!vm_base) { | 1340 | if (!vm_base) { |
1343 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | 1341 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); |
1344 | goto fail; | 1342 | goto fail; |
1345 | } | 1343 | } |
1346 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, | 1344 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, |
1347 | vcpu_data[id].vcpu_struct)); | 1345 | vcpu_data[id].vcpu_struct)); |
1348 | vcpu->kvm = kvm; | 1346 | vcpu->kvm = kvm; |
1349 | 1347 | ||
1350 | cpu = get_cpu(); | 1348 | cpu = get_cpu(); |
1351 | r = vti_vcpu_setup(vcpu, id); | 1349 | r = vti_vcpu_setup(vcpu, id); |
1352 | put_cpu(); | 1350 | put_cpu(); |
1353 | 1351 | ||
1354 | if (r) { | 1352 | if (r) { |
1355 | printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); | 1353 | printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); |
1356 | goto fail; | 1354 | goto fail; |
1357 | } | 1355 | } |
1358 | |||
1359 | kvm->arch.online_vcpus++; | ||
1360 | 1356 | ||
1361 | return vcpu; | 1357 | return vcpu; |
1362 | fail: | 1358 | fail: |
1363 | return ERR_PTR(r); | 1359 | return ERR_PTR(r); |
1364 | } | 1360 | } |
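With vcpu creation driven purely by the caller-supplied id (and the old kvm->arch.online_vcpus++ bookkeeping dropped, as the left column shows), nothing in this path treats id 0 specially any more; which vcpu boots is a property configured on the VM. A minimal, hedged userspace ordering sketch (error handling omitted; the KVM_SET_BOOT_CPU_ID call has to happen before the first vcpu exists):

	/* Illustrative only, not taken from this diff. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_vm_with_boot_cpu(unsigned long boot_id, int nr_vcpus)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
		int i;

		/* Configure the boot vcpu id while the VM still has no vcpus. */
		ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, boot_id);

		for (i = 0; i < nr_vcpus; i++)
			ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)i);

		return vm_fd;
	}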
1365 | 1361 | ||
1366 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 1362 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
1367 | { | 1363 | { |
1368 | return 0; | 1364 | return 0; |
1369 | } | 1365 | } |
1370 | 1366 | ||
1371 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 1367 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1372 | { | 1368 | { |
1373 | return -EINVAL; | 1369 | return -EINVAL; |
1374 | } | 1370 | } |
1375 | 1371 | ||
1376 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 1372 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1377 | { | 1373 | { |
1378 | return -EINVAL; | 1374 | return -EINVAL; |
1379 | } | 1375 | } |
1380 | 1376 | ||
1381 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 1377 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1382 | struct kvm_guest_debug *dbg) | 1378 | struct kvm_guest_debug *dbg) |
1383 | { | 1379 | { |
1384 | return -EINVAL; | 1380 | return -EINVAL; |
1385 | } | 1381 | } |
1386 | 1382 | ||
1387 | static void free_kvm(struct kvm *kvm) | 1383 | static void free_kvm(struct kvm *kvm) |
1388 | { | 1384 | { |
1389 | unsigned long vm_base = kvm->arch.vm_base; | 1385 | unsigned long vm_base = kvm->arch.vm_base; |
1390 | 1386 | ||
1391 | if (vm_base) { | 1387 | if (vm_base) { |
1392 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | 1388 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); |
1393 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); | 1389 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); |
1394 | } | 1390 | } |
1395 | 1391 | ||
1396 | } | 1392 | } |
1397 | 1393 | ||
1398 | static void kvm_release_vm_pages(struct kvm *kvm) | 1394 | static void kvm_release_vm_pages(struct kvm *kvm) |
1399 | { | 1395 | { |
1400 | struct kvm_memory_slot *memslot; | 1396 | struct kvm_memory_slot *memslot; |
1401 | int i, j; | 1397 | int i, j; |
1402 | unsigned long base_gfn; | 1398 | unsigned long base_gfn; |
1403 | 1399 | ||
1404 | for (i = 0; i < kvm->nmemslots; i++) { | 1400 | for (i = 0; i < kvm->nmemslots; i++) { |
1405 | memslot = &kvm->memslots[i]; | 1401 | memslot = &kvm->memslots[i]; |
1406 | base_gfn = memslot->base_gfn; | 1402 | base_gfn = memslot->base_gfn; |
1407 | 1403 | ||
1408 | for (j = 0; j < memslot->npages; j++) { | 1404 | for (j = 0; j < memslot->npages; j++) { |
1409 | if (memslot->rmap[j]) | 1405 | if (memslot->rmap[j]) |
1410 | put_page((struct page *)memslot->rmap[j]); | 1406 | put_page((struct page *)memslot->rmap[j]); |
1411 | } | 1407 | } |
1412 | } | 1408 | } |
1413 | } | 1409 | } |
1414 | 1410 | ||
1415 | void kvm_arch_sync_events(struct kvm *kvm) | 1411 | void kvm_arch_sync_events(struct kvm *kvm) |
1416 | { | 1412 | { |
1417 | } | 1413 | } |
1418 | 1414 | ||
1419 | void kvm_arch_destroy_vm(struct kvm *kvm) | 1415 | void kvm_arch_destroy_vm(struct kvm *kvm) |
1420 | { | 1416 | { |
1421 | kvm_iommu_unmap_guest(kvm); | 1417 | kvm_iommu_unmap_guest(kvm); |
1422 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | 1418 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT |
1423 | kvm_free_all_assigned_devices(kvm); | 1419 | kvm_free_all_assigned_devices(kvm); |
1424 | #endif | 1420 | #endif |
1425 | kfree(kvm->arch.vioapic); | 1421 | kfree(kvm->arch.vioapic); |
1426 | kvm_release_vm_pages(kvm); | 1422 | kvm_release_vm_pages(kvm); |
1427 | kvm_free_physmem(kvm); | 1423 | kvm_free_physmem(kvm); |
1428 | free_kvm(kvm); | 1424 | free_kvm(kvm); |
1429 | } | 1425 | } |
1430 | 1426 | ||
1431 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 1427 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
1432 | { | 1428 | { |
1433 | } | 1429 | } |
1434 | 1430 | ||
1435 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1431 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1436 | { | 1432 | { |
1437 | if (cpu != vcpu->cpu) { | 1433 | if (cpu != vcpu->cpu) { |
1438 | vcpu->cpu = cpu; | 1434 | vcpu->cpu = cpu; |
1439 | if (vcpu->arch.ht_active) | 1435 | if (vcpu->arch.ht_active) |
1440 | kvm_migrate_hlt_timer(vcpu); | 1436 | kvm_migrate_hlt_timer(vcpu); |
1441 | } | 1437 | } |
1442 | } | 1438 | } |
1443 | 1439 | ||
1444 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x | 1440 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x |
1445 | 1441 | ||
1446 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 1442 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1447 | { | 1443 | { |
1448 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 1444 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
1449 | int i; | 1445 | int i; |
1450 | 1446 | ||
1451 | vcpu_load(vcpu); | 1447 | vcpu_load(vcpu); |
1452 | 1448 | ||
1453 | for (i = 0; i < 16; i++) { | 1449 | for (i = 0; i < 16; i++) { |
1454 | regs->vpd.vgr[i] = vpd->vgr[i]; | 1450 | regs->vpd.vgr[i] = vpd->vgr[i]; |
1455 | regs->vpd.vbgr[i] = vpd->vbgr[i]; | 1451 | regs->vpd.vbgr[i] = vpd->vbgr[i]; |
1456 | } | 1452 | } |
1457 | for (i = 0; i < 128; i++) | 1453 | for (i = 0; i < 128; i++) |
1458 | regs->vpd.vcr[i] = vpd->vcr[i]; | 1454 | regs->vpd.vcr[i] = vpd->vcr[i]; |
1459 | regs->vpd.vhpi = vpd->vhpi; | 1455 | regs->vpd.vhpi = vpd->vhpi; |
1460 | regs->vpd.vnat = vpd->vnat; | 1456 | regs->vpd.vnat = vpd->vnat; |
1461 | regs->vpd.vbnat = vpd->vbnat; | 1457 | regs->vpd.vbnat = vpd->vbnat; |
1462 | regs->vpd.vpsr = vpd->vpsr; | 1458 | regs->vpd.vpsr = vpd->vpsr; |
1463 | regs->vpd.vpr = vpd->vpr; | 1459 | regs->vpd.vpr = vpd->vpr; |
1464 | 1460 | ||
1465 | memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); | 1461 | memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); |
1466 | 1462 | ||
1467 | SAVE_REGS(mp_state); | 1463 | SAVE_REGS(mp_state); |
1468 | SAVE_REGS(vmm_rr); | 1464 | SAVE_REGS(vmm_rr); |
1469 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | 1465 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); |
1470 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); | 1466 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); |
1471 | SAVE_REGS(itr_regions); | 1467 | SAVE_REGS(itr_regions); |
1472 | SAVE_REGS(dtr_regions); | 1468 | SAVE_REGS(dtr_regions); |
1473 | SAVE_REGS(tc_regions); | 1469 | SAVE_REGS(tc_regions); |
1474 | SAVE_REGS(irq_check); | 1470 | SAVE_REGS(irq_check); |
1475 | SAVE_REGS(itc_check); | 1471 | SAVE_REGS(itc_check); |
1476 | SAVE_REGS(timer_check); | 1472 | SAVE_REGS(timer_check); |
1477 | SAVE_REGS(timer_pending); | 1473 | SAVE_REGS(timer_pending); |
1478 | SAVE_REGS(last_itc); | 1474 | SAVE_REGS(last_itc); |
1479 | for (i = 0; i < 8; i++) { | 1475 | for (i = 0; i < 8; i++) { |
1480 | regs->vrr[i] = vcpu->arch.vrr[i]; | 1476 | regs->vrr[i] = vcpu->arch.vrr[i]; |
1481 | regs->ibr[i] = vcpu->arch.ibr[i]; | 1477 | regs->ibr[i] = vcpu->arch.ibr[i]; |
1482 | regs->dbr[i] = vcpu->arch.dbr[i]; | 1478 | regs->dbr[i] = vcpu->arch.dbr[i]; |
1483 | } | 1479 | } |
1484 | for (i = 0; i < 4; i++) | 1480 | for (i = 0; i < 4; i++) |
1485 | regs->insvc[i] = vcpu->arch.insvc[i]; | 1481 | regs->insvc[i] = vcpu->arch.insvc[i]; |
1486 | regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); | 1482 | regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); |
1487 | SAVE_REGS(xtp); | 1483 | SAVE_REGS(xtp); |
1488 | SAVE_REGS(metaphysical_rr0); | 1484 | SAVE_REGS(metaphysical_rr0); |
1489 | SAVE_REGS(metaphysical_rr4); | 1485 | SAVE_REGS(metaphysical_rr4); |
1490 | SAVE_REGS(metaphysical_saved_rr0); | 1486 | SAVE_REGS(metaphysical_saved_rr0); |
1491 | SAVE_REGS(metaphysical_saved_rr4); | 1487 | SAVE_REGS(metaphysical_saved_rr4); |
1492 | SAVE_REGS(fp_psr); | 1488 | SAVE_REGS(fp_psr); |
1493 | SAVE_REGS(saved_gp); | 1489 | SAVE_REGS(saved_gp); |
1494 | 1490 | ||
1495 | vcpu_put(vcpu); | 1491 | vcpu_put(vcpu); |
1496 | return 0; | 1492 | return 0; |
1497 | } | 1493 | } |
1498 | 1494 | ||
1499 | int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, | 1495 | int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, |
1500 | struct kvm_ia64_vcpu_stack *stack) | 1496 | struct kvm_ia64_vcpu_stack *stack) |
1501 | { | 1497 | { |
1502 | memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); | 1498 | memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); |
1503 | return 0; | 1499 | return 0; |
1504 | } | 1500 | } |
1505 | 1501 | ||
1506 | int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, | 1502 | int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, |
1507 | struct kvm_ia64_vcpu_stack *stack) | 1503 | struct kvm_ia64_vcpu_stack *stack) |
1508 | { | 1504 | { |
1509 | memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), | 1505 | memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), |
1510 | sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); | 1506 | sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); |
1511 | 1507 | ||
1512 | vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; | 1508 | vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; |
1513 | return 0; | 1509 | return 0; |
1514 | } | 1510 | } |
1515 | 1511 | ||
1516 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1512 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
1517 | { | 1513 | { |
1518 | 1514 | ||
1519 | hrtimer_cancel(&vcpu->arch.hlt_timer); | 1515 | hrtimer_cancel(&vcpu->arch.hlt_timer); |
1520 | kfree(vcpu->arch.apic); | 1516 | kfree(vcpu->arch.apic); |
1521 | } | 1517 | } |
1522 | 1518 | ||
1523 | 1519 | ||
1524 | long kvm_arch_vcpu_ioctl(struct file *filp, | 1520 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1525 | unsigned int ioctl, unsigned long arg) | 1521 | unsigned int ioctl, unsigned long arg) |
1526 | { | 1522 | { |
1527 | struct kvm_vcpu *vcpu = filp->private_data; | 1523 | struct kvm_vcpu *vcpu = filp->private_data; |
1528 | void __user *argp = (void __user *)arg; | 1524 | void __user *argp = (void __user *)arg; |
1529 | struct kvm_ia64_vcpu_stack *stack = NULL; | 1525 | struct kvm_ia64_vcpu_stack *stack = NULL; |
1530 | long r; | 1526 | long r; |
1531 | 1527 | ||
1532 | switch (ioctl) { | 1528 | switch (ioctl) { |
1533 | case KVM_IA64_VCPU_GET_STACK: { | 1529 | case KVM_IA64_VCPU_GET_STACK: { |
1534 | struct kvm_ia64_vcpu_stack __user *user_stack; | 1530 | struct kvm_ia64_vcpu_stack __user *user_stack; |
1535 | void __user *first_p = argp; | 1531 | void __user *first_p = argp; |
1536 | 1532 | ||
1537 | r = -EFAULT; | 1533 | r = -EFAULT; |
1538 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | 1534 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) |
1539 | goto out; | 1535 | goto out; |
1540 | 1536 | ||
1541 | if (!access_ok(VERIFY_WRITE, user_stack, | 1537 | if (!access_ok(VERIFY_WRITE, user_stack, |
1542 | sizeof(struct kvm_ia64_vcpu_stack))) { | 1538 | sizeof(struct kvm_ia64_vcpu_stack))) { |
1543 | printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " | 1539 | printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " |
1544 | "Illegal user destination address for stack\n"); | 1540 | "Illegal user destination address for stack\n"); |
1545 | goto out; | 1541 | goto out; |
1546 | } | 1542 | } |
1547 | stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | 1543 | stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); |
1548 | if (!stack) { | 1544 | if (!stack) { |
1549 | r = -ENOMEM; | 1545 | r = -ENOMEM; |
1550 | goto out; | 1546 | goto out; |
1551 | } | 1547 | } |
1552 | 1548 | ||
1553 | r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); | 1549 | r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); |
1554 | if (r) | 1550 | if (r) |
1555 | goto out; | 1551 | goto out; |
1556 | 1552 | ||
1557 | if (copy_to_user(user_stack, stack, | 1553 | if (copy_to_user(user_stack, stack, |
1558 | sizeof(struct kvm_ia64_vcpu_stack))) | 1554 | sizeof(struct kvm_ia64_vcpu_stack))) |
1559 | goto out; | 1555 | goto out; |
1560 | 1556 | ||
1561 | break; | 1557 | break; |
1562 | } | 1558 | } |
1563 | case KVM_IA64_VCPU_SET_STACK: { | 1559 | case KVM_IA64_VCPU_SET_STACK: { |
1564 | struct kvm_ia64_vcpu_stack __user *user_stack; | 1560 | struct kvm_ia64_vcpu_stack __user *user_stack; |
1565 | void __user *first_p = argp; | 1561 | void __user *first_p = argp; |
1566 | 1562 | ||
1567 | r = -EFAULT; | 1563 | r = -EFAULT; |
1568 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | 1564 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) |
1569 | goto out; | 1565 | goto out; |
1570 | 1566 | ||
1571 | if (!access_ok(VERIFY_READ, user_stack, | 1567 | if (!access_ok(VERIFY_READ, user_stack, |
1572 | sizeof(struct kvm_ia64_vcpu_stack))) { | 1568 | sizeof(struct kvm_ia64_vcpu_stack))) { |
1573 | printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " | 1569 | printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " |
1574 | "Illegal user address for stack\n"); | 1570 | "Illegal user address for stack\n"); |
1575 | goto out; | 1571 | goto out; |
1576 | } | 1572 | } |
1577 | stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | 1573 | stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); |
1578 | if (!stack) { | 1574 | if (!stack) { |
1579 | r = -ENOMEM; | 1575 | r = -ENOMEM; |
1580 | goto out; | 1576 | goto out; |
1581 | } | 1577 | } |
1582 | if (copy_from_user(stack, user_stack, | 1578 | if (copy_from_user(stack, user_stack, |
1583 | sizeof(struct kvm_ia64_vcpu_stack))) | 1579 | sizeof(struct kvm_ia64_vcpu_stack))) |
1584 | goto out; | 1580 | goto out; |
1585 | 1581 | ||
1586 | r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); | 1582 | r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); |
1587 | break; | 1583 | break; |
1588 | } | 1584 | } |
1589 | 1585 | ||
1590 | default: | 1586 | default: |
1591 | r = -EINVAL; | 1587 | r = -EINVAL; |
1592 | } | 1588 | } |
1593 | 1589 | ||
1594 | out: | 1590 | out: |
1595 | kfree(stack); | 1591 | kfree(stack); |
1596 | return r; | 1592 | return r; |
1597 | } | 1593 | } |
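Both stack ioctls first copy a pointer out of userspace (copy_from_user(&user_stack, first_p, sizeof(void *))) and only then read or write the buffer that pointer names, so the ioctl argument is the address of a pointer to the stack buffer, not the buffer itself. A hypothetical caller, shown only to make that double indirection concrete (vcpu_fd and the header providing KVM_IA64_VCPU_GET_STACK are assumptions here):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Illustration only: pass &buf; the kernel copies the pointer first
	 * and then fills the buffer it points at. */
	int dump_vcpu_stack(int vcpu_fd)
	{
		struct kvm_ia64_vcpu_stack *buf = malloc(sizeof(*buf));
		int ret;

		if (!buf)
			return -1;
		ret = ioctl(vcpu_fd, KVM_IA64_VCPU_GET_STACK, &buf);
		free(buf);
		return ret;
	}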
1598 | 1594 | ||
1599 | int kvm_arch_set_memory_region(struct kvm *kvm, | 1595 | int kvm_arch_set_memory_region(struct kvm *kvm, |
1600 | struct kvm_userspace_memory_region *mem, | 1596 | struct kvm_userspace_memory_region *mem, |
1601 | struct kvm_memory_slot old, | 1597 | struct kvm_memory_slot old, |
1602 | int user_alloc) | 1598 | int user_alloc) |
1603 | { | 1599 | { |
1604 | unsigned long i; | 1600 | unsigned long i; |
1605 | unsigned long pfn; | 1601 | unsigned long pfn; |
1606 | int npages = mem->memory_size >> PAGE_SHIFT; | 1602 | int npages = mem->memory_size >> PAGE_SHIFT; |
1607 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | 1603 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; |
1608 | unsigned long base_gfn = memslot->base_gfn; | 1604 | unsigned long base_gfn = memslot->base_gfn; |
1609 | 1605 | ||
1610 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) | 1606 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) |
1611 | return -ENOMEM; | 1607 | return -ENOMEM; |
1612 | 1608 | ||
1613 | for (i = 0; i < npages; i++) { | 1609 | for (i = 0; i < npages; i++) { |
1614 | pfn = gfn_to_pfn(kvm, base_gfn + i); | 1610 | pfn = gfn_to_pfn(kvm, base_gfn + i); |
1615 | if (!kvm_is_mmio_pfn(pfn)) { | 1611 | if (!kvm_is_mmio_pfn(pfn)) { |
1616 | kvm_set_pmt_entry(kvm, base_gfn + i, | 1612 | kvm_set_pmt_entry(kvm, base_gfn + i, |
1617 | pfn << PAGE_SHIFT, | 1613 | pfn << PAGE_SHIFT, |
1618 | _PAGE_AR_RWX | _PAGE_MA_WB); | 1614 | _PAGE_AR_RWX | _PAGE_MA_WB); |
1619 | memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); | 1615 | memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); |
1620 | } else { | 1616 | } else { |
1621 | kvm_set_pmt_entry(kvm, base_gfn + i, | 1617 | kvm_set_pmt_entry(kvm, base_gfn + i, |
1622 | GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), | 1618 | GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), |
1623 | _PAGE_MA_UC); | 1619 | _PAGE_MA_UC); |
1624 | memslot->rmap[i] = 0; | 1620 | memslot->rmap[i] = 0; |
1625 | } | 1621 | } |
1626 | } | 1622 | } |
1627 | 1623 | ||
1628 | return 0; | 1624 | return 0; |
1629 | } | 1625 | } |
1630 | 1626 | ||
1631 | void kvm_arch_flush_shadow(struct kvm *kvm) | 1627 | void kvm_arch_flush_shadow(struct kvm *kvm) |
1632 | { | 1628 | { |
1633 | kvm_flush_remote_tlbs(kvm); | 1629 | kvm_flush_remote_tlbs(kvm); |
1634 | } | 1630 | } |
1635 | 1631 | ||
1636 | long kvm_arch_dev_ioctl(struct file *filp, | 1632 | long kvm_arch_dev_ioctl(struct file *filp, |
1637 | unsigned int ioctl, unsigned long arg) | 1633 | unsigned int ioctl, unsigned long arg) |
1638 | { | 1634 | { |
1639 | return -EINVAL; | 1635 | return -EINVAL; |
1640 | } | 1636 | } |
1641 | 1637 | ||
1642 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | 1638 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
1643 | { | 1639 | { |
1644 | kvm_vcpu_uninit(vcpu); | 1640 | kvm_vcpu_uninit(vcpu); |
1645 | } | 1641 | } |
1646 | 1642 | ||
1647 | static int vti_cpu_has_kvm_support(void) | 1643 | static int vti_cpu_has_kvm_support(void) |
1648 | { | 1644 | { |
1649 | long avail = 1, status = 1, control = 1; | 1645 | long avail = 1, status = 1, control = 1; |
1650 | long ret; | 1646 | long ret; |
1651 | 1647 | ||
1652 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); | 1648 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); |
1653 | if (ret) | 1649 | if (ret) |
1654 | goto out; | 1650 | goto out; |
1655 | 1651 | ||
1656 | if (!(avail & PAL_PROC_VM_BIT)) | 1652 | if (!(avail & PAL_PROC_VM_BIT)) |
1657 | goto out; | 1653 | goto out; |
1658 | 1654 | ||
1659 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); | 1655 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); |
1660 | 1656 | ||
1661 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); | 1657 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); |
1662 | if (ret) | 1658 | if (ret) |
1663 | goto out; | 1659 | goto out; |
1664 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); | 1660 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); |
1665 | 1661 | ||
1666 | if (!(vp_env_info & VP_OPCODE)) { | 1662 | if (!(vp_env_info & VP_OPCODE)) { |
1667 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " | 1663 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " |
1668 | "vm_env_info:0x%lx\n", vp_env_info); | 1664 | "vm_env_info:0x%lx\n", vp_env_info); |
1669 | } | 1665 | } |
1670 | 1666 | ||
1671 | return 1; | 1667 | return 1; |
1672 | out: | 1668 | out: |
1673 | return 0; | 1669 | return 0; |
1674 | } | 1670 | } |
1675 | 1671 | ||
1676 | 1672 | ||
1677 | /* | 1673 | /* |
1678 | * On SN2, the ITC isn't stable, so copy in fast path code to use the | 1674 | * On SN2, the ITC isn't stable, so copy in fast path code to use the |
1679 | * SN2 RTC, replacing the ITC based default version. | 1675 | * SN2 RTC, replacing the ITC based default version. |
1680 | */ | 1676 | */ |
1681 | static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, | 1677 | static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, |
1682 | struct module *module) | 1678 | struct module *module) |
1683 | { | 1679 | { |
1684 | unsigned long new_ar, new_ar_sn2; | 1680 | unsigned long new_ar, new_ar_sn2; |
1685 | unsigned long module_base; | 1681 | unsigned long module_base; |
1686 | 1682 | ||
1687 | if (!ia64_platform_is("sn2")) | 1683 | if (!ia64_platform_is("sn2")) |
1688 | return; | 1684 | return; |
1689 | 1685 | ||
1690 | module_base = (unsigned long)module->module_core; | 1686 | module_base = (unsigned long)module->module_core; |
1691 | 1687 | ||
1692 | new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; | 1688 | new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; |
1693 | new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; | 1689 | new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; |
1694 | 1690 | ||
1695 | printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " | 1691 | printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " |
1696 | "as source\n"); | 1692 | "as source\n"); |
1697 | 1693 | ||
1698 | /* | 1694 | /* |
1699 | * Copy the SN2 version of mov_ar into place. They are both | 1695 | * Copy the SN2 version of mov_ar into place. They are both |
1700 | * the same size, so 6 bundles is sufficient (6 * 0x10). | 1696 | * the same size, so 6 bundles is sufficient (6 * 0x10). |
1701 | */ | 1697 | */ |
1702 | memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); | 1698 | memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); |
1703 | } | 1699 | } |
1704 | 1700 | ||
1705 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, | 1701 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, |
1706 | struct module *module) | 1702 | struct module *module) |
1707 | { | 1703 | { |
1708 | unsigned long module_base; | 1704 | unsigned long module_base; |
1709 | unsigned long vmm_size; | 1705 | unsigned long vmm_size; |
1710 | 1706 | ||
1711 | unsigned long vmm_offset, func_offset, fdesc_offset; | 1707 | unsigned long vmm_offset, func_offset, fdesc_offset; |
1712 | struct fdesc *p_fdesc; | 1708 | struct fdesc *p_fdesc; |
1713 | 1709 | ||
1714 | BUG_ON(!module); | 1710 | BUG_ON(!module); |
1715 | 1711 | ||
1716 | if (!kvm_vmm_base) { | 1712 | if (!kvm_vmm_base) { |
1717 | printk("kvm: kvm area hasn't been initialized yet!!\n"); | 1713 | printk("kvm: kvm area hasn't been initialized yet!!\n"); |
1718 | return -EFAULT; | 1714 | return -EFAULT; |
1719 | } | 1715 | } |
1720 | 1716 | ||
1721 | /*Calculate new position of relocated vmm module.*/ | 1717 | /*Calculate new position of relocated vmm module.*/ |
1722 | module_base = (unsigned long)module->module_core; | 1718 | module_base = (unsigned long)module->module_core; |
1723 | vmm_size = module->core_size; | 1719 | vmm_size = module->core_size; |
1724 | if (unlikely(vmm_size > KVM_VMM_SIZE)) | 1720 | if (unlikely(vmm_size > KVM_VMM_SIZE)) |
1725 | return -EFAULT; | 1721 | return -EFAULT; |
1726 | 1722 | ||
1727 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); | 1723 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); |
1728 | kvm_patch_vmm(vmm_info, module); | 1724 | kvm_patch_vmm(vmm_info, module); |
1729 | kvm_flush_icache(kvm_vmm_base, vmm_size); | 1725 | kvm_flush_icache(kvm_vmm_base, vmm_size); |
1730 | 1726 | ||
1731 | /*Recalculate kvm_vmm_info based on new VMM*/ | 1727 | /*Recalculate kvm_vmm_info based on new VMM*/ |
1732 | vmm_offset = vmm_info->vmm_ivt - module_base; | 1728 | vmm_offset = vmm_info->vmm_ivt - module_base; |
1733 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; | 1729 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; |
1734 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", | 1730 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", |
1735 | kvm_vmm_info->vmm_ivt); | 1731 | kvm_vmm_info->vmm_ivt); |
1736 | 1732 | ||
1737 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; | 1733 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; |
1738 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + | 1734 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + |
1739 | fdesc_offset); | 1735 | fdesc_offset); |
1740 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; | 1736 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; |
1741 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | 1737 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); |
1742 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | 1738 | p_fdesc->ip = KVM_VMM_BASE + func_offset; |
1743 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); | 1739 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); |
1744 | 1740 | ||
1745 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", | 1741 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", |
1746 | KVM_VMM_BASE+func_offset); | 1742 | KVM_VMM_BASE+func_offset); |
1747 | 1743 | ||
1748 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; | 1744 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; |
1749 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + | 1745 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + |
1750 | fdesc_offset); | 1746 | fdesc_offset); |
1751 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; | 1747 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; |
1752 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | 1748 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); |
1753 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | 1749 | p_fdesc->ip = KVM_VMM_BASE + func_offset; |
1754 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); | 1750 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); |
1755 | 1751 | ||
1756 | kvm_vmm_gp = p_fdesc->gp; | 1752 | kvm_vmm_gp = p_fdesc->gp; |
1757 | 1753 | ||
1758 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", | 1754 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", |
1759 | kvm_vmm_info->vmm_entry); | 1755 | kvm_vmm_info->vmm_entry); |
1760 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", | 1756 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", |
1761 | KVM_VMM_BASE + func_offset); | 1757 | KVM_VMM_BASE + func_offset); |
1762 | 1758 | ||
1763 | return 0; | 1759 | return 0; |
1764 | } | 1760 | } |
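The descriptor fix-ups above (p_fdesc->ip, p_fdesc->gp) rely on the ia64 convention that a function pointer is really a two-word function descriptor: the entry IP plus the gp the callee expects. kvm_relocate_vmm() copies the module into the reserved VMM area and then rebases both words by the same delta. A sketch of the layout being assumed (the authoritative struct fdesc lives in the ia64 arch headers, not in this hunk):

	struct fdesc {
		unsigned long ip;	/* function entry point */
		unsigned long gp;	/* global pointer the function expects */
	};

	/* Rebasing keeps each offset inside the module:
	 *	new.ip = KVM_VMM_BASE + (old.ip - module_base)
	 *	new.gp = KVM_VMM_BASE + (old.gp - module_base)
	 */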
1765 | 1761 | ||
1766 | int kvm_arch_init(void *opaque) | 1762 | int kvm_arch_init(void *opaque) |
1767 | { | 1763 | { |
1768 | int r; | 1764 | int r; |
1769 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; | 1765 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; |
1770 | 1766 | ||
1771 | if (!vti_cpu_has_kvm_support()) { | 1767 | if (!vti_cpu_has_kvm_support()) { |
1772 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); | 1768 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); |
1773 | r = -EOPNOTSUPP; | 1769 | r = -EOPNOTSUPP; |
1774 | goto out; | 1770 | goto out; |
1775 | } | 1771 | } |
1776 | 1772 | ||
1777 | if (kvm_vmm_info) { | 1773 | if (kvm_vmm_info) { |
1778 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); | 1774 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); |
1779 | r = -EEXIST; | 1775 | r = -EEXIST; |
1780 | goto out; | 1776 | goto out; |
1781 | } | 1777 | } |
1782 | 1778 | ||
1783 | r = -ENOMEM; | 1779 | r = -ENOMEM; |
1784 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); | 1780 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); |
1785 | if (!kvm_vmm_info) | 1781 | if (!kvm_vmm_info) |
1786 | goto out; | 1782 | goto out; |
1787 | 1783 | ||
1788 | if (kvm_alloc_vmm_area()) | 1784 | if (kvm_alloc_vmm_area()) |
1789 | goto out_free0; | 1785 | goto out_free0; |
1790 | 1786 | ||
1791 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); | 1787 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); |
1792 | if (r) | 1788 | if (r) |
1793 | goto out_free1; | 1789 | goto out_free1; |
1794 | 1790 | ||
1795 | return 0; | 1791 | return 0; |
1796 | 1792 | ||
1797 | out_free1: | 1793 | out_free1: |
1798 | kvm_free_vmm_area(); | 1794 | kvm_free_vmm_area(); |
1799 | out_free0: | 1795 | out_free0: |
1800 | kfree(kvm_vmm_info); | 1796 | kfree(kvm_vmm_info); |
1801 | out: | 1797 | out: |
1802 | return r; | 1798 | return r; |
1803 | } | 1799 | } |
1804 | 1800 | ||
1805 | void kvm_arch_exit(void) | 1801 | void kvm_arch_exit(void) |
1806 | { | 1802 | { |
1807 | kvm_free_vmm_area(); | 1803 | kvm_free_vmm_area(); |
1808 | kfree(kvm_vmm_info); | 1804 | kfree(kvm_vmm_info); |
1809 | kvm_vmm_info = NULL; | 1805 | kvm_vmm_info = NULL; |
1810 | } | 1806 | } |
1811 | 1807 | ||
1812 | static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | 1808 | static int kvm_ia64_sync_dirty_log(struct kvm *kvm, |
1813 | struct kvm_dirty_log *log) | 1809 | struct kvm_dirty_log *log) |
1814 | { | 1810 | { |
1815 | struct kvm_memory_slot *memslot; | 1811 | struct kvm_memory_slot *memslot; |
1816 | int r, i; | 1812 | int r, i; |
1817 | long n, base; | 1813 | long n, base; |
1818 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + | 1814 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + |
1819 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); | 1815 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); |
1820 | 1816 | ||
1821 | r = -EINVAL; | 1817 | r = -EINVAL; |
1822 | if (log->slot >= KVM_MEMORY_SLOTS) | 1818 | if (log->slot >= KVM_MEMORY_SLOTS) |
1823 | goto out; | 1819 | goto out; |
1824 | 1820 | ||
1825 | memslot = &kvm->memslots[log->slot]; | 1821 | memslot = &kvm->memslots[log->slot]; |
1826 | r = -ENOENT; | 1822 | r = -ENOENT; |
1827 | if (!memslot->dirty_bitmap) | 1823 | if (!memslot->dirty_bitmap) |
1828 | goto out; | 1824 | goto out; |
1829 | 1825 | ||
1830 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1826 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
1831 | base = memslot->base_gfn / BITS_PER_LONG; | 1827 | base = memslot->base_gfn / BITS_PER_LONG; |
1832 | 1828 | ||
1833 | for (i = 0; i < n/sizeof(long); ++i) { | 1829 | for (i = 0; i < n/sizeof(long); ++i) { |
1834 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; | 1830 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; |
1835 | dirty_bitmap[base + i] = 0; | 1831 | dirty_bitmap[base + i] = 0; |
1836 | } | 1832 | } |
1837 | r = 0; | 1833 | r = 0; |
1838 | out: | 1834 | out: |
1839 | return r; | 1835 | return r; |
1840 | } | 1836 | } |
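The copy above is sized as n = ALIGN(memslot->npages, BITS_PER_LONG) / 8, i.e. the bitmap length in bytes rounded up to whole long words. As an illustrative calculation (numbers not from the diff): a 1000-page slot rounds up to 1024 bits, so n is 128 bytes and the loop moves n/sizeof(long) = 16 longs out of the shared dirty bitmap, starting at word base = base_gfn / BITS_PER_LONG.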
1841 | 1837 | ||
1842 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 1838 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
1843 | struct kvm_dirty_log *log) | 1839 | struct kvm_dirty_log *log) |
1844 | { | 1840 | { |
1845 | int r; | 1841 | int r; |
1846 | int n; | 1842 | int n; |
1847 | struct kvm_memory_slot *memslot; | 1843 | struct kvm_memory_slot *memslot; |
1848 | int is_dirty = 0; | 1844 | int is_dirty = 0; |
1849 | 1845 | ||
1850 | spin_lock(&kvm->arch.dirty_log_lock); | 1846 | spin_lock(&kvm->arch.dirty_log_lock); |
1851 | 1847 | ||
1852 | r = kvm_ia64_sync_dirty_log(kvm, log); | 1848 | r = kvm_ia64_sync_dirty_log(kvm, log); |
1853 | if (r) | 1849 | if (r) |
1854 | goto out; | 1850 | goto out; |
1855 | 1851 | ||
1856 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | 1852 | r = kvm_get_dirty_log(kvm, log, &is_dirty); |
1857 | if (r) | 1853 | if (r) |
1858 | goto out; | 1854 | goto out; |
1859 | 1855 | ||
1860 | /* If nothing is dirty, don't bother messing with page tables. */ | 1856 | /* If nothing is dirty, don't bother messing with page tables. */ |
1861 | if (is_dirty) { | 1857 | if (is_dirty) { |
1862 | kvm_flush_remote_tlbs(kvm); | 1858 | kvm_flush_remote_tlbs(kvm); |
1863 | memslot = &kvm->memslots[log->slot]; | 1859 | memslot = &kvm->memslots[log->slot]; |
1864 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1860 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
1865 | memset(memslot->dirty_bitmap, 0, n); | 1861 | memset(memslot->dirty_bitmap, 0, n); |
1866 | } | 1862 | } |
1867 | r = 0; | 1863 | r = 0; |
1868 | out: | 1864 | out: |
1869 | spin_unlock(&kvm->arch.dirty_log_lock); | 1865 | spin_unlock(&kvm->arch.dirty_log_lock); |
1870 | return r; | 1866 | return r; |
1871 | } | 1867 | } |
1872 | 1868 | ||
1873 | int kvm_arch_hardware_setup(void) | 1869 | int kvm_arch_hardware_setup(void) |
1874 | { | 1870 | { |
1875 | return 0; | 1871 | return 0; |
1876 | } | 1872 | } |
1877 | 1873 | ||
1878 | void kvm_arch_hardware_unsetup(void) | 1874 | void kvm_arch_hardware_unsetup(void) |
1879 | { | 1875 | { |
1880 | } | 1876 | } |
1881 | 1877 | ||
1882 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | 1878 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) |
1883 | { | 1879 | { |
1884 | int me; | 1880 | int me; |
1885 | int cpu = vcpu->cpu; | 1881 | int cpu = vcpu->cpu; |
1886 | 1882 | ||
1887 | if (waitqueue_active(&vcpu->wq)) | 1883 | if (waitqueue_active(&vcpu->wq)) |
1888 | wake_up_interruptible(&vcpu->wq); | 1884 | wake_up_interruptible(&vcpu->wq); |
1889 | 1885 | ||
1890 | me = get_cpu(); | 1886 | me = get_cpu(); |
1891 | if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu)) | 1887 | if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu)) |
1892 | if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) | 1888 | if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) |
1893 | smp_send_reschedule(cpu); | 1889 | smp_send_reschedule(cpu); |
1894 | put_cpu(); | 1890 | put_cpu(); |
1895 | } | 1891 | } |
1896 | 1892 | ||
1897 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) | 1893 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) |
1898 | { | 1894 | { |
1899 | return __apic_accept_irq(vcpu, irq->vector); | 1895 | return __apic_accept_irq(vcpu, irq->vector); |
1900 | } | 1896 | } |
1901 | 1897 | ||
1902 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | 1898 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) |
1903 | { | 1899 | { |
1904 | return apic->vcpu->vcpu_id == dest; | 1900 | return apic->vcpu->vcpu_id == dest; |
1905 | } | 1901 | } |
1906 | 1902 | ||
1907 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | 1903 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) |
1908 | { | 1904 | { |
1909 | return 0; | 1905 | return 0; |
1910 | } | 1906 | } |
1911 | 1907 | ||
1912 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) | 1908 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) |
1913 | { | 1909 | { |
1914 | return vcpu1->arch.xtp - vcpu2->arch.xtp; | 1910 | return vcpu1->arch.xtp - vcpu2->arch.xtp; |
1915 | } | 1911 | } |
1916 | 1912 | ||
1917 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | 1913 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
1918 | int short_hand, int dest, int dest_mode) | 1914 | int short_hand, int dest, int dest_mode) |
1919 | { | 1915 | { |
1920 | struct kvm_lapic *target = vcpu->arch.apic; | 1916 | struct kvm_lapic *target = vcpu->arch.apic; |
1921 | return (dest_mode == 0) ? | 1917 | return (dest_mode == 0) ? |
1922 | kvm_apic_match_physical_addr(target, dest) : | 1918 | kvm_apic_match_physical_addr(target, dest) : |
1923 | kvm_apic_match_logical_addr(target, dest); | 1919 | kvm_apic_match_logical_addr(target, dest); |
1924 | } | 1920 | } |
1925 | 1921 | ||
1926 | static int find_highest_bits(int *dat) | 1922 | static int find_highest_bits(int *dat) |
1927 | { | 1923 | { |
1928 | u32 bits, bitnum; | 1924 | u32 bits, bitnum; |
1929 | int i; | 1925 | int i; |
1930 | 1926 | ||
1931 | /* loop for all 256 bits */ | 1927 | /* loop for all 256 bits */ |
1932 | for (i = 7; i >= 0 ; i--) { | 1928 | for (i = 7; i >= 0 ; i--) { |
1933 | bits = dat[i]; | 1929 | bits = dat[i]; |
1934 | if (bits) { | 1930 | if (bits) { |
1935 | bitnum = fls(bits); | 1931 | bitnum = fls(bits); |
1936 | return i * 32 + bitnum - 1; | 1932 | return i * 32 + bitnum - 1; |
1937 | } | 1933 | } |
1938 | } | 1934 | } |
1939 | 1935 | ||
1940 | return -1; | 1936 | return -1; |
1941 | } | 1937 | } |
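find_highest_bits() walks the 256-bit IRR as eight 32-bit words from the top down and converts the highest set bit into a vector number with fls(). Worked example with made-up contents: if dat[7] through dat[3] are zero and dat[2] == 0x80, then fls(0x80) is 8 and the function returns 2 * 32 + 8 - 1 = 71, meaning vector 71 is the highest pending interrupt; an all-zero array returns -1.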
1942 | 1938 | ||
1943 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) | 1939 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) |
1944 | { | 1940 | { |
1945 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 1941 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
1946 | 1942 | ||
1947 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) | 1943 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) |
1948 | return NMI_VECTOR; | 1944 | return NMI_VECTOR; |
1949 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) | 1945 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) |
1950 | return ExtINT_VECTOR; | 1946 | return ExtINT_VECTOR; |
1951 | 1947 | ||
1952 | return find_highest_bits((int *)&vpd->irr[0]); | 1948 | return find_highest_bits((int *)&vpd->irr[0]); |
1953 | } | 1949 | } |
1954 | 1950 | ||
1955 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | 1951 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) |
1956 | { | 1952 | { |
1957 | if (kvm_highest_pending_irq(vcpu) != -1) | 1953 | if (kvm_highest_pending_irq(vcpu) != -1) |
1958 | return 1; | 1954 | return 1; |
1959 | return 0; | 1955 | return 0; |
1960 | } | 1956 | } |
1961 | 1957 | ||
1962 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) | 1958 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) |
1963 | { | 1959 | { |
1964 | /* do real check here */ | 1960 | /* do real check here */ |
1965 | return 1; | 1961 | return 1; |
1966 | } | 1962 | } |
1967 | 1963 | ||
1968 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 1964 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
1969 | { | 1965 | { |
1970 | return vcpu->arch.timer_fired; | 1966 | return vcpu->arch.timer_fired; |
1971 | } | 1967 | } |
1972 | 1968 | ||
1973 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | 1969 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
1974 | { | 1970 | { |
1975 | return gfn; | 1971 | return gfn; |
1976 | } | 1972 | } |
1977 | 1973 | ||
1978 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 1974 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
1979 | { | 1975 | { |
1980 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE; | 1976 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE; |
1981 | } | 1977 | } |
1982 | 1978 | ||
1983 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 1979 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
1984 | struct kvm_mp_state *mp_state) | 1980 | struct kvm_mp_state *mp_state) |
1985 | { | 1981 | { |
1986 | vcpu_load(vcpu); | 1982 | vcpu_load(vcpu); |
1987 | mp_state->mp_state = vcpu->arch.mp_state; | 1983 | mp_state->mp_state = vcpu->arch.mp_state; |
1988 | vcpu_put(vcpu); | 1984 | vcpu_put(vcpu); |
1989 | return 0; | 1985 | return 0; |
1990 | } | 1986 | } |
1991 | 1987 | ||
1992 | static int vcpu_reset(struct kvm_vcpu *vcpu) | 1988 | static int vcpu_reset(struct kvm_vcpu *vcpu) |
1993 | { | 1989 | { |
1994 | int r; | 1990 | int r; |
1995 | long psr; | 1991 | long psr; |
1996 | local_irq_save(psr); | 1992 | local_irq_save(psr); |
1997 | r = kvm_insert_vmm_mapping(vcpu); | 1993 | r = kvm_insert_vmm_mapping(vcpu); |
1998 | local_irq_restore(psr); | 1994 | local_irq_restore(psr); |
1999 | if (r) | 1995 | if (r) |
2000 | goto fail; | 1996 | goto fail; |
2001 | 1997 | ||
2002 | vcpu->arch.launched = 0; | 1998 | vcpu->arch.launched = 0; |
2003 | kvm_arch_vcpu_uninit(vcpu); | 1999 | kvm_arch_vcpu_uninit(vcpu); |
2004 | r = kvm_arch_vcpu_init(vcpu); | 2000 | r = kvm_arch_vcpu_init(vcpu); |
2005 | if (r) | 2001 | if (r) |
2006 | goto fail; | 2002 | goto fail; |
2007 | 2003 | ||
2008 | kvm_purge_vmm_mapping(vcpu); | 2004 | kvm_purge_vmm_mapping(vcpu); |
2009 | r = 0; | 2005 | r = 0; |
2010 | fail: | 2006 | fail: |
2011 | return r; | 2007 | return r; |
2012 | } | 2008 | } |
2013 | 2009 | ||
2014 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 2010 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
2015 | struct kvm_mp_state *mp_state) | 2011 | struct kvm_mp_state *mp_state) |
2016 | { | 2012 | { |
2017 | int r = 0; | 2013 | int r = 0; |
2018 | 2014 | ||
2019 | vcpu_load(vcpu); | 2015 | vcpu_load(vcpu); |
2020 | vcpu->arch.mp_state = mp_state->mp_state; | 2016 | vcpu->arch.mp_state = mp_state->mp_state; |
2021 | if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) | 2017 | if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) |
2022 | r = vcpu_reset(vcpu); | 2018 | r = vcpu_reset(vcpu); |
2023 | vcpu_put(vcpu); | 2019 | vcpu_put(vcpu); |
2024 | return r; | 2020 | return r; |
2025 | } | 2021 | } |
2026 | 2022 |
arch/ia64/kvm/vcpu.c
1 | /* | 1 | /* |
2 | * kvm_vcpu.c: handling all virtual cpu related things. | 2 | * kvm_vcpu.c: handling all virtual cpu related things. |
3 | * Copyright (c) 2005, Intel Corporation. | 3 | * Copyright (c) 2005, Intel Corporation. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms and conditions of the GNU General Public License, | 6 | * under the terms and conditions of the GNU General Public License, |
7 | * version 2, as published by the Free Software Foundation. | 7 | * version 2, as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | 9 | * This program is distributed in the hope it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License along with | 14 | * You should have received a copy of the GNU General Public License along with |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 16 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
17 | * | 17 | * |
18 | * Shaofan Li (Susue Li) <susie.li@intel.com> | 18 | * Shaofan Li (Susue Li) <susie.li@intel.com> |
19 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | 19 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) |
20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | 20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) |
21 | * Xiantao Zhang <xiantao.zhang@intel.com> | 21 | * Xiantao Zhang <xiantao.zhang@intel.com> |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | 26 | ||
27 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
28 | #include <asm/ia64regs.h> | 28 | #include <asm/ia64regs.h> |
29 | #include <asm/gcc_intrin.h> | 29 | #include <asm/gcc_intrin.h> |
30 | #include <asm/kregs.h> | 30 | #include <asm/kregs.h> |
31 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
32 | #include <asm/tlb.h> | 32 | #include <asm/tlb.h> |
33 | 33 | ||
34 | #include "asm-offsets.h" | 34 | #include "asm-offsets.h" |
35 | #include "vcpu.h" | 35 | #include "vcpu.h" |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Special notes: | 38 | * Special notes: |
39 | * - Index by it/dt/rt sequence | 39 | * - Index by it/dt/rt sequence |
40 | * - Only existing mode transitions are allowed in this table | 40 | * - Only existing mode transitions are allowed in this table |
41 | * - RSE is placed at lazy mode when emulating guest partial mode | 41 | * - RSE is placed at lazy mode when emulating guest partial mode |
42 | * - If gva happens to be rr0 and rr4, only allowed case is identity | 42 | * - If gva happens to be rr0 and rr4, only allowed case is identity |
43 | * mapping (gva=gpa), or panic! (How?) | 43 | * mapping (gva=gpa), or panic! (How?) |
44 | */ | 44 | */ |
45 | int mm_switch_table[8][8] = { | 45 | int mm_switch_table[8][8] = { |
46 | /* 2004/09/12(Kevin): Allow switch to self */ | 46 | /* 2004/09/12(Kevin): Allow switch to self */ |
47 | /* | 47 | /* |
48 | * (it,dt,rt): (0,0,0) -> (1,1,1) | 48 | * (it,dt,rt): (0,0,0) -> (1,1,1) |
49 | * This kind of transition usually occurs in the very early | 49 | * This kind of transition usually occurs in the very early |
50 | * stage of Linux boot up procedure. Another case is in efi | 50 | * stage of Linux boot up procedure. Another case is in efi |
51 | * and pal calls. (see "arch/ia64/kernel/head.S") | 51 | * and pal calls. (see "arch/ia64/kernel/head.S") |
52 | * | 52 | * |
53 | * (it,dt,rt): (0,0,0) -> (0,1,1) | 53 | * (it,dt,rt): (0,0,0) -> (0,1,1) |
54 | * This kind of transition is found when OSYa exits efi boot | 54 | * This kind of transition is found when OSYa exits efi boot |
55 | * service. Due to gva = gpa in this case (Same region), | 55 | * service. Due to gva = gpa in this case (Same region), |
56 | * data access can be satisfied though itlb entry for physical | 56 | * data access can be satisfied though itlb entry for physical |
57 | * emulation is hit. | 57 | * emulation is hit. |
58 | */ | 58 | */ |
59 | {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V}, | 59 | {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V}, |
60 | {0, 0, 0, 0, 0, 0, 0, 0}, | 60 | {0, 0, 0, 0, 0, 0, 0, 0}, |
61 | {0, 0, 0, 0, 0, 0, 0, 0}, | 61 | {0, 0, 0, 0, 0, 0, 0, 0}, |
62 | /* | 62 | /* |
63 | * (it,dt,rt): (0,1,1) -> (1,1,1) | 63 | * (it,dt,rt): (0,1,1) -> (1,1,1) |
64 | * This kind of transition is found in OSYa. | 64 | * This kind of transition is found in OSYa. |
65 | * | 65 | * |
66 | * (it,dt,rt): (0,1,1) -> (0,0,0) | 66 | * (it,dt,rt): (0,1,1) -> (0,0,0) |
67 | * This kind of transition is found in OSYa | 67 | * This kind of transition is found in OSYa |
68 | */ | 68 | */ |
69 | {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V}, | 69 | {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V}, |
70 | /* (1,0,0)->(1,1,1) */ | 70 | /* (1,0,0)->(1,1,1) */ |
71 | {0, 0, 0, 0, 0, 0, 0, SW_P2V}, | 71 | {0, 0, 0, 0, 0, 0, 0, SW_P2V}, |
72 | /* | 72 | /* |
73 | * (it,dt,rt): (1,0,1) -> (1,1,1) | 73 | * (it,dt,rt): (1,0,1) -> (1,1,1) |
74 | * This kind of transition usually occurs when Linux returns | 74 | * This kind of transition usually occurs when Linux returns |
75 | * from the low level TLB miss handlers. | 75 | * from the low level TLB miss handlers. |
76 | * (see "arch/ia64/kernel/ivt.S") | 76 | * (see "arch/ia64/kernel/ivt.S") |
77 | */ | 77 | */ |
78 | {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V}, | 78 | {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V}, |
79 | {0, 0, 0, 0, 0, 0, 0, 0}, | 79 | {0, 0, 0, 0, 0, 0, 0, 0}, |
80 | /* | 80 | /* |
81 | * (it,dt,rt): (1,1,1) -> (1,0,1) | 81 | * (it,dt,rt): (1,1,1) -> (1,0,1) |
82 | * This kind of transition usually occurs in Linux low level | 82 | * This kind of transition usually occurs in Linux low level |
83 | * TLB miss handler. (see "arch/ia64/kernel/ivt.S") | 83 | * TLB miss handler. (see "arch/ia64/kernel/ivt.S") |
84 | * | 84 | * |
85 | * (it,dt,rt): (1,1,1) -> (0,0,0) | 85 | * (it,dt,rt): (1,1,1) -> (0,0,0) |
86 | * This kind of transition usually occurs in pal and efi calls, | 86 | * This kind of transition usually occurs in pal and efi calls, |
87 | * which requires running in physical mode. | 87 | * which requires running in physical mode. |
88 | * (see "arch/ia64/kernel/head.S") | 88 | * (see "arch/ia64/kernel/head.S") |
89 | * (1,1,1)->(1,0,0) | 89 | * (1,1,1)->(1,0,0) |
90 | */ | 90 | */ |
91 | 91 | ||
92 | {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, | 92 | {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, |
93 | }; | 93 | }; |
94 | 94 | ||
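The table above is indexed by the (it, dt, rt) bits of the old and new psr. A minimal sketch of the lookup, assuming MODE_IND() packs the psr bits in it/dt/rt order; example_switch_action is a hypothetical helper shown only to illustrate the indexing:

	static int example_switch_action(struct ia64_psr old_psr, struct ia64_psr new_psr)
	{
		/* index = (it << 2) | (dt << 1) | rt, assuming MODE_IND() packs the bits this way */
		int old_idx = (old_psr.it << 2) | (old_psr.dt << 1) | old_psr.rt;
		int new_idx = (new_psr.it << 2) | (new_psr.dt << 1) | new_psr.rt;

		/* e.g. (it,dt,rt): (1,1,1) -> (1,0,1) gives indices 7 -> 5, i.e. SW_V2P */
		return mm_switch_table[old_idx][new_idx];
	}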
95 | void physical_mode_init(struct kvm_vcpu *vcpu) | 95 | void physical_mode_init(struct kvm_vcpu *vcpu) |
96 | { | 96 | { |
97 | vcpu->arch.mode_flags = GUEST_IN_PHY; | 97 | vcpu->arch.mode_flags = GUEST_IN_PHY; |
98 | } | 98 | } |
99 | 99 | ||
100 | void switch_to_physical_rid(struct kvm_vcpu *vcpu) | 100 | void switch_to_physical_rid(struct kvm_vcpu *vcpu) |
101 | { | 101 | { |
102 | unsigned long psr; | 102 | unsigned long psr; |
103 | 103 | ||
104 | /* Save original virtual mode rr[0] and rr[4] */ | 104 | /* Save original virtual mode rr[0] and rr[4] */ |
105 | psr = ia64_clear_ic(); | 105 | psr = ia64_clear_ic(); |
106 | ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0); | 106 | ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0); |
107 | ia64_srlz_d(); | 107 | ia64_srlz_d(); |
108 | ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4); | 108 | ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4); |
109 | ia64_srlz_d(); | 109 | ia64_srlz_d(); |
110 | 110 | ||
111 | ia64_set_psr(psr); | 111 | ia64_set_psr(psr); |
112 | return; | 112 | return; |
113 | } | 113 | } |
114 | 114 | ||
115 | void switch_to_virtual_rid(struct kvm_vcpu *vcpu) | 115 | void switch_to_virtual_rid(struct kvm_vcpu *vcpu) |
116 | { | 116 | { |
117 | unsigned long psr; | 117 | unsigned long psr; |
118 | 118 | ||
119 | psr = ia64_clear_ic(); | 119 | psr = ia64_clear_ic(); |
120 | ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); | 120 | ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); |
121 | ia64_srlz_d(); | 121 | ia64_srlz_d(); |
122 | ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); | 122 | ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); |
123 | ia64_srlz_d(); | 123 | ia64_srlz_d(); |
124 | ia64_set_psr(psr); | 124 | ia64_set_psr(psr); |
125 | return; | 125 | return; |
126 | } | 126 | } |
127 | 127 | ||
128 | static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) | 128 | static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) |
129 | { | 129 | { |
130 | return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; | 130 | return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; |
131 | } | 131 | } |
132 | 132 | ||
133 | void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | 133 | void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, |
134 | struct ia64_psr new_psr) | 134 | struct ia64_psr new_psr) |
135 | { | 135 | { |
136 | int act; | 136 | int act; |
137 | act = mm_switch_action(old_psr, new_psr); | 137 | act = mm_switch_action(old_psr, new_psr); |
138 | switch (act) { | 138 | switch (act) { |
139 | case SW_V2P: | 139 | case SW_V2P: |
140 | /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", | 140 | /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", |
141 | old_psr.val, new_psr.val);*/ | 141 | old_psr.val, new_psr.val);*/ |
142 | switch_to_physical_rid(vcpu); | 142 | switch_to_physical_rid(vcpu); |
143 | /* | 143 | /* |
144 | * Set rse to enforced lazy mode, to prevent active rse | 144 | * Set rse to enforced lazy mode, to prevent active rse |
145 | * save/restore while in guest physical mode. | 145 | * save/restore while in guest physical mode. |
146 | */ | 146 | */ |
147 | vcpu->arch.mode_flags |= GUEST_IN_PHY; | 147 | vcpu->arch.mode_flags |= GUEST_IN_PHY; |
148 | break; | 148 | break; |
149 | case SW_P2V: | 149 | case SW_P2V: |
150 | switch_to_virtual_rid(vcpu); | 150 | switch_to_virtual_rid(vcpu); |
151 | /* | 151 | /* |
152 | * recover old mode which is saved when entering | 152 | * recover old mode which is saved when entering |
153 | * guest physical mode | 153 | * guest physical mode |
154 | */ | 154 | */ |
155 | vcpu->arch.mode_flags &= ~GUEST_IN_PHY; | 155 | vcpu->arch.mode_flags &= ~GUEST_IN_PHY; |
156 | break; | 156 | break; |
157 | case SW_SELF: | 157 | case SW_SELF: |
158 | break; | 158 | break; |
159 | case SW_NOP: | 159 | case SW_NOP: |
160 | break; | 160 | break; |
161 | default: | 161 | default: |
162 | /* Sanity check */ | 162 | /* Sanity check */ |
163 | break; | 163 | break; |
164 | } | 164 | } |
165 | return; | 165 | return; |
166 | } | 166 | } |
167 | 167 | ||
168 | /* | 168 | /* |
169 | * In physical mode, inserting tc/tr entries for regions 0 and 4 uses | 169 | * In physical mode, inserting tc/tr entries for regions 0 and 4 uses |
170 | * RID[0] and RID[4], which are for physical mode emulation. | 170 | * RID[0] and RID[4], which are for physical mode emulation. |
171 | * However, what those inserted tc/tr entries want is the rid for | 171 | * However, what those inserted tc/tr entries want is the rid for |
172 | * virtual mode, so the original virtual rid needs to be restored | 172 | * virtual mode, so the original virtual rid needs to be restored |
173 | * before the insert. | 173 | * before the insert. |
174 | * | 174 | * |
175 | * Operations which require such a switch include: | 175 | * Operations which require such a switch include: |
176 | * - insertions (itc.*, itr.*) | 176 | * - insertions (itc.*, itr.*) |
177 | * - purges (ptc.* and ptr.*) | 177 | * - purges (ptc.* and ptr.*) |
178 | * - tpa | 178 | * - tpa |
179 | * - tak | 179 | * - tak |
180 | * - thash?, ttag? | 180 | * - thash?, ttag? |
181 | * All of the above need the actual virtual rid for the destination entry. | 181 | * All of the above need the actual virtual rid for the destination entry. |
182 | */ | 182 | */ |
183 | 183 | ||
184 | void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | 184 | void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, |
185 | struct ia64_psr new_psr) | 185 | struct ia64_psr new_psr) |
186 | { | 186 | { |
187 | 187 | ||
188 | if ((old_psr.dt != new_psr.dt) | 188 | if ((old_psr.dt != new_psr.dt) |
189 | || (old_psr.it != new_psr.it) | 189 | || (old_psr.it != new_psr.it) |
190 | || (old_psr.rt != new_psr.rt)) | 190 | || (old_psr.rt != new_psr.rt)) |
191 | switch_mm_mode(vcpu, old_psr, new_psr); | 191 | switch_mm_mode(vcpu, old_psr, new_psr); |
192 | 192 | ||
193 | return; | 193 | return; |
194 | } | 194 | } |
195 | 195 | ||
196 | 196 | ||
197 | /* | 197 | /* |
198 | * In physical mode, inserting tc/tr entries for regions 0 and 4 uses | 198 | * In physical mode, inserting tc/tr entries for regions 0 and 4 uses |
199 | * RID[0] and RID[4], which are for physical mode emulation. | 199 | * RID[0] and RID[4], which are for physical mode emulation. |
200 | * However, what those inserted tc/tr entries want is the rid for | 200 | * However, what those inserted tc/tr entries want is the rid for |
201 | * virtual mode, so the original virtual rid needs to be restored | 201 | * virtual mode, so the original virtual rid needs to be restored |
202 | * before the insert. | 202 | * before the insert. |
203 | * | 203 | * |
204 | * Operations which require such a switch include: | 204 | * Operations which require such a switch include: |
205 | * - insertions (itc.*, itr.*) | 205 | * - insertions (itc.*, itr.*) |
206 | * - purges (ptc.* and ptr.*) | 206 | * - purges (ptc.* and ptr.*) |
207 | * - tpa | 207 | * - tpa |
208 | * - tak | 208 | * - tak |
209 | * - thash?, ttag? | 209 | * - thash?, ttag? |
210 | * All of the above need the actual virtual rid for the destination entry. | 210 | * All of the above need the actual virtual rid for the destination entry. |
211 | */ | 211 | */ |
212 | 212 | ||
213 | void prepare_if_physical_mode(struct kvm_vcpu *vcpu) | 213 | void prepare_if_physical_mode(struct kvm_vcpu *vcpu) |
214 | { | 214 | { |
215 | if (is_physical_mode(vcpu)) { | 215 | if (is_physical_mode(vcpu)) { |
216 | vcpu->arch.mode_flags |= GUEST_PHY_EMUL; | 216 | vcpu->arch.mode_flags |= GUEST_PHY_EMUL; |
217 | switch_to_virtual_rid(vcpu); | 217 | switch_to_virtual_rid(vcpu); |
218 | } | 218 | } |
219 | return; | 219 | return; |
220 | } | 220 | } |
221 | 221 | ||
222 | /* Recover always follows prepare */ | 222 | /* Recover always follows prepare */ |
223 | void recover_if_physical_mode(struct kvm_vcpu *vcpu) | 223 | void recover_if_physical_mode(struct kvm_vcpu *vcpu) |
224 | { | 224 | { |
225 | if (is_physical_mode(vcpu)) | 225 | if (is_physical_mode(vcpu)) |
226 | switch_to_physical_rid(vcpu); | 226 | switch_to_physical_rid(vcpu); |
227 | vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; | 227 | vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; |
228 | return; | 228 | return; |
229 | } | 229 | } |
230 | 230 | ||
231 | #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) | 231 | #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) |
232 | 232 | ||
233 | static u16 gr_info[32] = { | 233 | static u16 gr_info[32] = { |
234 | 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ | 234 | 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ |
235 | RPT(r1), RPT(r2), RPT(r3), | 235 | RPT(r1), RPT(r2), RPT(r3), |
236 | RPT(r4), RPT(r5), RPT(r6), RPT(r7), | 236 | RPT(r4), RPT(r5), RPT(r6), RPT(r7), |
237 | RPT(r8), RPT(r9), RPT(r10), RPT(r11), | 237 | RPT(r8), RPT(r9), RPT(r10), RPT(r11), |
238 | RPT(r12), RPT(r13), RPT(r14), RPT(r15), | 238 | RPT(r12), RPT(r13), RPT(r14), RPT(r15), |
239 | RPT(r16), RPT(r17), RPT(r18), RPT(r19), | 239 | RPT(r16), RPT(r17), RPT(r18), RPT(r19), |
240 | RPT(r20), RPT(r21), RPT(r22), RPT(r23), | 240 | RPT(r20), RPT(r21), RPT(r22), RPT(r23), |
241 | RPT(r24), RPT(r25), RPT(r26), RPT(r27), | 241 | RPT(r24), RPT(r25), RPT(r26), RPT(r27), |
242 | RPT(r28), RPT(r29), RPT(r30), RPT(r31) | 242 | RPT(r28), RPT(r29), RPT(r30), RPT(r31) |
243 | }; | 243 | }; |
244 | 244 | ||
245 | #define IA64_FIRST_STACKED_GR 32 | 245 | #define IA64_FIRST_STACKED_GR 32 |
246 | #define IA64_FIRST_ROTATING_FR 32 | 246 | #define IA64_FIRST_ROTATING_FR 32 |
247 | 247 | ||
248 | static inline unsigned long | 248 | static inline unsigned long |
249 | rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg) | 249 | rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg) |
250 | { | 250 | { |
251 | reg += rrb; | 251 | reg += rrb; |
252 | if (reg >= sor) | 252 | if (reg >= sor) |
253 | reg -= sor; | 253 | reg -= sor; |
254 | return reg; | 254 | return reg; |
255 | } | 255 | } |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * Return the (rotated) index of the floating point register | 258 | * Return the (rotated) index of the floating point register |
259 | * REGNUM (REGNUM must range from 32-127; | 259 | * REGNUM (REGNUM must range from 32-127; |
260 | * the result is in the range 0-95). | 260 | * the result is in the range 0-95). |
261 | */ | 261 | */ |
262 | static inline unsigned long fph_index(struct kvm_pt_regs *regs, | 262 | static inline unsigned long fph_index(struct kvm_pt_regs *regs, |
263 | long regnum) | 263 | long regnum) |
264 | { | 264 | { |
265 | unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; | 265 | unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; |
266 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); | 266 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); |
267 | } | 267 | } |
268 | 268 | ||
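A worked example of the rotation, illustrative only and not part of the patch: with cr.ifs.rrb.fr = 3, an access to f33 resolves as follows.

	/*
	 * fph_index(regs, 33):
	 *     rrb_fr = (cr_ifs >> 25) & 0x7f = 3
	 *     rotate_reg(96, 3, 33 - 32) = 1 + 3 = 4   (no wrap, since 4 < 96)
	 * so the value lives in rotating slot 4, i.e. physical f36
	 * (IA64_FIRST_ROTATING_FR + 4), which is what getfpreg() below spills.
	 */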
269 | /* | 269 | /* |
270 | * The inverse of the above: given bspstore and the number of | 270 | * The inverse of the above: given bspstore and the number of |
271 | * registers, calculate ar.bsp. | 271 | * registers, calculate ar.bsp. |
272 | */ | 272 | */ |
273 | static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr, | 273 | static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr, |
274 | long num_regs) | 274 | long num_regs) |
275 | { | 275 | { |
276 | long delta = ia64_rse_slot_num(addr) + num_regs; | 276 | long delta = ia64_rse_slot_num(addr) + num_regs; |
277 | int i = 0; | 277 | int i = 0; |
278 | 278 | ||
279 | if (num_regs < 0) | 279 | if (num_regs < 0) |
280 | delta -= 0x3e; | 280 | delta -= 0x3e; |
281 | if (delta < 0) { | 281 | if (delta < 0) { |
282 | while (delta <= -0x3f) { | 282 | while (delta <= -0x3f) { |
283 | i--; | 283 | i--; |
284 | delta += 0x3f; | 284 | delta += 0x3f; |
285 | } | 285 | } |
286 | } else { | 286 | } else { |
287 | while (delta >= 0x3f) { | 287 | while (delta >= 0x3f) { |
288 | i++; | 288 | i++; |
289 | delta -= 0x3f; | 289 | delta -= 0x3f; |
290 | } | 290 | } |
291 | } | 291 | } |
292 | 292 | ||
293 | return addr + num_regs + i; | 293 | return addr + num_regs + i; |
294 | } | 294 | } |
295 | 295 | ||
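The 0x3f constants account for the RSE backing-store layout, where every 64th slot holds RNAT collection bits rather than a register, so skipping num_regs registers may cross extra slots. A hedged worked example:

	/*
	 * Illustrative only: starting from an address in slot 60 of its
	 * 64-slot group, skipping forward 10 registers gives
	 *     delta = 60 + 10 = 70 >= 0x3f  =>  i = 1
	 * so kvm_rse_skip_regs() returns addr + 10 + 1, stepping over the
	 * one RNAT collection slot that lies in between.
	 */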
296 | static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | 296 | static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, |
297 | unsigned long *val, int *nat) | 297 | unsigned long *val, int *nat) |
298 | { | 298 | { |
299 | unsigned long *bsp, *addr, *rnat_addr, *bspstore; | 299 | unsigned long *bsp, *addr, *rnat_addr, *bspstore; |
300 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; | 300 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; |
301 | unsigned long nat_mask; | 301 | unsigned long nat_mask; |
302 | unsigned long old_rsc, new_rsc; | 302 | unsigned long old_rsc, new_rsc; |
303 | long sof = (regs->cr_ifs) & 0x7f; | 303 | long sof = (regs->cr_ifs) & 0x7f; |
304 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); | 304 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); |
305 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; | 305 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; |
306 | long ridx = r1 - 32; | 306 | long ridx = r1 - 32; |
307 | 307 | ||
308 | if (ridx < sor) | 308 | if (ridx < sor) |
309 | ridx = rotate_reg(sor, rrb_gr, ridx); | 309 | ridx = rotate_reg(sor, rrb_gr, ridx); |
310 | 310 | ||
311 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); | 311 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); |
312 | new_rsc = old_rsc&(~(0x3)); | 312 | new_rsc = old_rsc&(~(0x3)); |
313 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); | 313 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); |
314 | 314 | ||
315 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | 315 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); |
316 | bsp = kbs + (regs->loadrs >> 19); | 316 | bsp = kbs + (regs->loadrs >> 19); |
317 | 317 | ||
318 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); | 318 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); |
319 | nat_mask = 1UL << ia64_rse_slot_num(addr); | 319 | nat_mask = 1UL << ia64_rse_slot_num(addr); |
320 | rnat_addr = ia64_rse_rnat_addr(addr); | 320 | rnat_addr = ia64_rse_rnat_addr(addr); |
321 | 321 | ||
322 | if (addr >= bspstore) { | 322 | if (addr >= bspstore) { |
323 | ia64_flushrs(); | 323 | ia64_flushrs(); |
324 | ia64_mf(); | 324 | ia64_mf(); |
325 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | 325 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); |
326 | } | 326 | } |
327 | *val = *addr; | 327 | *val = *addr; |
328 | if (nat) { | 328 | if (nat) { |
329 | if (bspstore < rnat_addr) | 329 | if (bspstore < rnat_addr) |
330 | *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) | 330 | *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) |
331 | & nat_mask); | 331 | & nat_mask); |
332 | else | 332 | else |
333 | *nat = (int)!!((*rnat_addr) & nat_mask); | 333 | *nat = (int)!!((*rnat_addr) & nat_mask); |
334 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); | 334 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); |
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
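One detail worth spelling out: regs->loadrs holds its value in the ar.rsc.loadrs field position (bits 29:16), expressed in bytes, so shifting right by 19 strips the field offset (16) and converts bytes to 8-byte slots (3); the "/* 16 + 3 */" comment in set_rse_reg() below refers to the same thing. A hedged worked example:

	/*
	 * Illustrative only: if 24 dirty stacked registers (and no RNAT
	 * slots) are accounted for, loadrs encodes 24 * 8 = 192 bytes at
	 * bits 29:16, i.e. regs->loadrs = 192 << 16, and
	 *
	 *     regs->loadrs >> 19 == (192 << 16) >> 19 == 24 slots,
	 *
	 * so bsp = kbs + 24 points just above the last dirty register in
	 * the VMM register backing store.
	 */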
338 | void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | 338 | void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, |
339 | unsigned long val, unsigned long nat) | 339 | unsigned long val, unsigned long nat) |
340 | { | 340 | { |
341 | unsigned long *bsp, *bspstore, *addr, *rnat_addr; | 341 | unsigned long *bsp, *bspstore, *addr, *rnat_addr; |
342 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; | 342 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; |
343 | unsigned long nat_mask; | 343 | unsigned long nat_mask; |
344 | unsigned long old_rsc, new_rsc, psr; | 344 | unsigned long old_rsc, new_rsc, psr; |
345 | unsigned long rnat; | 345 | unsigned long rnat; |
346 | long sof = (regs->cr_ifs) & 0x7f; | 346 | long sof = (regs->cr_ifs) & 0x7f; |
347 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); | 347 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); |
348 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; | 348 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; |
349 | long ridx = r1 - 32; | 349 | long ridx = r1 - 32; |
350 | 350 | ||
351 | if (ridx < sor) | 351 | if (ridx < sor) |
352 | ridx = rotate_reg(sor, rrb_gr, ridx); | 352 | ridx = rotate_reg(sor, rrb_gr, ridx); |
353 | 353 | ||
354 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); | 354 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); |
355 | /* put RSC to lazy mode, and set loadrs 0 */ | 355 | /* put RSC to lazy mode, and set loadrs 0 */ |
356 | new_rsc = old_rsc & (~0x3fff0003); | 356 | new_rsc = old_rsc & (~0x3fff0003); |
357 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); | 357 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); |
358 | bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */ | 358 | bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */ |
359 | 359 | ||
360 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); | 360 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); |
361 | nat_mask = 1UL << ia64_rse_slot_num(addr); | 361 | nat_mask = 1UL << ia64_rse_slot_num(addr); |
362 | rnat_addr = ia64_rse_rnat_addr(addr); | 362 | rnat_addr = ia64_rse_rnat_addr(addr); |
363 | 363 | ||
364 | local_irq_save(psr); | 364 | local_irq_save(psr); |
365 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | 365 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); |
366 | if (addr >= bspstore) { | 366 | if (addr >= bspstore) { |
367 | 367 | ||
368 | ia64_flushrs(); | 368 | ia64_flushrs(); |
369 | ia64_mf(); | 369 | ia64_mf(); |
370 | *addr = val; | 370 | *addr = val; |
371 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | 371 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); |
372 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); | 372 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); |
373 | if (bspstore < rnat_addr) | 373 | if (bspstore < rnat_addr) |
374 | rnat = rnat & (~nat_mask); | 374 | rnat = rnat & (~nat_mask); |
375 | else | 375 | else |
376 | *rnat_addr = (*rnat_addr)&(~nat_mask); | 376 | *rnat_addr = (*rnat_addr)&(~nat_mask); |
377 | 377 | ||
378 | ia64_mf(); | 378 | ia64_mf(); |
379 | ia64_loadrs(); | 379 | ia64_loadrs(); |
380 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | 380 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); |
381 | } else { | 381 | } else { |
382 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); | 382 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); |
383 | *addr = val; | 383 | *addr = val; |
384 | if (bspstore < rnat_addr) | 384 | if (bspstore < rnat_addr) |
385 | rnat = rnat&(~nat_mask); | 385 | rnat = rnat&(~nat_mask); |
386 | else | 386 | else |
387 | *rnat_addr = (*rnat_addr) & (~nat_mask); | 387 | *rnat_addr = (*rnat_addr) & (~nat_mask); |
388 | 388 | ||
389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); | 389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); |
390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | 390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); |
391 | } | 391 | } |
392 | local_irq_restore(psr); | 392 | local_irq_restore(psr); |
393 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); | 393 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); |
394 | } | 394 | } |
395 | 395 | ||
396 | void getreg(unsigned long regnum, unsigned long *val, | 396 | void getreg(unsigned long regnum, unsigned long *val, |
397 | int *nat, struct kvm_pt_regs *regs) | 397 | int *nat, struct kvm_pt_regs *regs) |
398 | { | 398 | { |
399 | unsigned long addr, *unat; | 399 | unsigned long addr, *unat; |
400 | if (regnum >= IA64_FIRST_STACKED_GR) { | 400 | if (regnum >= IA64_FIRST_STACKED_GR) { |
401 | get_rse_reg(regs, regnum, val, nat); | 401 | get_rse_reg(regs, regnum, val, nat); |
402 | return; | 402 | return; |
403 | } | 403 | } |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * Now look at registers in [0-31] range and init correct UNAT | 406 | * Now look at registers in [0-31] range and init correct UNAT |
407 | */ | 407 | */ |
408 | addr = (unsigned long)regs; | 408 | addr = (unsigned long)regs; |
409 | unat = &regs->eml_unat; | 409 | unat = &regs->eml_unat; |
410 | 410 | ||
411 | addr += gr_info[regnum]; | 411 | addr += gr_info[regnum]; |
412 | 412 | ||
413 | *val = *(unsigned long *)addr; | 413 | *val = *(unsigned long *)addr; |
414 | /* | 414 | /* |
415 | * do it only when requested | 415 | * do it only when requested |
416 | */ | 416 | */ |
417 | if (nat) | 417 | if (nat) |
418 | *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; | 418 | *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; |
419 | } | 419 | } |
420 | 420 | ||
421 | void setreg(unsigned long regnum, unsigned long val, | 421 | void setreg(unsigned long regnum, unsigned long val, |
422 | int nat, struct kvm_pt_regs *regs) | 422 | int nat, struct kvm_pt_regs *regs) |
423 | { | 423 | { |
424 | unsigned long addr; | 424 | unsigned long addr; |
425 | unsigned long bitmask; | 425 | unsigned long bitmask; |
426 | unsigned long *unat; | 426 | unsigned long *unat; |
427 | 427 | ||
428 | /* | 428 | /* |
429 | * First takes care of stacked registers | 429 | * First takes care of stacked registers |
430 | */ | 430 | */ |
431 | if (regnum >= IA64_FIRST_STACKED_GR) { | 431 | if (regnum >= IA64_FIRST_STACKED_GR) { |
432 | set_rse_reg(regs, regnum, val, nat); | 432 | set_rse_reg(regs, regnum, val, nat); |
433 | return; | 433 | return; |
434 | } | 434 | } |
435 | 435 | ||
436 | /* | 436 | /* |
437 | * Now look at registers in [0-31] range and init correct UNAT | 437 | * Now look at registers in [0-31] range and init correct UNAT |
438 | */ | 438 | */ |
439 | addr = (unsigned long)regs; | 439 | addr = (unsigned long)regs; |
440 | unat = &regs->eml_unat; | 440 | unat = &regs->eml_unat; |
441 | /* | 441 | /* |
442 | * add offset from base of struct | 442 | * add offset from base of struct |
443 | * and do it ! | 443 | * and do it ! |
444 | */ | 444 | */ |
445 | addr += gr_info[regnum]; | 445 | addr += gr_info[regnum]; |
446 | 446 | ||
447 | *(unsigned long *)addr = val; | 447 | *(unsigned long *)addr = val; |
448 | 448 | ||
449 | /* | 449 | /* |
450 | * We need to clear the corresponding UNAT bit to fully emulate the load | 450 | * We need to clear the corresponding UNAT bit to fully emulate the load |
451 | * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4 | 451 | * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4 |
452 | */ | 452 | */ |
453 | bitmask = 1UL << ((addr >> 3) & 0x3f); | 453 | bitmask = 1UL << ((addr >> 3) & 0x3f); |
454 | if (nat) | 454 | if (nat) |
455 | *unat |= bitmask; | 455 | *unat |= bitmask; |
456 | else | 456 | else |
457 | *unat &= ~bitmask; | 457 | *unat &= ~bitmask; |
458 | 458 | ||
459 | } | 459 | } |
460 | 460 | ||
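For the static registers the NaT bit is tracked in eml_unat, indexed by bits 8:3 of the register's save address, as sketched below (illustrative only; the concrete address is hypothetical):

	/*
	 * bitmask = 1UL << ((addr >> 3) & 0x3f)
	 * e.g. if the r14 save slot within kvm_pt_regs ends up at an address
	 * whose bits 8:3 equal 0x12, then setreg() toggles bit 18 of
	 * regs->eml_unat, mirroring how st8.spill would update ar.unat.
	 */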
461 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) | 461 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) |
462 | { | 462 | { |
463 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 463 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
464 | unsigned long val; | 464 | unsigned long val; |
465 | 465 | ||
466 | if (!reg) | 466 | if (!reg) |
467 | return 0; | 467 | return 0; |
468 | getreg(reg, &val, 0, regs); | 468 | getreg(reg, &val, 0, regs); |
469 | return val; | 469 | return val; |
470 | } | 470 | } |
471 | 471 | ||
472 | void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) | 472 | void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) |
473 | { | 473 | { |
474 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 474 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
475 | long sof = (regs->cr_ifs) & 0x7f; | 475 | long sof = (regs->cr_ifs) & 0x7f; |
476 | 476 | ||
477 | if (!reg) | 477 | if (!reg) |
478 | return; | 478 | return; |
479 | if (reg >= sof + 32) | 479 | if (reg >= sof + 32) |
480 | return; | 480 | return; |
481 | setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ | 481 | setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ |
482 | } | 482 | } |
483 | 483 | ||
484 | void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, | 484 | void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, |
485 | struct kvm_pt_regs *regs) | 485 | struct kvm_pt_regs *regs) |
486 | { | 486 | { |
487 | /* Take floating register rotation into consideration */ | 487 | /* Take floating register rotation into consideration */ |
488 | if (regnum >= IA64_FIRST_ROTATING_FR) | 488 | if (regnum >= IA64_FIRST_ROTATING_FR) |
489 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); | 489 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); |
490 | #define CASE_FIXED_FP(reg) \ | 490 | #define CASE_FIXED_FP(reg) \ |
491 | case (reg) : \ | 491 | case (reg) : \ |
492 | ia64_stf_spill(fpval, reg); \ | 492 | ia64_stf_spill(fpval, reg); \ |
493 | break | 493 | break |
494 | 494 | ||
495 | switch (regnum) { | 495 | switch (regnum) { |
496 | CASE_FIXED_FP(0); | 496 | CASE_FIXED_FP(0); |
497 | CASE_FIXED_FP(1); | 497 | CASE_FIXED_FP(1); |
498 | CASE_FIXED_FP(2); | 498 | CASE_FIXED_FP(2); |
499 | CASE_FIXED_FP(3); | 499 | CASE_FIXED_FP(3); |
500 | CASE_FIXED_FP(4); | 500 | CASE_FIXED_FP(4); |
501 | CASE_FIXED_FP(5); | 501 | CASE_FIXED_FP(5); |
502 | 502 | ||
503 | CASE_FIXED_FP(6); | 503 | CASE_FIXED_FP(6); |
504 | CASE_FIXED_FP(7); | 504 | CASE_FIXED_FP(7); |
505 | CASE_FIXED_FP(8); | 505 | CASE_FIXED_FP(8); |
506 | CASE_FIXED_FP(9); | 506 | CASE_FIXED_FP(9); |
507 | CASE_FIXED_FP(10); | 507 | CASE_FIXED_FP(10); |
508 | CASE_FIXED_FP(11); | 508 | CASE_FIXED_FP(11); |
509 | 509 | ||
510 | CASE_FIXED_FP(12); | 510 | CASE_FIXED_FP(12); |
511 | CASE_FIXED_FP(13); | 511 | CASE_FIXED_FP(13); |
512 | CASE_FIXED_FP(14); | 512 | CASE_FIXED_FP(14); |
513 | CASE_FIXED_FP(15); | 513 | CASE_FIXED_FP(15); |
514 | CASE_FIXED_FP(16); | 514 | CASE_FIXED_FP(16); |
515 | CASE_FIXED_FP(17); | 515 | CASE_FIXED_FP(17); |
516 | CASE_FIXED_FP(18); | 516 | CASE_FIXED_FP(18); |
517 | CASE_FIXED_FP(19); | 517 | CASE_FIXED_FP(19); |
518 | CASE_FIXED_FP(20); | 518 | CASE_FIXED_FP(20); |
519 | CASE_FIXED_FP(21); | 519 | CASE_FIXED_FP(21); |
520 | CASE_FIXED_FP(22); | 520 | CASE_FIXED_FP(22); |
521 | CASE_FIXED_FP(23); | 521 | CASE_FIXED_FP(23); |
522 | CASE_FIXED_FP(24); | 522 | CASE_FIXED_FP(24); |
523 | CASE_FIXED_FP(25); | 523 | CASE_FIXED_FP(25); |
524 | CASE_FIXED_FP(26); | 524 | CASE_FIXED_FP(26); |
525 | CASE_FIXED_FP(27); | 525 | CASE_FIXED_FP(27); |
526 | CASE_FIXED_FP(28); | 526 | CASE_FIXED_FP(28); |
527 | CASE_FIXED_FP(29); | 527 | CASE_FIXED_FP(29); |
528 | CASE_FIXED_FP(30); | 528 | CASE_FIXED_FP(30); |
529 | CASE_FIXED_FP(31); | 529 | CASE_FIXED_FP(31); |
530 | CASE_FIXED_FP(32); | 530 | CASE_FIXED_FP(32); |
531 | CASE_FIXED_FP(33); | 531 | CASE_FIXED_FP(33); |
532 | CASE_FIXED_FP(34); | 532 | CASE_FIXED_FP(34); |
533 | CASE_FIXED_FP(35); | 533 | CASE_FIXED_FP(35); |
534 | CASE_FIXED_FP(36); | 534 | CASE_FIXED_FP(36); |
535 | CASE_FIXED_FP(37); | 535 | CASE_FIXED_FP(37); |
536 | CASE_FIXED_FP(38); | 536 | CASE_FIXED_FP(38); |
537 | CASE_FIXED_FP(39); | 537 | CASE_FIXED_FP(39); |
538 | CASE_FIXED_FP(40); | 538 | CASE_FIXED_FP(40); |
539 | CASE_FIXED_FP(41); | 539 | CASE_FIXED_FP(41); |
540 | CASE_FIXED_FP(42); | 540 | CASE_FIXED_FP(42); |
541 | CASE_FIXED_FP(43); | 541 | CASE_FIXED_FP(43); |
542 | CASE_FIXED_FP(44); | 542 | CASE_FIXED_FP(44); |
543 | CASE_FIXED_FP(45); | 543 | CASE_FIXED_FP(45); |
544 | CASE_FIXED_FP(46); | 544 | CASE_FIXED_FP(46); |
545 | CASE_FIXED_FP(47); | 545 | CASE_FIXED_FP(47); |
546 | CASE_FIXED_FP(48); | 546 | CASE_FIXED_FP(48); |
547 | CASE_FIXED_FP(49); | 547 | CASE_FIXED_FP(49); |
548 | CASE_FIXED_FP(50); | 548 | CASE_FIXED_FP(50); |
549 | CASE_FIXED_FP(51); | 549 | CASE_FIXED_FP(51); |
550 | CASE_FIXED_FP(52); | 550 | CASE_FIXED_FP(52); |
551 | CASE_FIXED_FP(53); | 551 | CASE_FIXED_FP(53); |
552 | CASE_FIXED_FP(54); | 552 | CASE_FIXED_FP(54); |
553 | CASE_FIXED_FP(55); | 553 | CASE_FIXED_FP(55); |
554 | CASE_FIXED_FP(56); | 554 | CASE_FIXED_FP(56); |
555 | CASE_FIXED_FP(57); | 555 | CASE_FIXED_FP(57); |
556 | CASE_FIXED_FP(58); | 556 | CASE_FIXED_FP(58); |
557 | CASE_FIXED_FP(59); | 557 | CASE_FIXED_FP(59); |
558 | CASE_FIXED_FP(60); | 558 | CASE_FIXED_FP(60); |
559 | CASE_FIXED_FP(61); | 559 | CASE_FIXED_FP(61); |
560 | CASE_FIXED_FP(62); | 560 | CASE_FIXED_FP(62); |
561 | CASE_FIXED_FP(63); | 561 | CASE_FIXED_FP(63); |
562 | CASE_FIXED_FP(64); | 562 | CASE_FIXED_FP(64); |
563 | CASE_FIXED_FP(65); | 563 | CASE_FIXED_FP(65); |
564 | CASE_FIXED_FP(66); | 564 | CASE_FIXED_FP(66); |
565 | CASE_FIXED_FP(67); | 565 | CASE_FIXED_FP(67); |
566 | CASE_FIXED_FP(68); | 566 | CASE_FIXED_FP(68); |
567 | CASE_FIXED_FP(69); | 567 | CASE_FIXED_FP(69); |
568 | CASE_FIXED_FP(70); | 568 | CASE_FIXED_FP(70); |
569 | CASE_FIXED_FP(71); | 569 | CASE_FIXED_FP(71); |
570 | CASE_FIXED_FP(72); | 570 | CASE_FIXED_FP(72); |
571 | CASE_FIXED_FP(73); | 571 | CASE_FIXED_FP(73); |
572 | CASE_FIXED_FP(74); | 572 | CASE_FIXED_FP(74); |
573 | CASE_FIXED_FP(75); | 573 | CASE_FIXED_FP(75); |
574 | CASE_FIXED_FP(76); | 574 | CASE_FIXED_FP(76); |
575 | CASE_FIXED_FP(77); | 575 | CASE_FIXED_FP(77); |
576 | CASE_FIXED_FP(78); | 576 | CASE_FIXED_FP(78); |
577 | CASE_FIXED_FP(79); | 577 | CASE_FIXED_FP(79); |
578 | CASE_FIXED_FP(80); | 578 | CASE_FIXED_FP(80); |
579 | CASE_FIXED_FP(81); | 579 | CASE_FIXED_FP(81); |
580 | CASE_FIXED_FP(82); | 580 | CASE_FIXED_FP(82); |
581 | CASE_FIXED_FP(83); | 581 | CASE_FIXED_FP(83); |
582 | CASE_FIXED_FP(84); | 582 | CASE_FIXED_FP(84); |
583 | CASE_FIXED_FP(85); | 583 | CASE_FIXED_FP(85); |
584 | CASE_FIXED_FP(86); | 584 | CASE_FIXED_FP(86); |
585 | CASE_FIXED_FP(87); | 585 | CASE_FIXED_FP(87); |
586 | CASE_FIXED_FP(88); | 586 | CASE_FIXED_FP(88); |
587 | CASE_FIXED_FP(89); | 587 | CASE_FIXED_FP(89); |
588 | CASE_FIXED_FP(90); | 588 | CASE_FIXED_FP(90); |
589 | CASE_FIXED_FP(91); | 589 | CASE_FIXED_FP(91); |
590 | CASE_FIXED_FP(92); | 590 | CASE_FIXED_FP(92); |
591 | CASE_FIXED_FP(93); | 591 | CASE_FIXED_FP(93); |
592 | CASE_FIXED_FP(94); | 592 | CASE_FIXED_FP(94); |
593 | CASE_FIXED_FP(95); | 593 | CASE_FIXED_FP(95); |
594 | CASE_FIXED_FP(96); | 594 | CASE_FIXED_FP(96); |
595 | CASE_FIXED_FP(97); | 595 | CASE_FIXED_FP(97); |
596 | CASE_FIXED_FP(98); | 596 | CASE_FIXED_FP(98); |
597 | CASE_FIXED_FP(99); | 597 | CASE_FIXED_FP(99); |
598 | CASE_FIXED_FP(100); | 598 | CASE_FIXED_FP(100); |
599 | CASE_FIXED_FP(101); | 599 | CASE_FIXED_FP(101); |
600 | CASE_FIXED_FP(102); | 600 | CASE_FIXED_FP(102); |
601 | CASE_FIXED_FP(103); | 601 | CASE_FIXED_FP(103); |
602 | CASE_FIXED_FP(104); | 602 | CASE_FIXED_FP(104); |
603 | CASE_FIXED_FP(105); | 603 | CASE_FIXED_FP(105); |
604 | CASE_FIXED_FP(106); | 604 | CASE_FIXED_FP(106); |
605 | CASE_FIXED_FP(107); | 605 | CASE_FIXED_FP(107); |
606 | CASE_FIXED_FP(108); | 606 | CASE_FIXED_FP(108); |
607 | CASE_FIXED_FP(109); | 607 | CASE_FIXED_FP(109); |
608 | CASE_FIXED_FP(110); | 608 | CASE_FIXED_FP(110); |
609 | CASE_FIXED_FP(111); | 609 | CASE_FIXED_FP(111); |
610 | CASE_FIXED_FP(112); | 610 | CASE_FIXED_FP(112); |
611 | CASE_FIXED_FP(113); | 611 | CASE_FIXED_FP(113); |
612 | CASE_FIXED_FP(114); | 612 | CASE_FIXED_FP(114); |
613 | CASE_FIXED_FP(115); | 613 | CASE_FIXED_FP(115); |
614 | CASE_FIXED_FP(116); | 614 | CASE_FIXED_FP(116); |
615 | CASE_FIXED_FP(117); | 615 | CASE_FIXED_FP(117); |
616 | CASE_FIXED_FP(118); | 616 | CASE_FIXED_FP(118); |
617 | CASE_FIXED_FP(119); | 617 | CASE_FIXED_FP(119); |
618 | CASE_FIXED_FP(120); | 618 | CASE_FIXED_FP(120); |
619 | CASE_FIXED_FP(121); | 619 | CASE_FIXED_FP(121); |
620 | CASE_FIXED_FP(122); | 620 | CASE_FIXED_FP(122); |
621 | CASE_FIXED_FP(123); | 621 | CASE_FIXED_FP(123); |
622 | CASE_FIXED_FP(124); | 622 | CASE_FIXED_FP(124); |
623 | CASE_FIXED_FP(125); | 623 | CASE_FIXED_FP(125); |
624 | CASE_FIXED_FP(126); | 624 | CASE_FIXED_FP(126); |
625 | CASE_FIXED_FP(127); | 625 | CASE_FIXED_FP(127); |
626 | } | 626 | } |
627 | #undef CASE_FIXED_FP | 627 | #undef CASE_FIXED_FP |
628 | } | 628 | } |
629 | 629 | ||
630 | void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, | 630 | void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, |
631 | struct kvm_pt_regs *regs) | 631 | struct kvm_pt_regs *regs) |
632 | { | 632 | { |
633 | /* Take floating register rotation into consideration */ | 633 | /* Take floating register rotation into consideration */ |
634 | if (regnum >= IA64_FIRST_ROTATING_FR) | 634 | if (regnum >= IA64_FIRST_ROTATING_FR) |
635 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); | 635 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); |
636 | 636 | ||
637 | #define CASE_FIXED_FP(reg) \ | 637 | #define CASE_FIXED_FP(reg) \ |
638 | case (reg) : \ | 638 | case (reg) : \ |
639 | ia64_ldf_fill(reg, fpval); \ | 639 | ia64_ldf_fill(reg, fpval); \ |
640 | break | 640 | break |
641 | 641 | ||
642 | switch (regnum) { | 642 | switch (regnum) { |
643 | CASE_FIXED_FP(2); | 643 | CASE_FIXED_FP(2); |
644 | CASE_FIXED_FP(3); | 644 | CASE_FIXED_FP(3); |
645 | CASE_FIXED_FP(4); | 645 | CASE_FIXED_FP(4); |
646 | CASE_FIXED_FP(5); | 646 | CASE_FIXED_FP(5); |
647 | 647 | ||
648 | CASE_FIXED_FP(6); | 648 | CASE_FIXED_FP(6); |
649 | CASE_FIXED_FP(7); | 649 | CASE_FIXED_FP(7); |
650 | CASE_FIXED_FP(8); | 650 | CASE_FIXED_FP(8); |
651 | CASE_FIXED_FP(9); | 651 | CASE_FIXED_FP(9); |
652 | CASE_FIXED_FP(10); | 652 | CASE_FIXED_FP(10); |
653 | CASE_FIXED_FP(11); | 653 | CASE_FIXED_FP(11); |
654 | 654 | ||
655 | CASE_FIXED_FP(12); | 655 | CASE_FIXED_FP(12); |
656 | CASE_FIXED_FP(13); | 656 | CASE_FIXED_FP(13); |
657 | CASE_FIXED_FP(14); | 657 | CASE_FIXED_FP(14); |
658 | CASE_FIXED_FP(15); | 658 | CASE_FIXED_FP(15); |
659 | CASE_FIXED_FP(16); | 659 | CASE_FIXED_FP(16); |
660 | CASE_FIXED_FP(17); | 660 | CASE_FIXED_FP(17); |
661 | CASE_FIXED_FP(18); | 661 | CASE_FIXED_FP(18); |
662 | CASE_FIXED_FP(19); | 662 | CASE_FIXED_FP(19); |
663 | CASE_FIXED_FP(20); | 663 | CASE_FIXED_FP(20); |
664 | CASE_FIXED_FP(21); | 664 | CASE_FIXED_FP(21); |
665 | CASE_FIXED_FP(22); | 665 | CASE_FIXED_FP(22); |
666 | CASE_FIXED_FP(23); | 666 | CASE_FIXED_FP(23); |
667 | CASE_FIXED_FP(24); | 667 | CASE_FIXED_FP(24); |
668 | CASE_FIXED_FP(25); | 668 | CASE_FIXED_FP(25); |
669 | CASE_FIXED_FP(26); | 669 | CASE_FIXED_FP(26); |
670 | CASE_FIXED_FP(27); | 670 | CASE_FIXED_FP(27); |
671 | CASE_FIXED_FP(28); | 671 | CASE_FIXED_FP(28); |
672 | CASE_FIXED_FP(29); | 672 | CASE_FIXED_FP(29); |
673 | CASE_FIXED_FP(30); | 673 | CASE_FIXED_FP(30); |
674 | CASE_FIXED_FP(31); | 674 | CASE_FIXED_FP(31); |
675 | CASE_FIXED_FP(32); | 675 | CASE_FIXED_FP(32); |
676 | CASE_FIXED_FP(33); | 676 | CASE_FIXED_FP(33); |
677 | CASE_FIXED_FP(34); | 677 | CASE_FIXED_FP(34); |
678 | CASE_FIXED_FP(35); | 678 | CASE_FIXED_FP(35); |
679 | CASE_FIXED_FP(36); | 679 | CASE_FIXED_FP(36); |
680 | CASE_FIXED_FP(37); | 680 | CASE_FIXED_FP(37); |
681 | CASE_FIXED_FP(38); | 681 | CASE_FIXED_FP(38); |
682 | CASE_FIXED_FP(39); | 682 | CASE_FIXED_FP(39); |
683 | CASE_FIXED_FP(40); | 683 | CASE_FIXED_FP(40); |
684 | CASE_FIXED_FP(41); | 684 | CASE_FIXED_FP(41); |
685 | CASE_FIXED_FP(42); | 685 | CASE_FIXED_FP(42); |
686 | CASE_FIXED_FP(43); | 686 | CASE_FIXED_FP(43); |
687 | CASE_FIXED_FP(44); | 687 | CASE_FIXED_FP(44); |
688 | CASE_FIXED_FP(45); | 688 | CASE_FIXED_FP(45); |
689 | CASE_FIXED_FP(46); | 689 | CASE_FIXED_FP(46); |
690 | CASE_FIXED_FP(47); | 690 | CASE_FIXED_FP(47); |
691 | CASE_FIXED_FP(48); | 691 | CASE_FIXED_FP(48); |
692 | CASE_FIXED_FP(49); | 692 | CASE_FIXED_FP(49); |
693 | CASE_FIXED_FP(50); | 693 | CASE_FIXED_FP(50); |
694 | CASE_FIXED_FP(51); | 694 | CASE_FIXED_FP(51); |
695 | CASE_FIXED_FP(52); | 695 | CASE_FIXED_FP(52); |
696 | CASE_FIXED_FP(53); | 696 | CASE_FIXED_FP(53); |
697 | CASE_FIXED_FP(54); | 697 | CASE_FIXED_FP(54); |
698 | CASE_FIXED_FP(55); | 698 | CASE_FIXED_FP(55); |
699 | CASE_FIXED_FP(56); | 699 | CASE_FIXED_FP(56); |
700 | CASE_FIXED_FP(57); | 700 | CASE_FIXED_FP(57); |
701 | CASE_FIXED_FP(58); | 701 | CASE_FIXED_FP(58); |
702 | CASE_FIXED_FP(59); | 702 | CASE_FIXED_FP(59); |
703 | CASE_FIXED_FP(60); | 703 | CASE_FIXED_FP(60); |
704 | CASE_FIXED_FP(61); | 704 | CASE_FIXED_FP(61); |
705 | CASE_FIXED_FP(62); | 705 | CASE_FIXED_FP(62); |
706 | CASE_FIXED_FP(63); | 706 | CASE_FIXED_FP(63); |
707 | CASE_FIXED_FP(64); | 707 | CASE_FIXED_FP(64); |
708 | CASE_FIXED_FP(65); | 708 | CASE_FIXED_FP(65); |
709 | CASE_FIXED_FP(66); | 709 | CASE_FIXED_FP(66); |
710 | CASE_FIXED_FP(67); | 710 | CASE_FIXED_FP(67); |
711 | CASE_FIXED_FP(68); | 711 | CASE_FIXED_FP(68); |
712 | CASE_FIXED_FP(69); | 712 | CASE_FIXED_FP(69); |
713 | CASE_FIXED_FP(70); | 713 | CASE_FIXED_FP(70); |
714 | CASE_FIXED_FP(71); | 714 | CASE_FIXED_FP(71); |
715 | CASE_FIXED_FP(72); | 715 | CASE_FIXED_FP(72); |
716 | CASE_FIXED_FP(73); | 716 | CASE_FIXED_FP(73); |
717 | CASE_FIXED_FP(74); | 717 | CASE_FIXED_FP(74); |
718 | CASE_FIXED_FP(75); | 718 | CASE_FIXED_FP(75); |
719 | CASE_FIXED_FP(76); | 719 | CASE_FIXED_FP(76); |
720 | CASE_FIXED_FP(77); | 720 | CASE_FIXED_FP(77); |
721 | CASE_FIXED_FP(78); | 721 | CASE_FIXED_FP(78); |
722 | CASE_FIXED_FP(79); | 722 | CASE_FIXED_FP(79); |
723 | CASE_FIXED_FP(80); | 723 | CASE_FIXED_FP(80); |
724 | CASE_FIXED_FP(81); | 724 | CASE_FIXED_FP(81); |
725 | CASE_FIXED_FP(82); | 725 | CASE_FIXED_FP(82); |
726 | CASE_FIXED_FP(83); | 726 | CASE_FIXED_FP(83); |
727 | CASE_FIXED_FP(84); | 727 | CASE_FIXED_FP(84); |
728 | CASE_FIXED_FP(85); | 728 | CASE_FIXED_FP(85); |
729 | CASE_FIXED_FP(86); | 729 | CASE_FIXED_FP(86); |
730 | CASE_FIXED_FP(87); | 730 | CASE_FIXED_FP(87); |
731 | CASE_FIXED_FP(88); | 731 | CASE_FIXED_FP(88); |
732 | CASE_FIXED_FP(89); | 732 | CASE_FIXED_FP(89); |
733 | CASE_FIXED_FP(90); | 733 | CASE_FIXED_FP(90); |
734 | CASE_FIXED_FP(91); | 734 | CASE_FIXED_FP(91); |
735 | CASE_FIXED_FP(92); | 735 | CASE_FIXED_FP(92); |
736 | CASE_FIXED_FP(93); | 736 | CASE_FIXED_FP(93); |
737 | CASE_FIXED_FP(94); | 737 | CASE_FIXED_FP(94); |
738 | CASE_FIXED_FP(95); | 738 | CASE_FIXED_FP(95); |
739 | CASE_FIXED_FP(96); | 739 | CASE_FIXED_FP(96); |
740 | CASE_FIXED_FP(97); | 740 | CASE_FIXED_FP(97); |
741 | CASE_FIXED_FP(98); | 741 | CASE_FIXED_FP(98); |
742 | CASE_FIXED_FP(99); | 742 | CASE_FIXED_FP(99); |
743 | CASE_FIXED_FP(100); | 743 | CASE_FIXED_FP(100); |
744 | CASE_FIXED_FP(101); | 744 | CASE_FIXED_FP(101); |
745 | CASE_FIXED_FP(102); | 745 | CASE_FIXED_FP(102); |
746 | CASE_FIXED_FP(103); | 746 | CASE_FIXED_FP(103); |
747 | CASE_FIXED_FP(104); | 747 | CASE_FIXED_FP(104); |
748 | CASE_FIXED_FP(105); | 748 | CASE_FIXED_FP(105); |
749 | CASE_FIXED_FP(106); | 749 | CASE_FIXED_FP(106); |
750 | CASE_FIXED_FP(107); | 750 | CASE_FIXED_FP(107); |
751 | CASE_FIXED_FP(108); | 751 | CASE_FIXED_FP(108); |
752 | CASE_FIXED_FP(109); | 752 | CASE_FIXED_FP(109); |
753 | CASE_FIXED_FP(110); | 753 | CASE_FIXED_FP(110); |
754 | CASE_FIXED_FP(111); | 754 | CASE_FIXED_FP(111); |
755 | CASE_FIXED_FP(112); | 755 | CASE_FIXED_FP(112); |
756 | CASE_FIXED_FP(113); | 756 | CASE_FIXED_FP(113); |
757 | CASE_FIXED_FP(114); | 757 | CASE_FIXED_FP(114); |
758 | CASE_FIXED_FP(115); | 758 | CASE_FIXED_FP(115); |
759 | CASE_FIXED_FP(116); | 759 | CASE_FIXED_FP(116); |
760 | CASE_FIXED_FP(117); | 760 | CASE_FIXED_FP(117); |
761 | CASE_FIXED_FP(118); | 761 | CASE_FIXED_FP(118); |
762 | CASE_FIXED_FP(119); | 762 | CASE_FIXED_FP(119); |
763 | CASE_FIXED_FP(120); | 763 | CASE_FIXED_FP(120); |
764 | CASE_FIXED_FP(121); | 764 | CASE_FIXED_FP(121); |
765 | CASE_FIXED_FP(122); | 765 | CASE_FIXED_FP(122); |
766 | CASE_FIXED_FP(123); | 766 | CASE_FIXED_FP(123); |
767 | CASE_FIXED_FP(124); | 767 | CASE_FIXED_FP(124); |
768 | CASE_FIXED_FP(125); | 768 | CASE_FIXED_FP(125); |
769 | CASE_FIXED_FP(126); | 769 | CASE_FIXED_FP(126); |
770 | CASE_FIXED_FP(127); | 770 | CASE_FIXED_FP(127); |
771 | } | 771 | } |
772 | } | 772 | } |
773 | 773 | ||
774 | void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | 774 | void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, |
775 | struct ia64_fpreg *val) | 775 | struct ia64_fpreg *val) |
776 | { | 776 | { |
777 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 777 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
778 | 778 | ||
779 | getfpreg(reg, val, regs); /* FIXME: handle NATs later*/ | 779 | getfpreg(reg, val, regs); /* FIXME: handle NATs later*/ |
780 | } | 780 | } |
781 | 781 | ||
782 | void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | 782 | void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, |
783 | struct ia64_fpreg *val) | 783 | struct ia64_fpreg *val) |
784 | { | 784 | { |
785 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 785 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
786 | 786 | ||
787 | if (reg > 1) | 787 | if (reg > 1) |
788 | setfpreg(reg, val, regs); /* FIXME: handle NATs later*/ | 788 | setfpreg(reg, val, regs); /* FIXME: handle NATs later*/ |
789 | } | 789 | } |
790 | 790 | ||
791 | /* | 791 | /* |
792 | * The Altix RTC is mapped specially here for the vmm module | 792 | * The Altix RTC is mapped specially here for the vmm module |
793 | */ | 793 | */ |
794 | #define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT)) | 794 | #define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT)) |
795 | static long kvm_get_itc(struct kvm_vcpu *vcpu) | 795 | static long kvm_get_itc(struct kvm_vcpu *vcpu) |
796 | { | 796 | { |
797 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | 797 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) |
798 | struct kvm *kvm = (struct kvm *)KVM_VM_BASE; | 798 | struct kvm *kvm = (struct kvm *)KVM_VM_BASE; |
799 | 799 | ||
800 | if (kvm->arch.is_sn2) | 800 | if (kvm->arch.is_sn2) |
801 | return (*SN_RTC_BASE); | 801 | return (*SN_RTC_BASE); |
802 | else | 802 | else |
803 | #endif | 803 | #endif |
804 | return ia64_getreg(_IA64_REG_AR_ITC); | 804 | return ia64_getreg(_IA64_REG_AR_ITC); |
805 | } | 805 | } |
806 | 806 | ||
807 | /************************************************************************ | 807 | /************************************************************************ |
808 | * lsapic timer | 808 | * lsapic timer |
809 | ***********************************************************************/ | 809 | ***********************************************************************/ |
810 | u64 vcpu_get_itc(struct kvm_vcpu *vcpu) | 810 | u64 vcpu_get_itc(struct kvm_vcpu *vcpu) |
811 | { | 811 | { |
812 | unsigned long guest_itc; | 812 | unsigned long guest_itc; |
813 | guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu); | 813 | guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu); |
814 | 814 | ||
815 | if (guest_itc >= VMX(vcpu, last_itc)) { | 815 | if (guest_itc >= VMX(vcpu, last_itc)) { |
816 | VMX(vcpu, last_itc) = guest_itc; | 816 | VMX(vcpu, last_itc) = guest_itc; |
817 | return guest_itc; | 817 | return guest_itc; |
818 | } else | 818 | } else |
819 | return VMX(vcpu, last_itc); | 819 | return VMX(vcpu, last_itc); |
820 | } | 820 | } |
821 | 821 | ||
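In short, the guest ITC is the host time source plus a per-vcpu offset, clamped so that successive reads never go backwards (a sketch of the fields read above):

	/*
	 *   guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu)
	 *
	 * where kvm_get_itc() reads either ar.itc or, on SN2, the Altix RTC.
	 * last_itc remembers the largest value handed to the guest, so a
	 * slower host clock read cannot make time appear to move backwards
	 * from the guest's point of view.
	 */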
822 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); | 822 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); |
823 | static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) | 823 | static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) |
824 | { | 824 | { |
825 | struct kvm_vcpu *v; | 825 | struct kvm_vcpu *v; |
826 | struct kvm *kvm; | 826 | struct kvm *kvm; |
827 | int i; | 827 | int i; |
828 | long itc_offset = val - kvm_get_itc(vcpu); | 828 | long itc_offset = val - kvm_get_itc(vcpu); |
829 | unsigned long vitv = VCPU(vcpu, itv); | 829 | unsigned long vitv = VCPU(vcpu, itv); |
830 | 830 | ||
831 | kvm = (struct kvm *)KVM_VM_BASE; | 831 | kvm = (struct kvm *)KVM_VM_BASE; |
832 | 832 | ||
833 | if (kvm_vcpu_is_bsp(vcpu)) { | 833 | if (kvm_vcpu_is_bsp(vcpu)) { |
834 | for (i = 0; i < kvm->arch.online_vcpus; i++) { | 834 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { |
835 | v = (struct kvm_vcpu *)((char *)vcpu + | 835 | v = (struct kvm_vcpu *)((char *)vcpu + |
836 | sizeof(struct kvm_vcpu_data) * i); | 836 | sizeof(struct kvm_vcpu_data) * i); |
837 | VMX(v, itc_offset) = itc_offset; | 837 | VMX(v, itc_offset) = itc_offset; |
838 | VMX(v, last_itc) = 0; | 838 | VMX(v, last_itc) = 0; |
839 | } | 839 | } |
840 | } | 840 | } |
841 | VMX(vcpu, last_itc) = 0; | 841 | VMX(vcpu, last_itc) = 0; |
842 | if (VCPU(vcpu, itm) <= val) { | 842 | if (VCPU(vcpu, itm) <= val) { |
843 | VMX(vcpu, itc_check) = 0; | 843 | VMX(vcpu, itc_check) = 0; |
844 | vcpu_unpend_interrupt(vcpu, vitv); | 844 | vcpu_unpend_interrupt(vcpu, vitv); |
845 | } else { | 845 | } else { |
846 | VMX(vcpu, itc_check) = 1; | 846 | VMX(vcpu, itc_check) = 1; |
847 | vcpu_set_itm(vcpu, VCPU(vcpu, itm)); | 847 | vcpu_set_itm(vcpu, VCPU(vcpu, itm)); |
848 | } | 848 | } |
849 | 849 | ||
850 | } | 850 | } |
851 | 851 | ||
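Note how this ties into the patch: when the boot vcpu (kvm_vcpu_is_bsp()) rewrites the ITC, the new offset is pushed to every online vcpu, and the loop bound now comes from the generic atomic_read(&kvm->online_vcpus) counter instead of the ia64-private kvm->arch.online_vcpus field. A hedged sketch of the sibling-vcpu arithmetic used above:

	/*
	 * Illustrative only: vcpu structures live back to back in the VMM
	 * address space, sizeof(struct kvm_vcpu_data) apart, so vcpu i is
	 * reached by plain pointer arithmetic from any vcpu:
	 *
	 *     v = (struct kvm_vcpu *)((char *)vcpu +
	 *                             sizeof(struct kvm_vcpu_data) * i);
	 *
	 * which is why the loop above can avoid an array lookup.
	 */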
852 | static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) | 852 | static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) |
853 | { | 853 | { |
854 | return ((u64)VCPU(vcpu, itm)); | 854 | return ((u64)VCPU(vcpu, itm)); |
855 | } | 855 | } |
856 | 856 | ||
857 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) | 857 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) |
858 | { | 858 | { |
859 | unsigned long vitv = VCPU(vcpu, itv); | 859 | unsigned long vitv = VCPU(vcpu, itv); |
860 | VCPU(vcpu, itm) = val; | 860 | VCPU(vcpu, itm) = val; |
861 | 861 | ||
862 | if (val > vcpu_get_itc(vcpu)) { | 862 | if (val > vcpu_get_itc(vcpu)) { |
863 | VMX(vcpu, itc_check) = 1; | 863 | VMX(vcpu, itc_check) = 1; |
864 | vcpu_unpend_interrupt(vcpu, vitv); | 864 | vcpu_unpend_interrupt(vcpu, vitv); |
865 | VMX(vcpu, timer_pending) = 0; | 865 | VMX(vcpu, timer_pending) = 0; |
866 | } else | 866 | } else |
867 | VMX(vcpu, itc_check) = 0; | 867 | VMX(vcpu, itc_check) = 0; |
868 | } | 868 | } |
869 | 869 | ||
870 | #define ITV_VECTOR(itv) (itv&0xff) | 870 | #define ITV_VECTOR(itv) (itv&0xff) |
871 | #define ITV_IRQ_MASK(itv) (itv&(1<<16)) | 871 | #define ITV_IRQ_MASK(itv) (itv&(1<<16)) |
872 | 872 | ||
873 | static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) | 873 | static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) |
874 | { | 874 | { |
875 | VCPU(vcpu, itv) = val; | 875 | VCPU(vcpu, itv) = val; |
876 | if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { | 876 | if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { |
877 | vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); | 877 | vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); |
878 | vcpu->arch.timer_pending = 0; | 878 | vcpu->arch.timer_pending = 0; |
879 | } | 879 | } |
880 | } | 880 | } |
881 | 881 | ||
882 | static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) | 882 | static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) |
883 | { | 883 | { |
884 | int vec; | 884 | int vec; |
885 | 885 | ||
886 | vec = highest_inservice_irq(vcpu); | 886 | vec = highest_inservice_irq(vcpu); |
887 | if (vec == NULL_VECTOR) | 887 | if (vec == NULL_VECTOR) |
888 | return; | 888 | return; |
889 | VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); | 889 | VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); |
890 | VCPU(vcpu, eoi) = 0; | 890 | VCPU(vcpu, eoi) = 0; |
891 | vcpu->arch.irq_new_pending = 1; | 891 | vcpu->arch.irq_new_pending = 1; |
892 | 892 | ||
893 | } | 893 | } |
894 | 894 | ||
895 | /* See Table 5-8 in SDM vol2 for the definition */ | 895 | /* See Table 5-8 in SDM vol2 for the definition */ |
896 | int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice) | 896 | int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice) |
897 | { | 897 | { |
898 | union ia64_tpr vtpr; | 898 | union ia64_tpr vtpr; |
899 | 899 | ||
900 | vtpr.val = VCPU(vcpu, tpr); | 900 | vtpr.val = VCPU(vcpu, tpr); |
901 | 901 | ||
902 | if (h_inservice == NMI_VECTOR) | 902 | if (h_inservice == NMI_VECTOR) |
903 | return IRQ_MASKED_BY_INSVC; | 903 | return IRQ_MASKED_BY_INSVC; |
904 | 904 | ||
905 | if (h_pending == NMI_VECTOR) { | 905 | if (h_pending == NMI_VECTOR) { |
906 | /* Non Maskable Interrupt */ | 906 | /* Non Maskable Interrupt */ |
907 | return IRQ_NO_MASKED; | 907 | return IRQ_NO_MASKED; |
908 | } | 908 | } |
909 | 909 | ||
910 | if (h_inservice == ExtINT_VECTOR) | 910 | if (h_inservice == ExtINT_VECTOR) |
911 | return IRQ_MASKED_BY_INSVC; | 911 | return IRQ_MASKED_BY_INSVC; |
912 | 912 | ||
913 | if (h_pending == ExtINT_VECTOR) { | 913 | if (h_pending == ExtINT_VECTOR) { |
914 | if (vtpr.mmi) { | 914 | if (vtpr.mmi) { |
915 | /* mask all external IRQ */ | 915 | /* mask all external IRQ */ |
916 | return IRQ_MASKED_BY_VTPR; | 916 | return IRQ_MASKED_BY_VTPR; |
917 | } else | 917 | } else |
918 | return IRQ_NO_MASKED; | 918 | return IRQ_NO_MASKED; |
919 | } | 919 | } |
920 | 920 | ||
921 | if (is_higher_irq(h_pending, h_inservice)) { | 921 | if (is_higher_irq(h_pending, h_inservice)) { |
922 | if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4))) | 922 | if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4))) |
923 | return IRQ_NO_MASKED; | 923 | return IRQ_NO_MASKED; |
924 | else | 924 | else |
925 | return IRQ_MASKED_BY_VTPR; | 925 | return IRQ_MASKED_BY_VTPR; |
926 | } else { | 926 | } else { |
927 | return IRQ_MASKED_BY_INSVC; | 927 | return IRQ_MASKED_BY_INSVC; |
928 | } | 928 | } |
929 | } | 929 | } |
930 | 930 | ||
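The final branch follows the usual ia64 TPR semantics: a pending external interrupt is delivered only if its priority class (vector / 16) is above vtpr.mic, with vtpr.mmi masking everything. A hedged worked example:

	/*
	 * Illustrative only, assuming is_higher_class() compares
	 * (vector >> 4) against the mic threshold:
	 *     pending vector 0x59 -> priority class 5
	 *     vtpr.mic = 4, vtpr.mmi = 0  => class 5 > 4, IRQ_NO_MASKED
	 *     vtpr.mic = 5, vtpr.mmi = 0  => class 5 <= 5, IRQ_MASKED_BY_VTPR
	 *     vtpr.mmi = 1                => threshold >= 16, always masked
	 */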
931 | void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | 931 | void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec) |
932 | { | 932 | { |
933 | long spsr; | 933 | long spsr; |
934 | int ret; | 934 | int ret; |
935 | 935 | ||
936 | local_irq_save(spsr); | 936 | local_irq_save(spsr); |
937 | ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0])); | 937 | ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0])); |
938 | local_irq_restore(spsr); | 938 | local_irq_restore(spsr); |
939 | 939 | ||
940 | vcpu->arch.irq_new_pending = 1; | 940 | vcpu->arch.irq_new_pending = 1; |
941 | } | 941 | } |
942 | 942 | ||
943 | void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | 943 | void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec) |
944 | { | 944 | { |
945 | long spsr; | 945 | long spsr; |
946 | int ret; | 946 | int ret; |
947 | 947 | ||
948 | local_irq_save(spsr); | 948 | local_irq_save(spsr); |
949 | ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0])); | 949 | ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0])); |
950 | local_irq_restore(spsr); | 950 | local_irq_restore(spsr); |
951 | if (ret) { | 951 | if (ret) { |
952 | vcpu->arch.irq_new_pending = 1; | 952 | vcpu->arch.irq_new_pending = 1; |
953 | wmb(); | 953 | wmb(); |
954 | } | 954 | } |
955 | } | 955 | } |
956 | 956 | ||
957 | void update_vhpi(struct kvm_vcpu *vcpu, int vec) | 957 | void update_vhpi(struct kvm_vcpu *vcpu, int vec) |
958 | { | 958 | { |
959 | u64 vhpi; | 959 | u64 vhpi; |
960 | 960 | ||
961 | if (vec == NULL_VECTOR) | 961 | if (vec == NULL_VECTOR) |
962 | vhpi = 0; | 962 | vhpi = 0; |
963 | else if (vec == NMI_VECTOR) | 963 | else if (vec == NMI_VECTOR) |
964 | vhpi = 32; | 964 | vhpi = 32; |
965 | else if (vec == ExtINT_VECTOR) | 965 | else if (vec == ExtINT_VECTOR) |
966 | vhpi = 16; | 966 | vhpi = 16; |
967 | else | 967 | else |
968 | vhpi = vec >> 4; | 968 | vhpi = vec >> 4; |
969 | 969 | ||
970 | VCPU(vcpu, vhpi) = vhpi; | 970 | VCPU(vcpu, vhpi) = vhpi; |
971 | if (VCPU(vcpu, vac).a_int) | 971 | if (VCPU(vcpu, vac).a_int) |
972 | ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, | 972 | ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, |
973 | (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0); | 973 | (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0); |
974 | } | 974 | } |
975 | 975 | ||
976 | u64 vcpu_get_ivr(struct kvm_vcpu *vcpu) | 976 | u64 vcpu_get_ivr(struct kvm_vcpu *vcpu) |
977 | { | 977 | { |
978 | int vec, h_inservice, mask; | 978 | int vec, h_inservice, mask; |
979 | 979 | ||
980 | vec = highest_pending_irq(vcpu); | 980 | vec = highest_pending_irq(vcpu); |
981 | h_inservice = highest_inservice_irq(vcpu); | 981 | h_inservice = highest_inservice_irq(vcpu); |
982 | mask = irq_masked(vcpu, vec, h_inservice); | 982 | mask = irq_masked(vcpu, vec, h_inservice); |
983 | if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) { | 983 | if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) { |
984 | if (VCPU(vcpu, vhpi)) | 984 | if (VCPU(vcpu, vhpi)) |
985 | update_vhpi(vcpu, NULL_VECTOR); | 985 | update_vhpi(vcpu, NULL_VECTOR); |
986 | return IA64_SPURIOUS_INT_VECTOR; | 986 | return IA64_SPURIOUS_INT_VECTOR; |
987 | } | 987 | } |
988 | if (mask == IRQ_MASKED_BY_VTPR) { | 988 | if (mask == IRQ_MASKED_BY_VTPR) { |
989 | update_vhpi(vcpu, vec); | 989 | update_vhpi(vcpu, vec); |
990 | return IA64_SPURIOUS_INT_VECTOR; | 990 | return IA64_SPURIOUS_INT_VECTOR; |
991 | } | 991 | } |
992 | VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63)); | 992 | VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63)); |
993 | vcpu_unpend_interrupt(vcpu, vec); | 993 | vcpu_unpend_interrupt(vcpu, vec); |
994 | return (u64)vec; | 994 | return (u64)vec; |
995 | } | 995 | } |
996 | 996 | ||
997 | /************************************************************************** | 997 | /************************************************************************** |
998 | Privileged operation emulation routines | 998 | Privileged operation emulation routines |
999 | **************************************************************************/ | 999 | **************************************************************************/ |
1000 | u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr) | 1000 | u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr) |
1001 | { | 1001 | { |
1002 | union ia64_pta vpta; | 1002 | union ia64_pta vpta; |
1003 | union ia64_rr vrr; | 1003 | union ia64_rr vrr; |
1004 | u64 pval; | 1004 | u64 pval; |
1005 | u64 vhpt_offset; | 1005 | u64 vhpt_offset; |
1006 | 1006 | ||
1007 | vpta.val = vcpu_get_pta(vcpu); | 1007 | vpta.val = vcpu_get_pta(vcpu); |
1008 | vrr.val = vcpu_get_rr(vcpu, vadr); | 1008 | vrr.val = vcpu_get_rr(vcpu, vadr); |
1009 | vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1); | 1009 | vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1); |
1010 | if (vpta.vf) { | 1010 | if (vpta.vf) { |
1011 | pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val, | 1011 | pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val, |
1012 | vpta.val, 0, 0, 0, 0); | 1012 | vpta.val, 0, 0, 0, 0); |
1013 | } else { | 1013 | } else { |
1014 | pval = (vadr & VRN_MASK) | vhpt_offset | | 1014 | pval = (vadr & VRN_MASK) | vhpt_offset | |
1015 | (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)); | 1015 | (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)); |
1016 | } | 1016 | } |
1017 | return pval; | 1017 | return pval; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr) | 1020 | u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr) |
1021 | { | 1021 | { |
1022 | union ia64_rr vrr; | 1022 | union ia64_rr vrr; |
1023 | union ia64_pta vpta; | 1023 | union ia64_pta vpta; |
1024 | u64 pval; | 1024 | u64 pval; |
1025 | 1025 | ||
1026 | vpta.val = vcpu_get_pta(vcpu); | 1026 | vpta.val = vcpu_get_pta(vcpu); |
1027 | vrr.val = vcpu_get_rr(vcpu, vadr); | 1027 | vrr.val = vcpu_get_rr(vcpu, vadr); |
1028 | if (vpta.vf) { | 1028 | if (vpta.vf) { |
1029 | pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, | 1029 | pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, |
1030 | 0, 0, 0, 0, 0); | 1030 | 0, 0, 0, 0, 0); |
1031 | } else | 1031 | } else |
1032 | pval = 1; | 1032 | pval = 1; |
1033 | 1033 | ||
1034 | return pval; | 1034 | return pval; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) | 1037 | u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) |
1038 | { | 1038 | { |
1039 | struct thash_data *data; | 1039 | struct thash_data *data; |
1040 | union ia64_pta vpta; | 1040 | union ia64_pta vpta; |
1041 | u64 key; | 1041 | u64 key; |
1042 | 1042 | ||
1043 | vpta.val = vcpu_get_pta(vcpu); | 1043 | vpta.val = vcpu_get_pta(vcpu); |
1044 | if (vpta.vf == 0) { | 1044 | if (vpta.vf == 0) { |
1045 | key = 1; | 1045 | key = 1; |
1046 | return key; | 1046 | return key; |
1047 | } | 1047 | } |
1048 | data = vtlb_lookup(vcpu, vadr, D_TLB); | 1048 | data = vtlb_lookup(vcpu, vadr, D_TLB); |
1049 | if (!data || !data->p) | 1049 | if (!data || !data->p) |
1050 | key = 1; | 1050 | key = 1; |
1051 | else | 1051 | else |
1052 | key = data->key; | 1052 | key = data->key; |
1053 | 1053 | ||
1054 | return key; | 1054 | return key; |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) | 1057 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) |
1058 | { | 1058 | { |
1059 | unsigned long thash, vadr; | 1059 | unsigned long thash, vadr; |
1060 | 1060 | ||
1061 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | 1061 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); |
1062 | thash = vcpu_thash(vcpu, vadr); | 1062 | thash = vcpu_thash(vcpu, vadr); |
1063 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); | 1063 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) | 1066 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) |
1067 | { | 1067 | { |
1068 | unsigned long tag, vadr; | 1068 | unsigned long tag, vadr; |
1069 | 1069 | ||
1070 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | 1070 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); |
1071 | tag = vcpu_ttag(vcpu, vadr); | 1071 | tag = vcpu_ttag(vcpu, vadr); |
1072 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); | 1072 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr) | 1075 | int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr) |
1076 | { | 1076 | { |
1077 | struct thash_data *data; | 1077 | struct thash_data *data; |
1078 | union ia64_isr visr, pt_isr; | 1078 | union ia64_isr visr, pt_isr; |
1079 | struct kvm_pt_regs *regs; | 1079 | struct kvm_pt_regs *regs; |
1080 | struct ia64_psr vpsr; | 1080 | struct ia64_psr vpsr; |
1081 | 1081 | ||
1082 | regs = vcpu_regs(vcpu); | 1082 | regs = vcpu_regs(vcpu); |
1083 | pt_isr.val = VMX(vcpu, cr_isr); | 1083 | pt_isr.val = VMX(vcpu, cr_isr); |
1084 | visr.val = 0; | 1084 | visr.val = 0; |
1085 | visr.ei = pt_isr.ei; | 1085 | visr.ei = pt_isr.ei; |
1086 | visr.ir = pt_isr.ir; | 1086 | visr.ir = pt_isr.ir; |
1087 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | 1087 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); |
1088 | visr.na = 1; | 1088 | visr.na = 1; |
1089 | 1089 | ||
1090 | data = vhpt_lookup(vadr); | 1090 | data = vhpt_lookup(vadr); |
1091 | if (data) { | 1091 | if (data) { |
1092 | if (data->p == 0) { | 1092 | if (data->p == 0) { |
1093 | vcpu_set_isr(vcpu, visr.val); | 1093 | vcpu_set_isr(vcpu, visr.val); |
1094 | data_page_not_present(vcpu, vadr); | 1094 | data_page_not_present(vcpu, vadr); |
1095 | return IA64_FAULT; | 1095 | return IA64_FAULT; |
1096 | } else if (data->ma == VA_MATTR_NATPAGE) { | 1096 | } else if (data->ma == VA_MATTR_NATPAGE) { |
1097 | vcpu_set_isr(vcpu, visr.val); | 1097 | vcpu_set_isr(vcpu, visr.val); |
1098 | dnat_page_consumption(vcpu, vadr); | 1098 | dnat_page_consumption(vcpu, vadr); |
1099 | return IA64_FAULT; | 1099 | return IA64_FAULT; |
1100 | } else { | 1100 | } else { |
1101 | *padr = (data->gpaddr >> data->ps << data->ps) | | 1101 | *padr = (data->gpaddr >> data->ps << data->ps) | |
1102 | (vadr & (PSIZE(data->ps) - 1)); | 1102 | (vadr & (PSIZE(data->ps) - 1)); |
1103 | return IA64_NO_FAULT; | 1103 | return IA64_NO_FAULT; |
1104 | } | 1104 | } |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | data = vtlb_lookup(vcpu, vadr, D_TLB); | 1107 | data = vtlb_lookup(vcpu, vadr, D_TLB); |
1108 | if (data) { | 1108 | if (data) { |
1109 | if (data->p == 0) { | 1109 | if (data->p == 0) { |
1110 | vcpu_set_isr(vcpu, visr.val); | 1110 | vcpu_set_isr(vcpu, visr.val); |
1111 | data_page_not_present(vcpu, vadr); | 1111 | data_page_not_present(vcpu, vadr); |
1112 | return IA64_FAULT; | 1112 | return IA64_FAULT; |
1113 | } else if (data->ma == VA_MATTR_NATPAGE) { | 1113 | } else if (data->ma == VA_MATTR_NATPAGE) { |
1114 | vcpu_set_isr(vcpu, visr.val); | 1114 | vcpu_set_isr(vcpu, visr.val); |
1115 | dnat_page_consumption(vcpu, vadr); | 1115 | dnat_page_consumption(vcpu, vadr); |
1116 | return IA64_FAULT; | 1116 | return IA64_FAULT; |
1117 | } else{ | 1117 | } else{ |
1118 | *padr = ((data->ppn >> (data->ps - 12)) << data->ps) | 1118 | *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
1119 | | (vadr & (PSIZE(data->ps) - 1)); | 1119 | | (vadr & (PSIZE(data->ps) - 1)); |
1120 | return IA64_NO_FAULT; | 1120 | return IA64_NO_FAULT; |
1121 | } | 1121 | } |
1122 | } | 1122 | } |
1123 | if (!vhpt_enabled(vcpu, vadr, NA_REF)) { | 1123 | if (!vhpt_enabled(vcpu, vadr, NA_REF)) { |
1124 | if (vpsr.ic) { | 1124 | if (vpsr.ic) { |
1125 | vcpu_set_isr(vcpu, visr.val); | 1125 | vcpu_set_isr(vcpu, visr.val); |
1126 | alt_dtlb(vcpu, vadr); | 1126 | alt_dtlb(vcpu, vadr); |
1127 | return IA64_FAULT; | 1127 | return IA64_FAULT; |
1128 | } else { | 1128 | } else { |
1129 | nested_dtlb(vcpu); | 1129 | nested_dtlb(vcpu); |
1130 | return IA64_FAULT; | 1130 | return IA64_FAULT; |
1131 | } | 1131 | } |
1132 | } else { | 1132 | } else { |
1133 | if (vpsr.ic) { | 1133 | if (vpsr.ic) { |
1134 | vcpu_set_isr(vcpu, visr.val); | 1134 | vcpu_set_isr(vcpu, visr.val); |
1135 | dvhpt_fault(vcpu, vadr); | 1135 | dvhpt_fault(vcpu, vadr); |
1136 | return IA64_FAULT; | 1136 | return IA64_FAULT; |
1137 | } else{ | 1137 | } else{ |
1138 | nested_dtlb(vcpu); | 1138 | nested_dtlb(vcpu); |
1139 | return IA64_FAULT; | 1139 | return IA64_FAULT; |
1140 | } | 1140 | } |
1141 | } | 1141 | } |
1142 | 1142 | ||
1143 | return IA64_NO_FAULT; | 1143 | return IA64_NO_FAULT; |
1144 | } | 1144 | } |
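Both lookup paths above assemble the translated address the same way: the frame part of the matching entry is aligned down to its page size and the byte offset of vadr inside that page is OR-ed back in. A reduced sketch of that composition, reusing PSIZE from the source (hypothetical helper for illustration only):

static unsigned long tpa_compose_sketch(unsigned long frame_base,
					unsigned long vadr, unsigned long ps)
{
	/* drop the sub-page bits of the frame, keep the sub-page bits of vadr */
	return (frame_base >> ps << ps) | (vadr & (PSIZE(ps) - 1));
}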
1145 | 1145 | ||
1146 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) | 1146 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) |
1147 | { | 1147 | { |
1148 | unsigned long r1, r3; | 1148 | unsigned long r1, r3; |
1149 | 1149 | ||
1150 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | 1150 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); |
1151 | 1151 | ||
1152 | if (vcpu_tpa(vcpu, r3, &r1)) | 1152 | if (vcpu_tpa(vcpu, r3, &r1)) |
1153 | return IA64_FAULT; | 1153 | return IA64_FAULT; |
1154 | 1154 | ||
1155 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | 1155 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); |
1156 | return(IA64_NO_FAULT); | 1156 | return(IA64_NO_FAULT); |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) | 1159 | void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) |
1160 | { | 1160 | { |
1161 | unsigned long r1, r3; | 1161 | unsigned long r1, r3; |
1162 | 1162 | ||
1163 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | 1163 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); |
1164 | r1 = vcpu_tak(vcpu, r3); | 1164 | r1 = vcpu_tak(vcpu, r3); |
1165 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | 1165 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | /************************************ | 1168 | /************************************ |
1169 | * Insert/Purge translation register/cache | 1169 | * Insert/Purge translation register/cache |
1170 | ************************************/ | 1170 | ************************************/ |
1171 | void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) | 1171 | void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) |
1172 | { | 1172 | { |
1173 | thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB); | 1173 | thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB); |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) | 1176 | void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) |
1177 | { | 1177 | { |
1178 | thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB); | 1178 | thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB); |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | 1181 | void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) |
1182 | { | 1182 | { |
1183 | u64 ps, va, rid; | 1183 | u64 ps, va, rid; |
1184 | struct thash_data *p_itr; | 1184 | struct thash_data *p_itr; |
1185 | 1185 | ||
1186 | ps = itir_ps(itir); | 1186 | ps = itir_ps(itir); |
1187 | va = PAGEALIGN(ifa, ps); | 1187 | va = PAGEALIGN(ifa, ps); |
1188 | pte &= ~PAGE_FLAGS_RV_MASK; | 1188 | pte &= ~PAGE_FLAGS_RV_MASK; |
1189 | rid = vcpu_get_rr(vcpu, ifa); | 1189 | rid = vcpu_get_rr(vcpu, ifa); |
1190 | rid = rid & RR_RID_MASK; | 1190 | rid = rid & RR_RID_MASK; |
1191 | p_itr = (struct thash_data *)&vcpu->arch.itrs[slot]; | 1191 | p_itr = (struct thash_data *)&vcpu->arch.itrs[slot]; |
1192 | vcpu_set_tr(p_itr, pte, itir, va, rid); | 1192 | vcpu_set_tr(p_itr, pte, itir, va, rid); |
1193 | vcpu_quick_region_set(VMX(vcpu, itr_regions), va); | 1193 | vcpu_quick_region_set(VMX(vcpu, itr_regions), va); |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | 1196 | ||
1197 | void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | 1197 | void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) |
1198 | { | 1198 | { |
1199 | u64 gpfn; | 1199 | u64 gpfn; |
1200 | u64 ps, va, rid; | 1200 | u64 ps, va, rid; |
1201 | struct thash_data *p_dtr; | 1201 | struct thash_data *p_dtr; |
1202 | 1202 | ||
1203 | ps = itir_ps(itir); | 1203 | ps = itir_ps(itir); |
1204 | va = PAGEALIGN(ifa, ps); | 1204 | va = PAGEALIGN(ifa, ps); |
1205 | pte &= ~PAGE_FLAGS_RV_MASK; | 1205 | pte &= ~PAGE_FLAGS_RV_MASK; |
1206 | 1206 | ||
1207 | if (ps != _PAGE_SIZE_16M) | 1207 | if (ps != _PAGE_SIZE_16M) |
1208 | thash_purge_entries(vcpu, va, ps); | 1208 | thash_purge_entries(vcpu, va, ps); |
1209 | gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; | 1209 | gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; |
1210 | if (__gpfn_is_io(gpfn)) | 1210 | if (__gpfn_is_io(gpfn)) |
1211 | pte |= VTLB_PTE_IO; | 1211 | pte |= VTLB_PTE_IO; |
1212 | rid = vcpu_get_rr(vcpu, va); | 1212 | rid = vcpu_get_rr(vcpu, va); |
1213 | rid = rid & RR_RID_MASK; | 1213 | rid = rid & RR_RID_MASK; |
1214 | p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot]; | 1214 | p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot]; |
1215 | vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot], | 1215 | vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot], |
1216 | pte, itir, va, rid); | 1216 | pte, itir, va, rid); |
1217 | vcpu_quick_region_set(VMX(vcpu, dtr_regions), va); | 1217 | vcpu_quick_region_set(VMX(vcpu, dtr_regions), va); |
1218 | } | 1218 | } |
1219 | 1219 | ||
1220 | void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | 1220 | void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) |
1221 | { | 1221 | { |
1222 | int index; | 1222 | int index; |
1223 | u64 va; | 1223 | u64 va; |
1224 | 1224 | ||
1225 | va = PAGEALIGN(ifa, ps); | 1225 | va = PAGEALIGN(ifa, ps); |
1226 | while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0) | 1226 | while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0) |
1227 | vcpu->arch.dtrs[index].page_flags = 0; | 1227 | vcpu->arch.dtrs[index].page_flags = 0; |
1228 | 1228 | ||
1229 | thash_purge_entries(vcpu, va, ps); | 1229 | thash_purge_entries(vcpu, va, ps); |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | 1232 | void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) |
1233 | { | 1233 | { |
1234 | int index; | 1234 | int index; |
1235 | u64 va; | 1235 | u64 va; |
1236 | 1236 | ||
1237 | va = PAGEALIGN(ifa, ps); | 1237 | va = PAGEALIGN(ifa, ps); |
1238 | while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0) | 1238 | while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0) |
1239 | vcpu->arch.itrs[index].page_flags = 0; | 1239 | vcpu->arch.itrs[index].page_flags = 0; |
1240 | 1240 | ||
1241 | thash_purge_entries(vcpu, va, ps); | 1241 | thash_purge_entries(vcpu, va, ps); |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps) | 1244 | void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps) |
1245 | { | 1245 | { |
1246 | va = PAGEALIGN(va, ps); | 1246 | va = PAGEALIGN(va, ps); |
1247 | thash_purge_entries(vcpu, va, ps); | 1247 | thash_purge_entries(vcpu, va, ps); |
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va) | 1250 | void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va) |
1251 | { | 1251 | { |
1252 | thash_purge_all(vcpu); | 1252 | thash_purge_all(vcpu); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps) | 1255 | void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps) |
1256 | { | 1256 | { |
1257 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | 1257 | struct exit_ctl_data *p = &vcpu->arch.exit_data; |
1258 | long psr; | 1258 | long psr; |
1259 | local_irq_save(psr); | 1259 | local_irq_save(psr); |
1260 | p->exit_reason = EXIT_REASON_PTC_G; | 1260 | p->exit_reason = EXIT_REASON_PTC_G; |
1261 | 1261 | ||
1262 | p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va); | 1262 | p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va); |
1263 | p->u.ptc_g_data.vaddr = va; | 1263 | p->u.ptc_g_data.vaddr = va; |
1264 | p->u.ptc_g_data.ps = ps; | 1264 | p->u.ptc_g_data.ps = ps; |
1265 | vmm_transition(vcpu); | 1265 | vmm_transition(vcpu); |
1266 | /* Do Local Purge Here*/ | 1266 | /* Do Local Purge Here*/ |
1267 | vcpu_ptc_l(vcpu, va, ps); | 1267 | vcpu_ptc_l(vcpu, va, ps); |
1268 | local_irq_restore(psr); | 1268 | local_irq_restore(psr); |
1269 | } | 1269 | } |
1270 | 1270 | ||
1271 | 1271 | ||
1272 | void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps) | 1272 | void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps) |
1273 | { | 1273 | { |
1274 | vcpu_ptc_ga(vcpu, va, ps); | 1274 | vcpu_ptc_ga(vcpu, va, ps); |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst) | 1277 | void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst) |
1278 | { | 1278 | { |
1279 | unsigned long ifa; | 1279 | unsigned long ifa; |
1280 | 1280 | ||
1281 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1281 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1282 | vcpu_ptc_e(vcpu, ifa); | 1282 | vcpu_ptc_e(vcpu, ifa); |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst) | 1285 | void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst) |
1286 | { | 1286 | { |
1287 | unsigned long ifa, itir; | 1287 | unsigned long ifa, itir; |
1288 | 1288 | ||
1289 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1289 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1290 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | 1290 | itir = vcpu_get_gr(vcpu, inst.M45.r2); |
1291 | vcpu_ptc_g(vcpu, ifa, itir_ps(itir)); | 1291 | vcpu_ptc_g(vcpu, ifa, itir_ps(itir)); |
1292 | } | 1292 | } |
1293 | 1293 | ||
1294 | void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst) | 1294 | void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst) |
1295 | { | 1295 | { |
1296 | unsigned long ifa, itir; | 1296 | unsigned long ifa, itir; |
1297 | 1297 | ||
1298 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1298 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1299 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | 1299 | itir = vcpu_get_gr(vcpu, inst.M45.r2); |
1300 | vcpu_ptc_ga(vcpu, ifa, itir_ps(itir)); | 1300 | vcpu_ptc_ga(vcpu, ifa, itir_ps(itir)); |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst) | 1303 | void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst) |
1304 | { | 1304 | { |
1305 | unsigned long ifa, itir; | 1305 | unsigned long ifa, itir; |
1306 | 1306 | ||
1307 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1307 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1308 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | 1308 | itir = vcpu_get_gr(vcpu, inst.M45.r2); |
1309 | vcpu_ptc_l(vcpu, ifa, itir_ps(itir)); | 1309 | vcpu_ptc_l(vcpu, ifa, itir_ps(itir)); |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst) | 1312 | void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst) |
1313 | { | 1313 | { |
1314 | unsigned long ifa, itir; | 1314 | unsigned long ifa, itir; |
1315 | 1315 | ||
1316 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1316 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1317 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | 1317 | itir = vcpu_get_gr(vcpu, inst.M45.r2); |
1318 | vcpu_ptr_d(vcpu, ifa, itir_ps(itir)); | 1318 | vcpu_ptr_d(vcpu, ifa, itir_ps(itir)); |
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst) | 1321 | void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst) |
1322 | { | 1322 | { |
1323 | unsigned long ifa, itir; | 1323 | unsigned long ifa, itir; |
1324 | 1324 | ||
1325 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | 1325 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); |
1326 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | 1326 | itir = vcpu_get_gr(vcpu, inst.M45.r2); |
1327 | vcpu_ptr_i(vcpu, ifa, itir_ps(itir)); | 1327 | vcpu_ptr_i(vcpu, ifa, itir_ps(itir)); |
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst) | 1330 | void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst) |
1331 | { | 1331 | { |
1332 | unsigned long itir, ifa, pte, slot; | 1332 | unsigned long itir, ifa, pte, slot; |
1333 | 1333 | ||
1334 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | 1334 | slot = vcpu_get_gr(vcpu, inst.M45.r3); |
1335 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | 1335 | pte = vcpu_get_gr(vcpu, inst.M45.r2); |
1336 | itir = vcpu_get_itir(vcpu); | 1336 | itir = vcpu_get_itir(vcpu); |
1337 | ifa = vcpu_get_ifa(vcpu); | 1337 | ifa = vcpu_get_ifa(vcpu); |
1338 | vcpu_itr_d(vcpu, slot, pte, itir, ifa); | 1338 | vcpu_itr_d(vcpu, slot, pte, itir, ifa); |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | 1341 | ||
1342 | 1342 | ||
1343 | void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst) | 1343 | void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst) |
1344 | { | 1344 | { |
1345 | unsigned long itir, ifa, pte, slot; | 1345 | unsigned long itir, ifa, pte, slot; |
1346 | 1346 | ||
1347 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | 1347 | slot = vcpu_get_gr(vcpu, inst.M45.r3); |
1348 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | 1348 | pte = vcpu_get_gr(vcpu, inst.M45.r2); |
1349 | itir = vcpu_get_itir(vcpu); | 1349 | itir = vcpu_get_itir(vcpu); |
1350 | ifa = vcpu_get_ifa(vcpu); | 1350 | ifa = vcpu_get_ifa(vcpu); |
1351 | vcpu_itr_i(vcpu, slot, pte, itir, ifa); | 1351 | vcpu_itr_i(vcpu, slot, pte, itir, ifa); |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst) | 1354 | void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst) |
1355 | { | 1355 | { |
1356 | unsigned long itir, ifa, pte; | 1356 | unsigned long itir, ifa, pte; |
1357 | 1357 | ||
1358 | itir = vcpu_get_itir(vcpu); | 1358 | itir = vcpu_get_itir(vcpu); |
1359 | ifa = vcpu_get_ifa(vcpu); | 1359 | ifa = vcpu_get_ifa(vcpu); |
1360 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | 1360 | pte = vcpu_get_gr(vcpu, inst.M45.r2); |
1361 | vcpu_itc_d(vcpu, pte, itir, ifa); | 1361 | vcpu_itc_d(vcpu, pte, itir, ifa); |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst) | 1364 | void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst) |
1365 | { | 1365 | { |
1366 | unsigned long itir, ifa, pte; | 1366 | unsigned long itir, ifa, pte; |
1367 | 1367 | ||
1368 | itir = vcpu_get_itir(vcpu); | 1368 | itir = vcpu_get_itir(vcpu); |
1369 | ifa = vcpu_get_ifa(vcpu); | 1369 | ifa = vcpu_get_ifa(vcpu); |
1370 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | 1370 | pte = vcpu_get_gr(vcpu, inst.M45.r2); |
1371 | vcpu_itc_i(vcpu, pte, itir, ifa); | 1371 | vcpu_itc_i(vcpu, pte, itir, ifa); |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | /************************************* | 1374 | /************************************* |
1375 | * Moves to semi-privileged registers | 1375 | * Moves to semi-privileged registers |
1376 | *************************************/ | 1376 | *************************************/ |
1377 | 1377 | ||
1378 | void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst) | 1378 | void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst) |
1379 | { | 1379 | { |
1380 | unsigned long imm; | 1380 | unsigned long imm; |
1381 | 1381 | ||
1382 | if (inst.M30.s) | 1382 | if (inst.M30.s) |
1383 | imm = -inst.M30.imm; | 1383 | imm = -inst.M30.imm; |
1384 | else | 1384 | else |
1385 | imm = inst.M30.imm; | 1385 | imm = inst.M30.imm; |
1386 | 1386 | ||
1387 | vcpu_set_itc(vcpu, imm); | 1387 | vcpu_set_itc(vcpu, imm); |
1388 | } | 1388 | } |
1389 | 1389 | ||
1390 | void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | 1390 | void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) |
1391 | { | 1391 | { |
1392 | unsigned long r2; | 1392 | unsigned long r2; |
1393 | 1393 | ||
1394 | r2 = vcpu_get_gr(vcpu, inst.M29.r2); | 1394 | r2 = vcpu_get_gr(vcpu, inst.M29.r2); |
1395 | vcpu_set_itc(vcpu, r2); | 1395 | vcpu_set_itc(vcpu, r2); |
1396 | } | 1396 | } |
1397 | 1397 | ||
1398 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | 1398 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) |
1399 | { | 1399 | { |
1400 | unsigned long r1; | 1400 | unsigned long r1; |
1401 | 1401 | ||
1402 | r1 = vcpu_get_itc(vcpu); | 1402 | r1 = vcpu_get_itc(vcpu); |
1403 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); | 1403 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | /************************************************************************** | 1406 | /************************************************************************** |
1407 | struct kvm_vcpu protection key register access routines | 1407 | struct kvm_vcpu protection key register access routines |
1408 | **************************************************************************/ | 1408 | **************************************************************************/ |
1409 | 1409 | ||
1410 | unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) | 1410 | unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) |
1411 | { | 1411 | { |
1412 | return ((unsigned long)ia64_get_pkr(reg)); | 1412 | return ((unsigned long)ia64_get_pkr(reg)); |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) | 1415 | void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) |
1416 | { | 1416 | { |
1417 | ia64_set_pkr(reg, val); | 1417 | ia64_set_pkr(reg, val); |
1418 | } | 1418 | } |
1419 | 1419 | ||
1420 | /******************************** | 1420 | /******************************** |
1421 | * Moves to privileged registers | 1421 | * Moves to privileged registers |
1422 | ********************************/ | 1422 | ********************************/ |
1423 | unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, | 1423 | unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, |
1424 | unsigned long val) | 1424 | unsigned long val) |
1425 | { | 1425 | { |
1426 | union ia64_rr oldrr, newrr; | 1426 | union ia64_rr oldrr, newrr; |
1427 | unsigned long rrval; | 1427 | unsigned long rrval; |
1428 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | 1428 | struct exit_ctl_data *p = &vcpu->arch.exit_data; |
1429 | unsigned long psr; | 1429 | unsigned long psr; |
1430 | 1430 | ||
1431 | oldrr.val = vcpu_get_rr(vcpu, reg); | 1431 | oldrr.val = vcpu_get_rr(vcpu, reg); |
1432 | newrr.val = val; | 1432 | newrr.val = val; |
1433 | vcpu->arch.vrr[reg >> VRN_SHIFT] = val; | 1433 | vcpu->arch.vrr[reg >> VRN_SHIFT] = val; |
1434 | 1434 | ||
1435 | switch ((unsigned long)(reg >> VRN_SHIFT)) { | 1435 | switch ((unsigned long)(reg >> VRN_SHIFT)) { |
1436 | case VRN6: | 1436 | case VRN6: |
1437 | vcpu->arch.vmm_rr = vrrtomrr(val); | 1437 | vcpu->arch.vmm_rr = vrrtomrr(val); |
1438 | local_irq_save(psr); | 1438 | local_irq_save(psr); |
1439 | p->exit_reason = EXIT_REASON_SWITCH_RR6; | 1439 | p->exit_reason = EXIT_REASON_SWITCH_RR6; |
1440 | vmm_transition(vcpu); | 1440 | vmm_transition(vcpu); |
1441 | local_irq_restore(psr); | 1441 | local_irq_restore(psr); |
1442 | break; | 1442 | break; |
1443 | case VRN4: | 1443 | case VRN4: |
1444 | rrval = vrrtomrr(val); | 1444 | rrval = vrrtomrr(val); |
1445 | vcpu->arch.metaphysical_saved_rr4 = rrval; | 1445 | vcpu->arch.metaphysical_saved_rr4 = rrval; |
1446 | if (!is_physical_mode(vcpu)) | 1446 | if (!is_physical_mode(vcpu)) |
1447 | ia64_set_rr(reg, rrval); | 1447 | ia64_set_rr(reg, rrval); |
1448 | break; | 1448 | break; |
1449 | case VRN0: | 1449 | case VRN0: |
1450 | rrval = vrrtomrr(val); | 1450 | rrval = vrrtomrr(val); |
1451 | vcpu->arch.metaphysical_saved_rr0 = rrval; | 1451 | vcpu->arch.metaphysical_saved_rr0 = rrval; |
1452 | if (!is_physical_mode(vcpu)) | 1452 | if (!is_physical_mode(vcpu)) |
1453 | ia64_set_rr(reg, rrval); | 1453 | ia64_set_rr(reg, rrval); |
1454 | break; | 1454 | break; |
1455 | default: | 1455 | default: |
1456 | ia64_set_rr(reg, vrrtomrr(val)); | 1456 | ia64_set_rr(reg, vrrtomrr(val)); |
1457 | break; | 1457 | break; |
1458 | } | 1458 | } |
1459 | 1459 | ||
1460 | return (IA64_NO_FAULT); | 1460 | return (IA64_NO_FAULT); |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) | 1463 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) |
1464 | { | 1464 | { |
1465 | unsigned long r3, r2; | 1465 | unsigned long r3, r2; |
1466 | 1466 | ||
1467 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | 1467 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); |
1468 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | 1468 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); |
1469 | vcpu_set_rr(vcpu, r3, r2); | 1469 | vcpu_set_rr(vcpu, r3, r2); |
1470 | } | 1470 | } |
1471 | 1471 | ||
1472 | void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst) | 1472 | void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst) |
1473 | { | 1473 | { |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst) | 1476 | void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst) |
1477 | { | 1477 | { |
1478 | } | 1478 | } |
1479 | 1479 | ||
1480 | void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst) | 1480 | void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst) |
1481 | { | 1481 | { |
1482 | unsigned long r3, r2; | 1482 | unsigned long r3, r2; |
1483 | 1483 | ||
1484 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | 1484 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); |
1485 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | 1485 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); |
1486 | vcpu_set_pmc(vcpu, r3, r2); | 1486 | vcpu_set_pmc(vcpu, r3, r2); |
1487 | } | 1487 | } |
1488 | 1488 | ||
1489 | void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst) | 1489 | void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst) |
1490 | { | 1490 | { |
1491 | unsigned long r3, r2; | 1491 | unsigned long r3, r2; |
1492 | 1492 | ||
1493 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | 1493 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); |
1494 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | 1494 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); |
1495 | vcpu_set_pmd(vcpu, r3, r2); | 1495 | vcpu_set_pmd(vcpu, r3, r2); |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) | 1498 | void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) |
1499 | { | 1499 | { |
1500 | u64 r3, r2; | 1500 | u64 r3, r2; |
1501 | 1501 | ||
1502 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | 1502 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); |
1503 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | 1503 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); |
1504 | vcpu_set_pkr(vcpu, r3, r2); | 1504 | vcpu_set_pkr(vcpu, r3, r2); |
1505 | } | 1505 | } |
1506 | 1506 | ||
1507 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) | 1507 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) |
1508 | { | 1508 | { |
1509 | unsigned long r3, r1; | 1509 | unsigned long r3, r1; |
1510 | 1510 | ||
1511 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1511 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1512 | r1 = vcpu_get_rr(vcpu, r3); | 1512 | r1 = vcpu_get_rr(vcpu, r3); |
1513 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1513 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1514 | } | 1514 | } |
1515 | 1515 | ||
1516 | void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst) | 1516 | void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst) |
1517 | { | 1517 | { |
1518 | unsigned long r3, r1; | 1518 | unsigned long r3, r1; |
1519 | 1519 | ||
1520 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1520 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1521 | r1 = vcpu_get_pkr(vcpu, r3); | 1521 | r1 = vcpu_get_pkr(vcpu, r3); |
1522 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1522 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1523 | } | 1523 | } |
1524 | 1524 | ||
1525 | void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst) | 1525 | void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst) |
1526 | { | 1526 | { |
1527 | unsigned long r3, r1; | 1527 | unsigned long r3, r1; |
1528 | 1528 | ||
1529 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1529 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1530 | r1 = vcpu_get_dbr(vcpu, r3); | 1530 | r1 = vcpu_get_dbr(vcpu, r3); |
1531 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1531 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst) | 1534 | void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst) |
1535 | { | 1535 | { |
1536 | unsigned long r3, r1; | 1536 | unsigned long r3, r1; |
1537 | 1537 | ||
1538 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1538 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1539 | r1 = vcpu_get_ibr(vcpu, r3); | 1539 | r1 = vcpu_get_ibr(vcpu, r3); |
1540 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1540 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) | 1543 | void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) |
1544 | { | 1544 | { |
1545 | unsigned long r3, r1; | 1545 | unsigned long r3, r1; |
1546 | 1546 | ||
1547 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1547 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1548 | r1 = vcpu_get_pmc(vcpu, r3); | 1548 | r1 = vcpu_get_pmc(vcpu, r3); |
1549 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1549 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) | 1552 | unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) |
1553 | { | 1553 | { |
1554 | /* FIXME: This could get called as a result of a rsvd-reg fault */ | 1554 | /* FIXME: This could get called as a result of a rsvd-reg fault */ |
1555 | if (reg > (ia64_get_cpuid(3) & 0xff)) | 1555 | if (reg > (ia64_get_cpuid(3) & 0xff)) |
1556 | return 0; | 1556 | return 0; |
1557 | else | 1557 | else |
1558 | return ia64_get_cpuid(reg); | 1558 | return ia64_get_cpuid(reg); |
1559 | } | 1559 | } |
1560 | 1560 | ||
1561 | void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst) | 1561 | void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst) |
1562 | { | 1562 | { |
1563 | unsigned long r3, r1; | 1563 | unsigned long r3, r1; |
1564 | 1564 | ||
1565 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | 1565 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); |
1566 | r1 = vcpu_get_cpuid(vcpu, r3); | 1566 | r1 = vcpu_get_cpuid(vcpu, r3); |
1567 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1567 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val) | 1570 | void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val) |
1571 | { | 1571 | { |
1572 | VCPU(vcpu, tpr) = val; | 1572 | VCPU(vcpu, tpr) = val; |
1573 | vcpu->arch.irq_check = 1; | 1573 | vcpu->arch.irq_check = 1; |
1574 | } | 1574 | } |
1575 | 1575 | ||
1576 | unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) | 1576 | unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) |
1577 | { | 1577 | { |
1578 | unsigned long r2; | 1578 | unsigned long r2; |
1579 | 1579 | ||
1580 | r2 = vcpu_get_gr(vcpu, inst.M32.r2); | 1580 | r2 = vcpu_get_gr(vcpu, inst.M32.r2); |
1581 | VCPU(vcpu, vcr[inst.M32.cr3]) = r2; | 1581 | VCPU(vcpu, vcr[inst.M32.cr3]) = r2; |
1582 | 1582 | ||
1583 | switch (inst.M32.cr3) { | 1583 | switch (inst.M32.cr3) { |
1584 | case 0: | 1584 | case 0: |
1585 | vcpu_set_dcr(vcpu, r2); | 1585 | vcpu_set_dcr(vcpu, r2); |
1586 | break; | 1586 | break; |
1587 | case 1: | 1587 | case 1: |
1588 | vcpu_set_itm(vcpu, r2); | 1588 | vcpu_set_itm(vcpu, r2); |
1589 | break; | 1589 | break; |
1590 | case 66: | 1590 | case 66: |
1591 | vcpu_set_tpr(vcpu, r2); | 1591 | vcpu_set_tpr(vcpu, r2); |
1592 | break; | 1592 | break; |
1593 | case 67: | 1593 | case 67: |
1594 | vcpu_set_eoi(vcpu, r2); | 1594 | vcpu_set_eoi(vcpu, r2); |
1595 | break; | 1595 | break; |
1596 | default: | 1596 | default: |
1597 | break; | 1597 | break; |
1598 | } | 1598 | } |
1599 | 1599 | ||
1600 | return 0; | 1600 | return 0; |
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) | 1603 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) |
1604 | { | 1604 | { |
1605 | unsigned long tgt = inst.M33.r1; | 1605 | unsigned long tgt = inst.M33.r1; |
1606 | unsigned long val; | 1606 | unsigned long val; |
1607 | 1607 | ||
1608 | switch (inst.M33.cr3) { | 1608 | switch (inst.M33.cr3) { |
1609 | case 65: | 1609 | case 65: |
1610 | val = vcpu_get_ivr(vcpu); | 1610 | val = vcpu_get_ivr(vcpu); |
1611 | vcpu_set_gr(vcpu, tgt, val, 0); | 1611 | vcpu_set_gr(vcpu, tgt, val, 0); |
1612 | break; | 1612 | break; |
1613 | 1613 | ||
1614 | case 67: | 1614 | case 67: |
1615 | vcpu_set_gr(vcpu, tgt, 0L, 0); | 1615 | vcpu_set_gr(vcpu, tgt, 0L, 0); |
1616 | break; | 1616 | break; |
1617 | default: | 1617 | default: |
1618 | val = VCPU(vcpu, vcr[inst.M33.cr3]); | 1618 | val = VCPU(vcpu, vcr[inst.M33.cr3]); |
1619 | vcpu_set_gr(vcpu, tgt, val, 0); | 1619 | vcpu_set_gr(vcpu, tgt, val, 0); |
1620 | break; | 1620 | break; |
1621 | } | 1621 | } |
1622 | 1622 | ||
1623 | return 0; | 1623 | return 0; |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) | 1626 | void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) |
1627 | { | 1627 | { |
1628 | 1628 | ||
1629 | unsigned long mask; | 1629 | unsigned long mask; |
1630 | struct kvm_pt_regs *regs; | 1630 | struct kvm_pt_regs *regs; |
1631 | struct ia64_psr old_psr, new_psr; | 1631 | struct ia64_psr old_psr, new_psr; |
1632 | 1632 | ||
1633 | old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | 1633 | old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); |
1634 | 1634 | ||
1635 | regs = vcpu_regs(vcpu); | 1635 | regs = vcpu_regs(vcpu); |
1636 | /* We only support guest as: | 1636 | /* We only support guest as: |
1637 | * vpsr.pk = 0 | 1637 | * vpsr.pk = 0 |
1638 | * vpsr.is = 0 | 1638 | * vpsr.is = 0 |
1639 | * Otherwise panic | 1639 | * Otherwise panic |
1640 | */ | 1640 | */ |
1641 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) | 1641 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) |
1642 | panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ | 1642 | panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ |
1643 | & vpsr.is=0\n"); | 1643 | & vpsr.is=0\n"); |
1644 | 1644 | ||
1645 | /* | 1645 | /* |
1646 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia | 1646 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia |
1647 | * Since these bits will become 0, after success execution of each | 1647 | * Since these bits will become 0, after success execution of each |
1648 | * instruction, we will change set them to mIA64_PSR | 1648 | * instruction, we will change set them to mIA64_PSR |
1649 | */ | 1649 | */ |
1650 | VCPU(vcpu, vpsr) = val | 1650 | VCPU(vcpu, vpsr) = val |
1651 | & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | | 1651 | & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | |
1652 | IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)); | 1652 | IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)); |
1653 | 1653 | ||
1654 | if (!old_psr.i && (val & IA64_PSR_I)) { | 1654 | if (!old_psr.i && (val & IA64_PSR_I)) { |
1655 | /* vpsr.i 0->1 */ | 1655 | /* vpsr.i 0->1 */ |
1656 | vcpu->arch.irq_check = 1; | 1656 | vcpu->arch.irq_check = 1; |
1657 | } | 1657 | } |
1658 | new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | 1658 | new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); |
1659 | 1659 | ||
1660 | /* | 1660 | /* |
1661 | * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr) | 1661 | * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr) |
1662 | * , except for the following bits: | 1662 | * , except for the following bits: |
1663 | * ic/i/dt/si/rt/mc/it/bn/vm | 1663 | * ic/i/dt/si/rt/mc/it/bn/vm |
1664 | */ | 1664 | */ |
1665 | mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI + | 1665 | mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI + |
1666 | IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + | 1666 | IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + |
1667 | IA64_PSR_VM; | 1667 | IA64_PSR_VM; |
1668 | 1668 | ||
1669 | regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask)); | 1669 | regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask)); |
1670 | 1670 | ||
1671 | check_mm_mode_switch(vcpu, old_psr, new_psr); | 1671 | check_mm_mode_switch(vcpu, old_psr, new_psr); |
1672 | 1672 | ||
1673 | return ; | 1673 | return ; |
1674 | } | 1674 | } |
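The comments above describe the two bit classes handled here: the single-step/debug style bits (id, da, dd, ss, ed, ia) are always cleared in the guest-visible PSR, while the control bits ic, i, dt, si, rt, mc, it, bn and vm stay under VMM control in the machine PSR and everything else is taken from the guest value. A condensed sketch of that split, assuming the same IA64_PSR_* definitions (hypothetical helper, not part of this file):

static void psr_split_sketch(unsigned long val,
			     unsigned long *vpsr, unsigned long *mpsr)
{
	const unsigned long clear_in_vpsr =
		IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
		IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA;
	const unsigned long keep_in_mpsr =
		IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
		IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
		IA64_PSR_VM;

	*vpsr = val & ~clear_in_vpsr;				/* guest-visible PSR */
	*mpsr = (*mpsr & keep_in_mpsr) | (val & ~keep_in_mpsr);	/* machine PSR */
}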
1675 | 1675 | ||
1676 | unsigned long vcpu_cover(struct kvm_vcpu *vcpu) | 1676 | unsigned long vcpu_cover(struct kvm_vcpu *vcpu) |
1677 | { | 1677 | { |
1678 | struct ia64_psr vpsr; | 1678 | struct ia64_psr vpsr; |
1679 | 1679 | ||
1680 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1680 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1681 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | 1681 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); |
1682 | 1682 | ||
1683 | if (!vpsr.ic) | 1683 | if (!vpsr.ic) |
1684 | VCPU(vcpu, ifs) = regs->cr_ifs; | 1684 | VCPU(vcpu, ifs) = regs->cr_ifs; |
1685 | regs->cr_ifs = IA64_IFS_V; | 1685 | regs->cr_ifs = IA64_IFS_V; |
1686 | return (IA64_NO_FAULT); | 1686 | return (IA64_NO_FAULT); |
1687 | } | 1687 | } |
1688 | 1688 | ||
1689 | 1689 | ||
1690 | 1690 | ||
1691 | /************************************************************************** | 1691 | /************************************************************************** |
1692 | VCPU banked general register access routines | 1692 | VCPU banked general register access routines |
1693 | **************************************************************************/ | 1693 | **************************************************************************/ |
1694 | #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ | 1694 | #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ |
1695 | do { \ | 1695 | do { \ |
1696 | __asm__ __volatile__ ( \ | 1696 | __asm__ __volatile__ ( \ |
1697 | ";;extr.u %0 = %3,%6,16;;\n" \ | 1697 | ";;extr.u %0 = %3,%6,16;;\n" \ |
1698 | "dep %1 = %0, %1, 0, 16;;\n" \ | 1698 | "dep %1 = %0, %1, 0, 16;;\n" \ |
1699 | "st8 [%4] = %1\n" \ | 1699 | "st8 [%4] = %1\n" \ |
1700 | "extr.u %0 = %2, 16, 16;;\n" \ | 1700 | "extr.u %0 = %2, 16, 16;;\n" \ |
1701 | "dep %3 = %0, %3, %6, 16;;\n" \ | 1701 | "dep %3 = %0, %3, %6, 16;;\n" \ |
1702 | "st8 [%5] = %3\n" \ | 1702 | "st8 [%5] = %3\n" \ |
1703 | ::"r"(i), "r"(*b1unat), "r"(*b0unat), \ | 1703 | ::"r"(i), "r"(*b1unat), "r"(*b0unat), \ |
1704 | "r"(*runat), "r"(b1unat), "r"(runat), \ | 1704 | "r"(*runat), "r"(b1unat), "r"(runat), \ |
1705 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ | 1705 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ |
1706 | } while (0) | 1706 | } while (0) |
1707 | 1707 | ||
1708 | void vcpu_bsw0(struct kvm_vcpu *vcpu) | 1708 | void vcpu_bsw0(struct kvm_vcpu *vcpu) |
1709 | { | 1709 | { |
1710 | unsigned long i; | 1710 | unsigned long i; |
1711 | 1711 | ||
1712 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1712 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1713 | unsigned long *r = &regs->r16; | 1713 | unsigned long *r = &regs->r16; |
1714 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); | 1714 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); |
1715 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); | 1715 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); |
1716 | unsigned long *runat = &regs->eml_unat; | 1716 | unsigned long *runat = &regs->eml_unat; |
1717 | unsigned long *b0unat = &VCPU(vcpu, vbnat); | 1717 | unsigned long *b0unat = &VCPU(vcpu, vbnat); |
1718 | unsigned long *b1unat = &VCPU(vcpu, vnat); | 1718 | unsigned long *b1unat = &VCPU(vcpu, vnat); |
1719 | 1719 | ||
1720 | 1720 | ||
1721 | if (VCPU(vcpu, vpsr) & IA64_PSR_BN) { | 1721 | if (VCPU(vcpu, vpsr) & IA64_PSR_BN) { |
1722 | for (i = 0; i < 16; i++) { | 1722 | for (i = 0; i < 16; i++) { |
1723 | *b1++ = *r; | 1723 | *b1++ = *r; |
1724 | *r++ = *b0++; | 1724 | *r++ = *b0++; |
1725 | } | 1725 | } |
1726 | vcpu_bsw0_unat(i, b0unat, b1unat, runat, | 1726 | vcpu_bsw0_unat(i, b0unat, b1unat, runat, |
1727 | VMM_PT_REGS_R16_SLOT); | 1727 | VMM_PT_REGS_R16_SLOT); |
1728 | VCPU(vcpu, vpsr) &= ~IA64_PSR_BN; | 1728 | VCPU(vcpu, vpsr) &= ~IA64_PSR_BN; |
1729 | } | 1729 | } |
1730 | } | 1730 | } |
1731 | 1731 | ||
1732 | #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ | 1732 | #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ |
1733 | do { \ | 1733 | do { \ |
1734 | __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \ | 1734 | __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \ |
1735 | "dep %1 = %0, %1, 16, 16;;\n" \ | 1735 | "dep %1 = %0, %1, 16, 16;;\n" \ |
1736 | "st8 [%4] = %1\n" \ | 1736 | "st8 [%4] = %1\n" \ |
1737 | "extr.u %0 = %2, 0, 16;;\n" \ | 1737 | "extr.u %0 = %2, 0, 16;;\n" \ |
1738 | "dep %3 = %0, %3, %6, 16;;\n" \ | 1738 | "dep %3 = %0, %3, %6, 16;;\n" \ |
1739 | "st8 [%5] = %3\n" \ | 1739 | "st8 [%5] = %3\n" \ |
1740 | ::"r"(i), "r"(*b0unat), "r"(*b1unat), \ | 1740 | ::"r"(i), "r"(*b0unat), "r"(*b1unat), \ |
1741 | "r"(*runat), "r"(b0unat), "r"(runat), \ | 1741 | "r"(*runat), "r"(b0unat), "r"(runat), \ |
1742 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ | 1742 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ |
1743 | } while (0) | 1743 | } while (0) |
1744 | 1744 | ||
1745 | void vcpu_bsw1(struct kvm_vcpu *vcpu) | 1745 | void vcpu_bsw1(struct kvm_vcpu *vcpu) |
1746 | { | 1746 | { |
1747 | unsigned long i; | 1747 | unsigned long i; |
1748 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1748 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1749 | unsigned long *r = &regs->r16; | 1749 | unsigned long *r = &regs->r16; |
1750 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); | 1750 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); |
1751 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); | 1751 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); |
1752 | unsigned long *runat = &regs->eml_unat; | 1752 | unsigned long *runat = &regs->eml_unat; |
1753 | unsigned long *b0unat = &VCPU(vcpu, vbnat); | 1753 | unsigned long *b0unat = &VCPU(vcpu, vbnat); |
1754 | unsigned long *b1unat = &VCPU(vcpu, vnat); | 1754 | unsigned long *b1unat = &VCPU(vcpu, vnat); |
1755 | 1755 | ||
1756 | if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) { | 1756 | if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) { |
1757 | for (i = 0; i < 16; i++) { | 1757 | for (i = 0; i < 16; i++) { |
1758 | *b0++ = *r; | 1758 | *b0++ = *r; |
1759 | *r++ = *b1++; | 1759 | *r++ = *b1++; |
1760 | } | 1760 | } |
1761 | vcpu_bsw1_unat(i, b0unat, b1unat, runat, | 1761 | vcpu_bsw1_unat(i, b0unat, b1unat, runat, |
1762 | VMM_PT_REGS_R16_SLOT); | 1762 | VMM_PT_REGS_R16_SLOT); |
1763 | VCPU(vcpu, vpsr) |= IA64_PSR_BN; | 1763 | VCPU(vcpu, vpsr) |= IA64_PSR_BN; |
1764 | } | 1764 | } |
1765 | } | 1765 | } |
1766 | 1766 | ||
1767 | void vcpu_rfi(struct kvm_vcpu *vcpu) | 1767 | void vcpu_rfi(struct kvm_vcpu *vcpu) |
1768 | { | 1768 | { |
1769 | unsigned long ifs, psr; | 1769 | unsigned long ifs, psr; |
1770 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1770 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1771 | 1771 | ||
1772 | psr = VCPU(vcpu, ipsr); | 1772 | psr = VCPU(vcpu, ipsr); |
1773 | if (psr & IA64_PSR_BN) | 1773 | if (psr & IA64_PSR_BN) |
1774 | vcpu_bsw1(vcpu); | 1774 | vcpu_bsw1(vcpu); |
1775 | else | 1775 | else |
1776 | vcpu_bsw0(vcpu); | 1776 | vcpu_bsw0(vcpu); |
1777 | vcpu_set_psr(vcpu, psr); | 1777 | vcpu_set_psr(vcpu, psr); |
1778 | ifs = VCPU(vcpu, ifs); | 1778 | ifs = VCPU(vcpu, ifs); |
1779 | if (ifs >> 63) | 1779 | if (ifs >> 63) |
1780 | regs->cr_ifs = ifs; | 1780 | regs->cr_ifs = ifs; |
1781 | regs->cr_iip = VCPU(vcpu, iip); | 1781 | regs->cr_iip = VCPU(vcpu, iip); |
1782 | } | 1782 | } |
1783 | 1783 | ||
1784 | /* | 1784 | /* |
1785 | VPSR can't keep track of below bits of guest PSR | 1785 | VPSR can't keep track of below bits of guest PSR |
1786 | This function gets guest PSR | 1786 | This function gets guest PSR |
1787 | */ | 1787 | */ |
1788 | 1788 | ||
1789 | unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu) | 1789 | unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu) |
1790 | { | 1790 | { |
1791 | unsigned long mask; | 1791 | unsigned long mask; |
1792 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1792 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1793 | 1793 | ||
1794 | mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | | 1794 | mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | |
1795 | IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI; | 1795 | IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI; |
1796 | return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask); | 1796 | return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask); |
1797 | } | 1797 | } |
1798 | 1798 | ||
1799 | void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst) | 1799 | void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst) |
1800 | { | 1800 | { |
1801 | unsigned long vpsr; | 1801 | unsigned long vpsr; |
1802 | unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21) | 1802 | unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21) |
1803 | | inst.M44.imm; | 1803 | | inst.M44.imm; |
1804 | 1804 | ||
1805 | vpsr = vcpu_get_psr(vcpu); | 1805 | vpsr = vcpu_get_psr(vcpu); |
1806 | vpsr &= (~imm24); | 1806 | vpsr &= (~imm24); |
1807 | vcpu_set_psr(vcpu, vpsr); | 1807 | vcpu_set_psr(vcpu, vpsr); |
1808 | } | 1808 | } |
1809 | 1809 | ||
1810 | void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst) | 1810 | void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst) |
1811 | { | 1811 | { |
1812 | unsigned long vpsr; | 1812 | unsigned long vpsr; |
1813 | unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | 1813 | unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) |
1814 | | inst.M44.imm; | 1814 | | inst.M44.imm; |
1815 | 1815 | ||
1816 | vpsr = vcpu_get_psr(vcpu); | 1816 | vpsr = vcpu_get_psr(vcpu); |
1817 | vpsr |= imm24; | 1817 | vpsr |= imm24; |
1818 | vcpu_set_psr(vcpu, vpsr); | 1818 | vcpu_set_psr(vcpu, vpsr); |
1819 | } | 1819 | } |
1820 | 1820 | ||
1821 | /* Generate Mask | 1821 | /* Generate Mask |
1822 | * Parameter: | 1822 | * Parameter: |
1823 | * bit -- starting bit | 1823 | * bit -- starting bit |
1824 | * len -- how many bits | 1824 | * len -- how many bits |
1825 | */ | 1825 | */ |
1826 | #define MASK(bit,len) \ | 1826 | #define MASK(bit,len) \ |
1827 | ({ \ | 1827 | ({ \ |
1828 | __u64 ret; \ | 1828 | __u64 ret; \ |
1829 | \ | 1829 | \ |
1830 | __asm __volatile("dep %0=-1, r0, %1, %2"\ | 1830 | __asm __volatile("dep %0=-1, r0, %1, %2"\ |
1831 | : "=r" (ret): \ | 1831 | : "=r" (ret): \ |
1832 | "M" (bit), \ | 1832 | "M" (bit), \ |
1833 | "M" (len)); \ | 1833 | "M" (len)); \ |
1834 | ret; \ | 1834 | ret; \ |
1835 | }) | 1835 | }) |
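MASK(bit, len) uses the dep instruction to deposit len consecutive 1-bits starting at position bit into a zero source, i.e. it forms ((1 << len) - 1) << bit in a single instruction. A portable C sketch of the same mask (hypothetical helper; the extra branch covers len == 64, which a plain shift would not):

static inline unsigned long mask_sketch(unsigned int bit, unsigned int len)
{
	unsigned long ones = (len >= 64) ? ~0UL : ((1UL << len) - 1);

	return ones << bit;	/* len ones starting at position bit */
}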
1836 | 1836 | ||
1837 | void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val) | 1837 | void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val) |
1838 | { | 1838 | { |
1839 | val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32)); | 1839 | val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32)); |
1840 | vcpu_set_psr(vcpu, val); | 1840 | vcpu_set_psr(vcpu, val); |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst) | 1843 | void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst) |
1844 | { | 1844 | { |
1845 | unsigned long val; | 1845 | unsigned long val; |
1846 | 1846 | ||
1847 | val = vcpu_get_gr(vcpu, inst.M35.r2); | 1847 | val = vcpu_get_gr(vcpu, inst.M35.r2); |
1848 | vcpu_set_psr_l(vcpu, val); | 1848 | vcpu_set_psr_l(vcpu, val); |
1849 | } | 1849 | } |
1850 | 1850 | ||
1851 | void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst) | 1851 | void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst) |
1852 | { | 1852 | { |
1853 | unsigned long val; | 1853 | unsigned long val; |
1854 | 1854 | ||
1855 | val = vcpu_get_psr(vcpu); | 1855 | val = vcpu_get_psr(vcpu); |
1856 | val = (val & MASK(0, 32)) | (val & MASK(35, 2)); | 1856 | val = (val & MASK(0, 32)) | (val & MASK(35, 2)); |
1857 | vcpu_set_gr(vcpu, inst.M33.r1, val, 0); | 1857 | vcpu_set_gr(vcpu, inst.M33.r1, val, 0); |
1858 | } | 1858 | } |
1859 | 1859 | ||
1860 | void vcpu_increment_iip(struct kvm_vcpu *vcpu) | 1860 | void vcpu_increment_iip(struct kvm_vcpu *vcpu) |
1861 | { | 1861 | { |
1862 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1862 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1863 | struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr; | 1863 | struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr; |
1864 | if (ipsr->ri == 2) { | 1864 | if (ipsr->ri == 2) { |
1865 | ipsr->ri = 0; | 1865 | ipsr->ri = 0; |
1866 | regs->cr_iip += 16; | 1866 | regs->cr_iip += 16; |
1867 | } else | 1867 | } else |
1868 | ipsr->ri++; | 1868 | ipsr->ri++; |
1869 | } | 1869 | } |
1870 | 1870 | ||
1871 | void vcpu_decrement_iip(struct kvm_vcpu *vcpu) | 1871 | void vcpu_decrement_iip(struct kvm_vcpu *vcpu) |
1872 | { | 1872 | { |
1873 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 1873 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
1874 | struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr; | 1874 | struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr; |
1875 | 1875 | ||
1876 | if (ipsr->ri == 0) { | 1876 | if (ipsr->ri == 0) { |
1877 | ipsr->ri = 2; | 1877 | ipsr->ri = 2; |
1878 | regs->cr_iip -= 16; | 1878 | regs->cr_iip -= 16; |
1879 | } else | 1879 | } else |
1880 | ipsr->ri--; | 1880 | ipsr->ri--; |
1881 | } | 1881 | } |
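These two helpers step the guest instruction pointer in IA-64 terms: cr.iip addresses a 16-byte bundle and psr.ri selects one of its three instruction slots, so moving past slot 2 (or back from slot 0) also moves to the next (or previous) bundle. A small sketch of the forward step under those assumptions:

static void next_slot_sketch(unsigned long *iip, unsigned int *ri)
{
	if (*ri == 2) {		/* last slot in the bundle */
		*ri = 0;
		*iip += 16;	/* bundles are 16 bytes apart */
	} else {
		(*ri)++;
	}
}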
1882 | 1882 | ||
1883 | /** Emulate a privileged operation. | 1883 | /** Emulate a privileged operation. |
1884 | * | 1884 | * |
1885 | * | 1885 | * |
1886 | * @param vcpu virtual cpu | 1886 | * @param vcpu virtual cpu |
1887 | * @cause the reason cause virtualization fault | 1887 | * @cause the reason cause virtualization fault |
1888 | * @opcode the instruction code which cause virtualization fault | 1888 | * @opcode the instruction code which cause virtualization fault |
1889 | */ | 1889 | */ |
1890 | 1890 | ||
1891 | void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs) | 1891 | void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs) |
1892 | { | 1892 | { |
1893 | unsigned long status, cause, opcode ; | 1893 | unsigned long status, cause, opcode ; |
1894 | INST64 inst; | 1894 | INST64 inst; |
1895 | 1895 | ||
1896 | status = IA64_NO_FAULT; | 1896 | status = IA64_NO_FAULT; |
1897 | cause = VMX(vcpu, cause); | 1897 | cause = VMX(vcpu, cause); |
1898 | opcode = VMX(vcpu, opcode); | 1898 | opcode = VMX(vcpu, opcode); |
1899 | inst.inst = opcode; | 1899 | inst.inst = opcode; |
1900 | /* | 1900 | /* |
1901 | * Switch to actual virtual rid in rr0 and rr4, | 1901 | * Switch to actual virtual rid in rr0 and rr4, |
1902 | * which is required by some tlb related instructions. | 1902 | * which is required by some tlb related instructions. |
1903 | */ | 1903 | */ |
1904 | prepare_if_physical_mode(vcpu); | 1904 | prepare_if_physical_mode(vcpu); |
1905 | 1905 | ||
1906 | switch (cause) { | 1906 | switch (cause) { |
1907 | case EVENT_RSM: | 1907 | case EVENT_RSM: |
1908 | kvm_rsm(vcpu, inst); | 1908 | kvm_rsm(vcpu, inst); |
1909 | break; | 1909 | break; |
1910 | case EVENT_SSM: | 1910 | case EVENT_SSM: |
1911 | kvm_ssm(vcpu, inst); | 1911 | kvm_ssm(vcpu, inst); |
1912 | break; | 1912 | break; |
1913 | case EVENT_MOV_TO_PSR: | 1913 | case EVENT_MOV_TO_PSR: |
1914 | kvm_mov_to_psr(vcpu, inst); | 1914 | kvm_mov_to_psr(vcpu, inst); |
1915 | break; | 1915 | break; |
1916 | case EVENT_MOV_FROM_PSR: | 1916 | case EVENT_MOV_FROM_PSR: |
1917 | kvm_mov_from_psr(vcpu, inst); | 1917 | kvm_mov_from_psr(vcpu, inst); |
1918 | break; | 1918 | break; |
1919 | case EVENT_MOV_FROM_CR: | 1919 | case EVENT_MOV_FROM_CR: |
1920 | kvm_mov_from_cr(vcpu, inst); | 1920 | kvm_mov_from_cr(vcpu, inst); |
1921 | break; | 1921 | break; |
1922 | case EVENT_MOV_TO_CR: | 1922 | case EVENT_MOV_TO_CR: |
1923 | kvm_mov_to_cr(vcpu, inst); | 1923 | kvm_mov_to_cr(vcpu, inst); |
1924 | break; | 1924 | break; |
1925 | case EVENT_BSW_0: | 1925 | case EVENT_BSW_0: |
1926 | vcpu_bsw0(vcpu); | 1926 | vcpu_bsw0(vcpu); |
1927 | break; | 1927 | break; |
1928 | case EVENT_BSW_1: | 1928 | case EVENT_BSW_1: |
1929 | vcpu_bsw1(vcpu); | 1929 | vcpu_bsw1(vcpu); |
1930 | break; | 1930 | break; |
1931 | case EVENT_COVER: | 1931 | case EVENT_COVER: |
1932 | vcpu_cover(vcpu); | 1932 | vcpu_cover(vcpu); |
1933 | break; | 1933 | break; |
1934 | case EVENT_RFI: | 1934 | case EVENT_RFI: |
1935 | vcpu_rfi(vcpu); | 1935 | vcpu_rfi(vcpu); |
1936 | break; | 1936 | break; |
1937 | case EVENT_ITR_D: | 1937 | case EVENT_ITR_D: |
1938 | kvm_itr_d(vcpu, inst); | 1938 | kvm_itr_d(vcpu, inst); |
1939 | break; | 1939 | break; |
1940 | case EVENT_ITR_I: | 1940 | case EVENT_ITR_I: |
1941 | kvm_itr_i(vcpu, inst); | 1941 | kvm_itr_i(vcpu, inst); |
1942 | break; | 1942 | break; |
1943 | case EVENT_PTR_D: | 1943 | case EVENT_PTR_D: |
1944 | kvm_ptr_d(vcpu, inst); | 1944 | kvm_ptr_d(vcpu, inst); |
1945 | break; | 1945 | break; |
1946 | case EVENT_PTR_I: | 1946 | case EVENT_PTR_I: |
1947 | kvm_ptr_i(vcpu, inst); | 1947 | kvm_ptr_i(vcpu, inst); |
1948 | break; | 1948 | break; |
1949 | case EVENT_ITC_D: | 1949 | case EVENT_ITC_D: |
1950 | kvm_itc_d(vcpu, inst); | 1950 | kvm_itc_d(vcpu, inst); |
1951 | break; | 1951 | break; |
1952 | case EVENT_ITC_I: | 1952 | case EVENT_ITC_I: |
1953 | kvm_itc_i(vcpu, inst); | 1953 | kvm_itc_i(vcpu, inst); |
1954 | break; | 1954 | break; |
1955 | case EVENT_PTC_L: | 1955 | case EVENT_PTC_L: |
1956 | kvm_ptc_l(vcpu, inst); | 1956 | kvm_ptc_l(vcpu, inst); |
1957 | break; | 1957 | break; |
1958 | case EVENT_PTC_G: | 1958 | case EVENT_PTC_G: |
1959 | kvm_ptc_g(vcpu, inst); | 1959 | kvm_ptc_g(vcpu, inst); |
1960 | break; | 1960 | break; |
1961 | case EVENT_PTC_GA: | 1961 | case EVENT_PTC_GA: |
1962 | kvm_ptc_ga(vcpu, inst); | 1962 | kvm_ptc_ga(vcpu, inst); |
1963 | break; | 1963 | break; |
1964 | case EVENT_PTC_E: | 1964 | case EVENT_PTC_E: |
1965 | kvm_ptc_e(vcpu, inst); | 1965 | kvm_ptc_e(vcpu, inst); |
1966 | break; | 1966 | break; |
1967 | case EVENT_MOV_TO_RR: | 1967 | case EVENT_MOV_TO_RR: |
1968 | kvm_mov_to_rr(vcpu, inst); | 1968 | kvm_mov_to_rr(vcpu, inst); |
1969 | break; | 1969 | break; |
1970 | case EVENT_MOV_FROM_RR: | 1970 | case EVENT_MOV_FROM_RR: |
1971 | kvm_mov_from_rr(vcpu, inst); | 1971 | kvm_mov_from_rr(vcpu, inst); |
1972 | break; | 1972 | break; |
1973 | case EVENT_THASH: | 1973 | case EVENT_THASH: |
1974 | kvm_thash(vcpu, inst); | 1974 | kvm_thash(vcpu, inst); |
1975 | break; | 1975 | break; |
1976 | case EVENT_TTAG: | 1976 | case EVENT_TTAG: |
1977 | kvm_ttag(vcpu, inst); | 1977 | kvm_ttag(vcpu, inst); |
1978 | break; | 1978 | break; |
1979 | case EVENT_TPA: | 1979 | case EVENT_TPA: |
1980 | status = kvm_tpa(vcpu, inst); | 1980 | status = kvm_tpa(vcpu, inst); |
1981 | break; | 1981 | break; |
1982 | case EVENT_TAK: | 1982 | case EVENT_TAK: |
1983 | kvm_tak(vcpu, inst); | 1983 | kvm_tak(vcpu, inst); |
1984 | break; | 1984 | break; |
1985 | case EVENT_MOV_TO_AR_IMM: | 1985 | case EVENT_MOV_TO_AR_IMM: |
1986 | kvm_mov_to_ar_imm(vcpu, inst); | 1986 | kvm_mov_to_ar_imm(vcpu, inst); |
1987 | break; | 1987 | break; |
1988 | case EVENT_MOV_TO_AR: | 1988 | case EVENT_MOV_TO_AR: |
1989 | kvm_mov_to_ar_reg(vcpu, inst); | 1989 | kvm_mov_to_ar_reg(vcpu, inst); |
1990 | break; | 1990 | break; |
1991 | case EVENT_MOV_FROM_AR: | 1991 | case EVENT_MOV_FROM_AR: |
1992 | kvm_mov_from_ar_reg(vcpu, inst); | 1992 | kvm_mov_from_ar_reg(vcpu, inst); |
1993 | break; | 1993 | break; |
1994 | case EVENT_MOV_TO_DBR: | 1994 | case EVENT_MOV_TO_DBR: |
1995 | kvm_mov_to_dbr(vcpu, inst); | 1995 | kvm_mov_to_dbr(vcpu, inst); |
1996 | break; | 1996 | break; |
1997 | case EVENT_MOV_TO_IBR: | 1997 | case EVENT_MOV_TO_IBR: |
1998 | kvm_mov_to_ibr(vcpu, inst); | 1998 | kvm_mov_to_ibr(vcpu, inst); |
1999 | break; | 1999 | break; |
2000 | case EVENT_MOV_TO_PMC: | 2000 | case EVENT_MOV_TO_PMC: |
2001 | kvm_mov_to_pmc(vcpu, inst); | 2001 | kvm_mov_to_pmc(vcpu, inst); |
2002 | break; | 2002 | break; |
2003 | case EVENT_MOV_TO_PMD: | 2003 | case EVENT_MOV_TO_PMD: |
2004 | kvm_mov_to_pmd(vcpu, inst); | 2004 | kvm_mov_to_pmd(vcpu, inst); |
2005 | break; | 2005 | break; |
2006 | case EVENT_MOV_TO_PKR: | 2006 | case EVENT_MOV_TO_PKR: |
2007 | kvm_mov_to_pkr(vcpu, inst); | 2007 | kvm_mov_to_pkr(vcpu, inst); |
2008 | break; | 2008 | break; |
2009 | case EVENT_MOV_FROM_DBR: | 2009 | case EVENT_MOV_FROM_DBR: |
2010 | kvm_mov_from_dbr(vcpu, inst); | 2010 | kvm_mov_from_dbr(vcpu, inst); |
2011 | break; | 2011 | break; |
2012 | case EVENT_MOV_FROM_IBR: | 2012 | case EVENT_MOV_FROM_IBR: |
2013 | kvm_mov_from_ibr(vcpu, inst); | 2013 | kvm_mov_from_ibr(vcpu, inst); |
2014 | break; | 2014 | break; |
2015 | case EVENT_MOV_FROM_PMC: | 2015 | case EVENT_MOV_FROM_PMC: |
2016 | kvm_mov_from_pmc(vcpu, inst); | 2016 | kvm_mov_from_pmc(vcpu, inst); |
2017 | break; | 2017 | break; |
2018 | case EVENT_MOV_FROM_PKR: | 2018 | case EVENT_MOV_FROM_PKR: |
2019 | kvm_mov_from_pkr(vcpu, inst); | 2019 | kvm_mov_from_pkr(vcpu, inst); |
2020 | break; | 2020 | break; |
2021 | case EVENT_MOV_FROM_CPUID: | 2021 | case EVENT_MOV_FROM_CPUID: |
2022 | kvm_mov_from_cpuid(vcpu, inst); | 2022 | kvm_mov_from_cpuid(vcpu, inst); |
2023 | break; | 2023 | break; |
2024 | case EVENT_VMSW: | 2024 | case EVENT_VMSW: |
2025 | status = IA64_FAULT; | 2025 | status = IA64_FAULT; |
2026 | break; | 2026 | break; |
2027 | default: | 2027 | default: |
2028 | break; | 2028 | break; |
2029 | }; | 2029 | }; |
2030 | /*Assume all status is NO_FAULT ?*/ | 2030 | /*Assume all status is NO_FAULT ?*/ |
2031 | if (status == IA64_NO_FAULT && cause != EVENT_RFI) | 2031 | if (status == IA64_NO_FAULT && cause != EVENT_RFI) |
2032 | vcpu_increment_iip(vcpu); | 2032 | vcpu_increment_iip(vcpu); |
2033 | 2033 | ||
2034 | recover_if_physical_mode(vcpu); | 2034 | recover_if_physical_mode(vcpu); |
2035 | } | 2035 | } |
2036 | 2036 | ||
2037 | void init_vcpu(struct kvm_vcpu *vcpu) | 2037 | void init_vcpu(struct kvm_vcpu *vcpu) |
2038 | { | 2038 | { |
2039 | int i; | 2039 | int i; |
2040 | 2040 | ||
2041 | vcpu->arch.mode_flags = GUEST_IN_PHY; | 2041 | vcpu->arch.mode_flags = GUEST_IN_PHY; |
2042 | VMX(vcpu, vrr[0]) = 0x38; | 2042 | VMX(vcpu, vrr[0]) = 0x38; |
2043 | VMX(vcpu, vrr[1]) = 0x38; | 2043 | VMX(vcpu, vrr[1]) = 0x38; |
2044 | VMX(vcpu, vrr[2]) = 0x38; | 2044 | VMX(vcpu, vrr[2]) = 0x38; |
2045 | VMX(vcpu, vrr[3]) = 0x38; | 2045 | VMX(vcpu, vrr[3]) = 0x38; |
2046 | VMX(vcpu, vrr[4]) = 0x38; | 2046 | VMX(vcpu, vrr[4]) = 0x38; |
2047 | VMX(vcpu, vrr[5]) = 0x38; | 2047 | VMX(vcpu, vrr[5]) = 0x38; |
2048 | VMX(vcpu, vrr[6]) = 0x38; | 2048 | VMX(vcpu, vrr[6]) = 0x38; |
2049 | VMX(vcpu, vrr[7]) = 0x38; | 2049 | VMX(vcpu, vrr[7]) = 0x38; |
2050 | VCPU(vcpu, vpsr) = IA64_PSR_BN; | 2050 | VCPU(vcpu, vpsr) = IA64_PSR_BN; |
2051 | VCPU(vcpu, dcr) = 0; | 2051 | VCPU(vcpu, dcr) = 0; |
2052 | /* pta.size must not be 0. The minimum is 15 (32k) */ | 2052 | /* pta.size must not be 0. The minimum is 15 (32k) */ |
2053 | VCPU(vcpu, pta) = 15 << 2; | 2053 | VCPU(vcpu, pta) = 15 << 2; |
2054 | VCPU(vcpu, itv) = 0x10000; | 2054 | VCPU(vcpu, itv) = 0x10000; |
2055 | VCPU(vcpu, itm) = 0; | 2055 | VCPU(vcpu, itm) = 0; |
2056 | VMX(vcpu, last_itc) = 0; | 2056 | VMX(vcpu, last_itc) = 0; |
2057 | 2057 | ||
2058 | VCPU(vcpu, lid) = VCPU_LID(vcpu); | 2058 | VCPU(vcpu, lid) = VCPU_LID(vcpu); |
2059 | VCPU(vcpu, ivr) = 0; | 2059 | VCPU(vcpu, ivr) = 0; |
2060 | VCPU(vcpu, tpr) = 0x10000; | 2060 | VCPU(vcpu, tpr) = 0x10000; |
2061 | VCPU(vcpu, eoi) = 0; | 2061 | VCPU(vcpu, eoi) = 0; |
2062 | VCPU(vcpu, irr[0]) = 0; | 2062 | VCPU(vcpu, irr[0]) = 0; |
2063 | VCPU(vcpu, irr[1]) = 0; | 2063 | VCPU(vcpu, irr[1]) = 0; |
2064 | VCPU(vcpu, irr[2]) = 0; | 2064 | VCPU(vcpu, irr[2]) = 0; |
2065 | VCPU(vcpu, irr[3]) = 0; | 2065 | VCPU(vcpu, irr[3]) = 0; |
2066 | VCPU(vcpu, pmv) = 0x10000; | 2066 | VCPU(vcpu, pmv) = 0x10000; |
2067 | VCPU(vcpu, cmcv) = 0x10000; | 2067 | VCPU(vcpu, cmcv) = 0x10000; |
2068 | VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */ | 2068 | VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */ |
2069 | VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */ | 2069 | VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */ |
2070 | update_vhpi(vcpu, NULL_VECTOR); | 2070 | update_vhpi(vcpu, NULL_VECTOR); |
2071 | VLSAPIC_XTP(vcpu) = 0x80; /* disabled */ | 2071 | VLSAPIC_XTP(vcpu) = 0x80; /* disabled */ |
2072 | 2072 | ||
2073 | for (i = 0; i < 4; i++) | 2073 | for (i = 0; i < 4; i++) |
2074 | VLSAPIC_INSVC(vcpu, i) = 0; | 2074 | VLSAPIC_INSVC(vcpu, i) = 0; |
2075 | } | 2075 | } |
2076 | 2076 | ||
2077 | void kvm_init_all_rr(struct kvm_vcpu *vcpu) | 2077 | void kvm_init_all_rr(struct kvm_vcpu *vcpu) |
2078 | { | 2078 | { |
2079 | unsigned long psr; | 2079 | unsigned long psr; |
2080 | 2080 | ||
2081 | local_irq_save(psr); | 2081 | local_irq_save(psr); |
2082 | 2082 | ||
2083 | /* WARNING: not allow co-exist of both virtual mode and physical | 2083 | /* WARNING: not allow co-exist of both virtual mode and physical |
2084 | * mode in same region | 2084 | * mode in same region |
2085 | */ | 2085 | */ |
2086 | 2086 | ||
2087 | vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0])); | 2087 | vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0])); |
2088 | vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4])); | 2088 | vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4])); |
2089 | 2089 | ||
2090 | if (is_physical_mode(vcpu)) { | 2090 | if (is_physical_mode(vcpu)) { |
2091 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) | 2091 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) |
2092 | panic_vm(vcpu, "Machine Status conflicts!\n"); | 2092 | panic_vm(vcpu, "Machine Status conflicts!\n"); |
2093 | 2093 | ||
2094 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); | 2094 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); |
2095 | ia64_dv_serialize_data(); | 2095 | ia64_dv_serialize_data(); |
2096 | ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); | 2096 | ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); |
2097 | ia64_dv_serialize_data(); | 2097 | ia64_dv_serialize_data(); |
2098 | } else { | 2098 | } else { |
2099 | ia64_set_rr((VRN0 << VRN_SHIFT), | 2099 | ia64_set_rr((VRN0 << VRN_SHIFT), |
2100 | vcpu->arch.metaphysical_saved_rr0); | 2100 | vcpu->arch.metaphysical_saved_rr0); |
2101 | ia64_dv_serialize_data(); | 2101 | ia64_dv_serialize_data(); |
2102 | ia64_set_rr((VRN4 << VRN_SHIFT), | 2102 | ia64_set_rr((VRN4 << VRN_SHIFT), |
2103 | vcpu->arch.metaphysical_saved_rr4); | 2103 | vcpu->arch.metaphysical_saved_rr4); |
2104 | ia64_dv_serialize_data(); | 2104 | ia64_dv_serialize_data(); |
2105 | } | 2105 | } |
2106 | ia64_set_rr((VRN1 << VRN_SHIFT), | 2106 | ia64_set_rr((VRN1 << VRN_SHIFT), |
2107 | vrrtomrr(VMX(vcpu, vrr[VRN1]))); | 2107 | vrrtomrr(VMX(vcpu, vrr[VRN1]))); |
2108 | ia64_dv_serialize_data(); | 2108 | ia64_dv_serialize_data(); |
2109 | ia64_set_rr((VRN2 << VRN_SHIFT), | 2109 | ia64_set_rr((VRN2 << VRN_SHIFT), |
2110 | vrrtomrr(VMX(vcpu, vrr[VRN2]))); | 2110 | vrrtomrr(VMX(vcpu, vrr[VRN2]))); |
2111 | ia64_dv_serialize_data(); | 2111 | ia64_dv_serialize_data(); |
2112 | ia64_set_rr((VRN3 << VRN_SHIFT), | 2112 | ia64_set_rr((VRN3 << VRN_SHIFT), |
2113 | vrrtomrr(VMX(vcpu, vrr[VRN3]))); | 2113 | vrrtomrr(VMX(vcpu, vrr[VRN3]))); |
2114 | ia64_dv_serialize_data(); | 2114 | ia64_dv_serialize_data(); |
2115 | ia64_set_rr((VRN5 << VRN_SHIFT), | 2115 | ia64_set_rr((VRN5 << VRN_SHIFT), |
2116 | vrrtomrr(VMX(vcpu, vrr[VRN5]))); | 2116 | vrrtomrr(VMX(vcpu, vrr[VRN5]))); |
2117 | ia64_dv_serialize_data(); | 2117 | ia64_dv_serialize_data(); |
2118 | ia64_set_rr((VRN7 << VRN_SHIFT), | 2118 | ia64_set_rr((VRN7 << VRN_SHIFT), |
2119 | vrrtomrr(VMX(vcpu, vrr[VRN7]))); | 2119 | vrrtomrr(VMX(vcpu, vrr[VRN7]))); |
2120 | ia64_dv_serialize_data(); | 2120 | ia64_dv_serialize_data(); |
2121 | ia64_srlz_d(); | 2121 | ia64_srlz_d(); |
2122 | ia64_set_psr(psr); | 2122 | ia64_set_psr(psr); |
2123 | } | 2123 | } |
2124 | 2124 | ||
2125 | int vmm_entry(void) | 2125 | int vmm_entry(void) |
2126 | { | 2126 | { |
2127 | struct kvm_vcpu *v; | 2127 | struct kvm_vcpu *v; |
2128 | v = current_vcpu; | 2128 | v = current_vcpu; |
2129 | 2129 | ||
2130 | ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd, | 2130 | ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd, |
2131 | 0, 0, 0, 0, 0, 0); | 2131 | 0, 0, 0, 0, 0, 0); |
2132 | kvm_init_vtlb(v); | 2132 | kvm_init_vtlb(v); |
2133 | kvm_init_vhpt(v); | 2133 | kvm_init_vhpt(v); |
2134 | init_vcpu(v); | 2134 | init_vcpu(v); |
2135 | kvm_init_all_rr(v); | 2135 | kvm_init_all_rr(v); |
2136 | vmm_reset_entry(); | 2136 | vmm_reset_entry(); |
2137 | 2137 | ||
2138 | return 0; | 2138 | return 0; |
2139 | } | 2139 | } |
2140 | 2140 | ||
2141 | static void kvm_show_registers(struct kvm_pt_regs *regs) | 2141 | static void kvm_show_registers(struct kvm_pt_regs *regs) |
2142 | { | 2142 | { |
2143 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; | 2143 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; |
2144 | 2144 | ||
2145 | struct kvm_vcpu *vcpu = current_vcpu; | 2145 | struct kvm_vcpu *vcpu = current_vcpu; |
2146 | if (vcpu != NULL) | 2146 | if (vcpu != NULL) |
2147 | printk("vcpu 0x%p vcpu %d\n", | 2147 | printk("vcpu 0x%p vcpu %d\n", |
2148 | vcpu, vcpu->vcpu_id); | 2148 | vcpu, vcpu->vcpu_id); |
2149 | 2149 | ||
2150 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", | 2150 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", |
2151 | regs->cr_ipsr, regs->cr_ifs, ip); | 2151 | regs->cr_ipsr, regs->cr_ifs, ip); |
2152 | 2152 | ||
2153 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | 2153 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", |
2154 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | 2154 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); |
2155 | printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", | 2155 | printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", |
2156 | regs->ar_rnat, regs->ar_bspstore, regs->pr); | 2156 | regs->ar_rnat, regs->ar_bspstore, regs->pr); |
2157 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", | 2157 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", |
2158 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); | 2158 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); |
2159 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); | 2159 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); |
2160 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, | 2160 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, |
2161 | regs->b6, regs->b7); | 2161 | regs->b6, regs->b7); |
2162 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", | 2162 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", |
2163 | regs->f6.u.bits[1], regs->f6.u.bits[0], | 2163 | regs->f6.u.bits[1], regs->f6.u.bits[0], |
2164 | regs->f7.u.bits[1], regs->f7.u.bits[0]); | 2164 | regs->f7.u.bits[1], regs->f7.u.bits[0]); |
2165 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", | 2165 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", |
2166 | regs->f8.u.bits[1], regs->f8.u.bits[0], | 2166 | regs->f8.u.bits[1], regs->f8.u.bits[0], |
2167 | regs->f9.u.bits[1], regs->f9.u.bits[0]); | 2167 | regs->f9.u.bits[1], regs->f9.u.bits[0]); |
2168 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", | 2168 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", |
2169 | regs->f10.u.bits[1], regs->f10.u.bits[0], | 2169 | regs->f10.u.bits[1], regs->f10.u.bits[0], |
2170 | regs->f11.u.bits[1], regs->f11.u.bits[0]); | 2170 | regs->f11.u.bits[1], regs->f11.u.bits[0]); |
2171 | 2171 | ||
2172 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, | 2172 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, |
2173 | regs->r2, regs->r3); | 2173 | regs->r2, regs->r3); |
2174 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, | 2174 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, |
2175 | regs->r9, regs->r10); | 2175 | regs->r9, regs->r10); |
2176 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, | 2176 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, |
2177 | regs->r12, regs->r13); | 2177 | regs->r12, regs->r13); |
2178 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, | 2178 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, |
2179 | regs->r15, regs->r16); | 2179 | regs->r15, regs->r16); |
2180 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, | 2180 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, |
2181 | regs->r18, regs->r19); | 2181 | regs->r18, regs->r19); |
2182 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, | 2182 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, |
2183 | regs->r21, regs->r22); | 2183 | regs->r21, regs->r22); |
2184 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, | 2184 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, |
2185 | regs->r24, regs->r25); | 2185 | regs->r24, regs->r25); |
2186 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, | 2186 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, |
2187 | regs->r27, regs->r28); | 2187 | regs->r27, regs->r28); |
2188 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, | 2188 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, |
2189 | regs->r30, regs->r31); | 2189 | regs->r30, regs->r31); |
2190 | 2190 | ||
2191 | } | 2191 | } |
2192 | 2192 | ||
2193 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) | 2193 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) |
2194 | { | 2194 | { |
2195 | va_list args; | 2195 | va_list args; |
2196 | char buf[256]; | 2196 | char buf[256]; |
2197 | 2197 | ||
2198 | struct kvm_pt_regs *regs = vcpu_regs(v); | 2198 | struct kvm_pt_regs *regs = vcpu_regs(v); |
2199 | struct exit_ctl_data *p = &v->arch.exit_data; | 2199 | struct exit_ctl_data *p = &v->arch.exit_data; |
2200 | va_start(args, fmt); | 2200 | va_start(args, fmt); |
2201 | vsnprintf(buf, sizeof(buf), fmt, args); | 2201 | vsnprintf(buf, sizeof(buf), fmt, args); |
2202 | va_end(args); | 2202 | va_end(args); |
2203 | printk(buf); | 2203 | printk(buf); |
2204 | kvm_show_registers(regs); | 2204 | kvm_show_registers(regs); |
2205 | p->exit_reason = EXIT_REASON_VM_PANIC; | 2205 | p->exit_reason = EXIT_REASON_VM_PANIC; |
2206 | vmm_transition(v); | 2206 | vmm_transition(v); |
2207 | /*Never to return*/ | 2207 | /*Never to return*/ |
2208 | while (1); | 2208 | while (1); |
2209 | } | 2209 | } |
2210 | 2210 |
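Note: VCPU_LID(), used by init_vcpu() above to initialize the virtual lid register, derives the guest's local SAPIC id from vcpu->vcpu_id. After this commit vcpu_id no longer has to equal the vcpu's index in the kvm->vcpus array, so the id programmed here simply follows whatever id userspace passed to KVM_CREATE_VCPU. A hedged sketch of that relationship (the real macro lives in arch/ia64/kvm/vcpu.h and is not part of this diff; the exact bit layout shown is an assumption):

	/* illustrative only: on ia64 the LID.id field sits in bits 24-31,
	 * so the local SAPIC id is plausibly taken straight from vcpu_id */
	#define VCPU_LID(v)	(((u64)(v)->vcpu_id) << 24)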
arch/x86/kvm/Kconfig
1 | # | 1 | # |
2 | # KVM configuration | 2 | # KVM configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | source "virt/kvm/Kconfig" | 5 | source "virt/kvm/Kconfig" |
6 | 6 | ||
7 | menuconfig VIRTUALIZATION | 7 | menuconfig VIRTUALIZATION |
8 | bool "Virtualization" | 8 | bool "Virtualization" |
9 | depends on HAVE_KVM || X86 | 9 | depends on HAVE_KVM || X86 |
10 | default y | 10 | default y |
11 | ---help--- | 11 | ---help--- |
12 | Say Y here to get to see options for using your Linux host to run other | 12 | Say Y here to get to see options for using your Linux host to run other |
13 | operating systems inside virtual machines (guests). | 13 | operating systems inside virtual machines (guests). |
14 | This option alone does not add any kernel code. | 14 | This option alone does not add any kernel code. |
15 | 15 | ||
16 | If you say N, all options in this submenu will be skipped and disabled. | 16 | If you say N, all options in this submenu will be skipped and disabled. |
17 | 17 | ||
18 | if VIRTUALIZATION | 18 | if VIRTUALIZATION |
19 | 19 | ||
20 | config KVM | 20 | config KVM |
21 | tristate "Kernel-based Virtual Machine (KVM) support" | 21 | tristate "Kernel-based Virtual Machine (KVM) support" |
22 | depends on HAVE_KVM | 22 | depends on HAVE_KVM |
23 | # for device assignment: | 23 | # for device assignment: |
24 | depends on PCI | 24 | depends on PCI |
25 | select PREEMPT_NOTIFIERS | 25 | select PREEMPT_NOTIFIERS |
26 | select MMU_NOTIFIER | 26 | select MMU_NOTIFIER |
27 | select ANON_INODES | 27 | select ANON_INODES |
28 | select HAVE_KVM_IRQCHIP | 28 | select HAVE_KVM_IRQCHIP |
29 | select HAVE_KVM_EVENTFD | 29 | select HAVE_KVM_EVENTFD |
30 | select KVM_APIC_ARCHITECTURE | ||
30 | ---help--- | 31 | ---help--- |
31 | Support hosting fully virtualized guest machines using hardware | 32 | Support hosting fully virtualized guest machines using hardware |
32 | virtualization extensions. You will need a fairly recent | 33 | virtualization extensions. You will need a fairly recent |
33 | processor equipped with virtualization extensions. You will also | 34 | processor equipped with virtualization extensions. You will also |
34 | need to select one or more of the processor modules below. | 35 | need to select one or more of the processor modules below. |
35 | 36 | ||
36 | This module provides access to the hardware capabilities through | 37 | This module provides access to the hardware capabilities through |
37 | a character device node named /dev/kvm. | 38 | a character device node named /dev/kvm. |
38 | 39 | ||
39 | To compile this as a module, choose M here: the module | 40 | To compile this as a module, choose M here: the module |
40 | will be called kvm. | 41 | will be called kvm. |
41 | 42 | ||
42 | If unsure, say N. | 43 | If unsure, say N. |
43 | 44 | ||
44 | config KVM_INTEL | 45 | config KVM_INTEL |
45 | tristate "KVM for Intel processors support" | 46 | tristate "KVM for Intel processors support" |
46 | depends on KVM | 47 | depends on KVM |
47 | ---help--- | 48 | ---help--- |
48 | Provides support for KVM on Intel processors equipped with the VT | 49 | Provides support for KVM on Intel processors equipped with the VT |
49 | extensions. | 50 | extensions. |
50 | 51 | ||
51 | To compile this as a module, choose M here: the module | 52 | To compile this as a module, choose M here: the module |
52 | will be called kvm-intel. | 53 | will be called kvm-intel. |
53 | 54 | ||
54 | config KVM_AMD | 55 | config KVM_AMD |
55 | tristate "KVM for AMD processors support" | 56 | tristate "KVM for AMD processors support" |
56 | depends on KVM | 57 | depends on KVM |
57 | ---help--- | 58 | ---help--- |
58 | Provides support for KVM on AMD processors equipped with the AMD-V | 59 | Provides support for KVM on AMD processors equipped with the AMD-V |
59 | (SVM) extensions. | 60 | (SVM) extensions. |
60 | 61 | ||
61 | To compile this as a module, choose M here: the module | 62 | To compile this as a module, choose M here: the module |
62 | will be called kvm-amd. | 63 | will be called kvm-amd. |
63 | 64 | ||
64 | config KVM_TRACE | 65 | config KVM_TRACE |
65 | bool "KVM trace support" | 66 | bool "KVM trace support" |
66 | depends on KVM && SYSFS | 67 | depends on KVM && SYSFS |
67 | select MARKERS | 68 | select MARKERS |
68 | select RELAY | 69 | select RELAY |
69 | select DEBUG_FS | 70 | select DEBUG_FS |
70 | default n | 71 | default n |
71 | ---help--- | 72 | ---help--- |
72 | This option allows reading a trace of kvm-related events through | 73 | This option allows reading a trace of kvm-related events through |
73 | relayfs. Note the ABI is not considered stable and will be | 74 | relayfs. Note the ABI is not considered stable and will be |
74 | modified in future updates. | 75 | modified in future updates. |
75 | 76 | ||
76 | # OK, it's a little counter-intuitive to do this, but it puts it neatly under | 77 | # OK, it's a little counter-intuitive to do this, but it puts it neatly under |
77 | # the virtualization menu. | 78 | # the virtualization menu. |
78 | source drivers/lguest/Kconfig | 79 | source drivers/lguest/Kconfig |
79 | source drivers/virtio/Kconfig | 80 | source drivers/virtio/Kconfig |
80 | 81 | ||
81 | endif # VIRTUALIZATION | 82 | endif # VIRTUALIZATION |
82 | 83 |
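The uapi hunks below add KVM_CAP_SET_BOOT_CPU_ID and the KVM_SET_BOOT_CPU_ID vm ioctl, letting userspace choose which vcpu_id is the boot processor instead of assuming vcpu 0. A minimal, hypothetical userspace sketch of the intended call sequence (fd setup, error handling and the helper name are assumptions, not part of this commit):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* illustrative only: pick the boot vcpu id for a freshly created VM */
	static int set_boot_cpu(int kvm_fd, int vm_fd, unsigned long boot_apic_id)
	{
		/* KVM_CHECK_EXTENSION is a /dev/kvm ioctl; > 0 means the cap is present */
		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID) <= 0)
			return -1;	/* older kernels: boot vcpu is implicitly id 0 */

		/* VM ioctl taking the id as its argument; intended to be issued
		 * before the first KVM_CREATE_VCPU */
		return ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, boot_apic_id);
	}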
include/linux/kvm.h
1 | #ifndef __LINUX_KVM_H | 1 | #ifndef __LINUX_KVM_H |
2 | #define __LINUX_KVM_H | 2 | #define __LINUX_KVM_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Userspace interface for /dev/kvm - kernel based virtual machine | 5 | * Userspace interface for /dev/kvm - kernel based virtual machine |
6 | * | 6 | * |
7 | * Note: you must update KVM_API_VERSION if you change this interface. | 7 | * Note: you must update KVM_API_VERSION if you change this interface. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
12 | #include <linux/ioctl.h> | 12 | #include <linux/ioctl.h> |
13 | #include <asm/kvm.h> | 13 | #include <asm/kvm.h> |
14 | 14 | ||
15 | #define KVM_API_VERSION 12 | 15 | #define KVM_API_VERSION 12 |
16 | 16 | ||
17 | /* for KVM_TRACE_ENABLE */ | 17 | /* for KVM_TRACE_ENABLE */ |
18 | struct kvm_user_trace_setup { | 18 | struct kvm_user_trace_setup { |
19 | __u32 buf_size; /* sub_buffer size of each per-cpu */ | 19 | __u32 buf_size; /* sub_buffer size of each per-cpu */ |
20 | __u32 buf_nr; /* the number of sub_buffers of each per-cpu */ | 20 | __u32 buf_nr; /* the number of sub_buffers of each per-cpu */ |
21 | }; | 21 | }; |
22 | 22 | ||
23 | /* for KVM_CREATE_MEMORY_REGION */ | 23 | /* for KVM_CREATE_MEMORY_REGION */ |
24 | struct kvm_memory_region { | 24 | struct kvm_memory_region { |
25 | __u32 slot; | 25 | __u32 slot; |
26 | __u32 flags; | 26 | __u32 flags; |
27 | __u64 guest_phys_addr; | 27 | __u64 guest_phys_addr; |
28 | __u64 memory_size; /* bytes */ | 28 | __u64 memory_size; /* bytes */ |
29 | }; | 29 | }; |
30 | 30 | ||
31 | /* for KVM_SET_USER_MEMORY_REGION */ | 31 | /* for KVM_SET_USER_MEMORY_REGION */ |
32 | struct kvm_userspace_memory_region { | 32 | struct kvm_userspace_memory_region { |
33 | __u32 slot; | 33 | __u32 slot; |
34 | __u32 flags; | 34 | __u32 flags; |
35 | __u64 guest_phys_addr; | 35 | __u64 guest_phys_addr; |
36 | __u64 memory_size; /* bytes */ | 36 | __u64 memory_size; /* bytes */ |
37 | __u64 userspace_addr; /* start of the userspace allocated memory */ | 37 | __u64 userspace_addr; /* start of the userspace allocated memory */ |
38 | }; | 38 | }; |
39 | 39 | ||
40 | /* for kvm_memory_region::flags */ | 40 | /* for kvm_memory_region::flags */ |
41 | #define KVM_MEM_LOG_DIRTY_PAGES 1UL | 41 | #define KVM_MEM_LOG_DIRTY_PAGES 1UL |
42 | 42 | ||
43 | 43 | ||
44 | /* for KVM_IRQ_LINE */ | 44 | /* for KVM_IRQ_LINE */ |
45 | struct kvm_irq_level { | 45 | struct kvm_irq_level { |
46 | /* | 46 | /* |
47 | * ACPI gsi notion of irq. | 47 | * ACPI gsi notion of irq. |
48 | * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. | 48 | * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. |
49 | * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. | 49 | * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. |
50 | */ | 50 | */ |
51 | union { | 51 | union { |
52 | __u32 irq; | 52 | __u32 irq; |
53 | __s32 status; | 53 | __s32 status; |
54 | }; | 54 | }; |
55 | __u32 level; | 55 | __u32 level; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | 58 | ||
59 | struct kvm_irqchip { | 59 | struct kvm_irqchip { |
60 | __u32 chip_id; | 60 | __u32 chip_id; |
61 | __u32 pad; | 61 | __u32 pad; |
62 | union { | 62 | union { |
63 | char dummy[512]; /* reserving space */ | 63 | char dummy[512]; /* reserving space */ |
64 | #ifdef __KVM_HAVE_PIT | 64 | #ifdef __KVM_HAVE_PIT |
65 | struct kvm_pic_state pic; | 65 | struct kvm_pic_state pic; |
66 | #endif | 66 | #endif |
67 | #ifdef __KVM_HAVE_IOAPIC | 67 | #ifdef __KVM_HAVE_IOAPIC |
68 | struct kvm_ioapic_state ioapic; | 68 | struct kvm_ioapic_state ioapic; |
69 | #endif | 69 | #endif |
70 | } chip; | 70 | } chip; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | /* for KVM_CREATE_PIT2 */ | 73 | /* for KVM_CREATE_PIT2 */ |
74 | struct kvm_pit_config { | 74 | struct kvm_pit_config { |
75 | __u32 flags; | 75 | __u32 flags; |
76 | __u32 pad[15]; | 76 | __u32 pad[15]; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | #define KVM_PIT_SPEAKER_DUMMY 1 | 79 | #define KVM_PIT_SPEAKER_DUMMY 1 |
80 | 80 | ||
81 | #define KVM_EXIT_UNKNOWN 0 | 81 | #define KVM_EXIT_UNKNOWN 0 |
82 | #define KVM_EXIT_EXCEPTION 1 | 82 | #define KVM_EXIT_EXCEPTION 1 |
83 | #define KVM_EXIT_IO 2 | 83 | #define KVM_EXIT_IO 2 |
84 | #define KVM_EXIT_HYPERCALL 3 | 84 | #define KVM_EXIT_HYPERCALL 3 |
85 | #define KVM_EXIT_DEBUG 4 | 85 | #define KVM_EXIT_DEBUG 4 |
86 | #define KVM_EXIT_HLT 5 | 86 | #define KVM_EXIT_HLT 5 |
87 | #define KVM_EXIT_MMIO 6 | 87 | #define KVM_EXIT_MMIO 6 |
88 | #define KVM_EXIT_IRQ_WINDOW_OPEN 7 | 88 | #define KVM_EXIT_IRQ_WINDOW_OPEN 7 |
89 | #define KVM_EXIT_SHUTDOWN 8 | 89 | #define KVM_EXIT_SHUTDOWN 8 |
90 | #define KVM_EXIT_FAIL_ENTRY 9 | 90 | #define KVM_EXIT_FAIL_ENTRY 9 |
91 | #define KVM_EXIT_INTR 10 | 91 | #define KVM_EXIT_INTR 10 |
92 | #define KVM_EXIT_SET_TPR 11 | 92 | #define KVM_EXIT_SET_TPR 11 |
93 | #define KVM_EXIT_TPR_ACCESS 12 | 93 | #define KVM_EXIT_TPR_ACCESS 12 |
94 | #define KVM_EXIT_S390_SIEIC 13 | 94 | #define KVM_EXIT_S390_SIEIC 13 |
95 | #define KVM_EXIT_S390_RESET 14 | 95 | #define KVM_EXIT_S390_RESET 14 |
96 | #define KVM_EXIT_DCR 15 | 96 | #define KVM_EXIT_DCR 15 |
97 | #define KVM_EXIT_NMI 16 | 97 | #define KVM_EXIT_NMI 16 |
98 | 98 | ||
99 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ | 99 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ |
100 | struct kvm_run { | 100 | struct kvm_run { |
101 | /* in */ | 101 | /* in */ |
102 | __u8 request_interrupt_window; | 102 | __u8 request_interrupt_window; |
103 | __u8 padding1[7]; | 103 | __u8 padding1[7]; |
104 | 104 | ||
105 | /* out */ | 105 | /* out */ |
106 | __u32 exit_reason; | 106 | __u32 exit_reason; |
107 | __u8 ready_for_interrupt_injection; | 107 | __u8 ready_for_interrupt_injection; |
108 | __u8 if_flag; | 108 | __u8 if_flag; |
109 | __u8 padding2[2]; | 109 | __u8 padding2[2]; |
110 | 110 | ||
111 | /* in (pre_kvm_run), out (post_kvm_run) */ | 111 | /* in (pre_kvm_run), out (post_kvm_run) */ |
112 | __u64 cr8; | 112 | __u64 cr8; |
113 | __u64 apic_base; | 113 | __u64 apic_base; |
114 | 114 | ||
115 | union { | 115 | union { |
116 | /* KVM_EXIT_UNKNOWN */ | 116 | /* KVM_EXIT_UNKNOWN */ |
117 | struct { | 117 | struct { |
118 | __u64 hardware_exit_reason; | 118 | __u64 hardware_exit_reason; |
119 | } hw; | 119 | } hw; |
120 | /* KVM_EXIT_FAIL_ENTRY */ | 120 | /* KVM_EXIT_FAIL_ENTRY */ |
121 | struct { | 121 | struct { |
122 | __u64 hardware_entry_failure_reason; | 122 | __u64 hardware_entry_failure_reason; |
123 | } fail_entry; | 123 | } fail_entry; |
124 | /* KVM_EXIT_EXCEPTION */ | 124 | /* KVM_EXIT_EXCEPTION */ |
125 | struct { | 125 | struct { |
126 | __u32 exception; | 126 | __u32 exception; |
127 | __u32 error_code; | 127 | __u32 error_code; |
128 | } ex; | 128 | } ex; |
129 | /* KVM_EXIT_IO */ | 129 | /* KVM_EXIT_IO */ |
130 | struct { | 130 | struct { |
131 | #define KVM_EXIT_IO_IN 0 | 131 | #define KVM_EXIT_IO_IN 0 |
132 | #define KVM_EXIT_IO_OUT 1 | 132 | #define KVM_EXIT_IO_OUT 1 |
133 | __u8 direction; | 133 | __u8 direction; |
134 | __u8 size; /* bytes */ | 134 | __u8 size; /* bytes */ |
135 | __u16 port; | 135 | __u16 port; |
136 | __u32 count; | 136 | __u32 count; |
137 | __u64 data_offset; /* relative to kvm_run start */ | 137 | __u64 data_offset; /* relative to kvm_run start */ |
138 | } io; | 138 | } io; |
139 | struct { | 139 | struct { |
140 | struct kvm_debug_exit_arch arch; | 140 | struct kvm_debug_exit_arch arch; |
141 | } debug; | 141 | } debug; |
142 | /* KVM_EXIT_MMIO */ | 142 | /* KVM_EXIT_MMIO */ |
143 | struct { | 143 | struct { |
144 | __u64 phys_addr; | 144 | __u64 phys_addr; |
145 | __u8 data[8]; | 145 | __u8 data[8]; |
146 | __u32 len; | 146 | __u32 len; |
147 | __u8 is_write; | 147 | __u8 is_write; |
148 | } mmio; | 148 | } mmio; |
149 | /* KVM_EXIT_HYPERCALL */ | 149 | /* KVM_EXIT_HYPERCALL */ |
150 | struct { | 150 | struct { |
151 | __u64 nr; | 151 | __u64 nr; |
152 | __u64 args[6]; | 152 | __u64 args[6]; |
153 | __u64 ret; | 153 | __u64 ret; |
154 | __u32 longmode; | 154 | __u32 longmode; |
155 | __u32 pad; | 155 | __u32 pad; |
156 | } hypercall; | 156 | } hypercall; |
157 | /* KVM_EXIT_TPR_ACCESS */ | 157 | /* KVM_EXIT_TPR_ACCESS */ |
158 | struct { | 158 | struct { |
159 | __u64 rip; | 159 | __u64 rip; |
160 | __u32 is_write; | 160 | __u32 is_write; |
161 | __u32 pad; | 161 | __u32 pad; |
162 | } tpr_access; | 162 | } tpr_access; |
163 | /* KVM_EXIT_S390_SIEIC */ | 163 | /* KVM_EXIT_S390_SIEIC */ |
164 | struct { | 164 | struct { |
165 | __u8 icptcode; | 165 | __u8 icptcode; |
166 | __u64 mask; /* psw upper half */ | 166 | __u64 mask; /* psw upper half */ |
167 | __u64 addr; /* psw lower half */ | 167 | __u64 addr; /* psw lower half */ |
168 | __u16 ipa; | 168 | __u16 ipa; |
169 | __u32 ipb; | 169 | __u32 ipb; |
170 | } s390_sieic; | 170 | } s390_sieic; |
171 | /* KVM_EXIT_S390_RESET */ | 171 | /* KVM_EXIT_S390_RESET */ |
172 | #define KVM_S390_RESET_POR 1 | 172 | #define KVM_S390_RESET_POR 1 |
173 | #define KVM_S390_RESET_CLEAR 2 | 173 | #define KVM_S390_RESET_CLEAR 2 |
174 | #define KVM_S390_RESET_SUBSYSTEM 4 | 174 | #define KVM_S390_RESET_SUBSYSTEM 4 |
175 | #define KVM_S390_RESET_CPU_INIT 8 | 175 | #define KVM_S390_RESET_CPU_INIT 8 |
176 | #define KVM_S390_RESET_IPL 16 | 176 | #define KVM_S390_RESET_IPL 16 |
177 | __u64 s390_reset_flags; | 177 | __u64 s390_reset_flags; |
178 | /* KVM_EXIT_DCR */ | 178 | /* KVM_EXIT_DCR */ |
179 | struct { | 179 | struct { |
180 | __u32 dcrn; | 180 | __u32 dcrn; |
181 | __u32 data; | 181 | __u32 data; |
182 | __u8 is_write; | 182 | __u8 is_write; |
183 | } dcr; | 183 | } dcr; |
184 | /* Fix the size of the union. */ | 184 | /* Fix the size of the union. */ |
185 | char padding[256]; | 185 | char padding[256]; |
186 | }; | 186 | }; |
187 | }; | 187 | }; |
188 | 188 | ||
189 | /* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */ | 189 | /* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */ |
190 | 190 | ||
191 | struct kvm_coalesced_mmio_zone { | 191 | struct kvm_coalesced_mmio_zone { |
192 | __u64 addr; | 192 | __u64 addr; |
193 | __u32 size; | 193 | __u32 size; |
194 | __u32 pad; | 194 | __u32 pad; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct kvm_coalesced_mmio { | 197 | struct kvm_coalesced_mmio { |
198 | __u64 phys_addr; | 198 | __u64 phys_addr; |
199 | __u32 len; | 199 | __u32 len; |
200 | __u32 pad; | 200 | __u32 pad; |
201 | __u8 data[8]; | 201 | __u8 data[8]; |
202 | }; | 202 | }; |
203 | 203 | ||
204 | struct kvm_coalesced_mmio_ring { | 204 | struct kvm_coalesced_mmio_ring { |
205 | __u32 first, last; | 205 | __u32 first, last; |
206 | struct kvm_coalesced_mmio coalesced_mmio[0]; | 206 | struct kvm_coalesced_mmio coalesced_mmio[0]; |
207 | }; | 207 | }; |
208 | 208 | ||
209 | #define KVM_COALESCED_MMIO_MAX \ | 209 | #define KVM_COALESCED_MMIO_MAX \ |
210 | ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \ | 210 | ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \ |
211 | sizeof(struct kvm_coalesced_mmio)) | 211 | sizeof(struct kvm_coalesced_mmio)) |
212 | 212 | ||
213 | /* for KVM_TRANSLATE */ | 213 | /* for KVM_TRANSLATE */ |
214 | struct kvm_translation { | 214 | struct kvm_translation { |
215 | /* in */ | 215 | /* in */ |
216 | __u64 linear_address; | 216 | __u64 linear_address; |
217 | 217 | ||
218 | /* out */ | 218 | /* out */ |
219 | __u64 physical_address; | 219 | __u64 physical_address; |
220 | __u8 valid; | 220 | __u8 valid; |
221 | __u8 writeable; | 221 | __u8 writeable; |
222 | __u8 usermode; | 222 | __u8 usermode; |
223 | __u8 pad[5]; | 223 | __u8 pad[5]; |
224 | }; | 224 | }; |
225 | 225 | ||
226 | /* for KVM_INTERRUPT */ | 226 | /* for KVM_INTERRUPT */ |
227 | struct kvm_interrupt { | 227 | struct kvm_interrupt { |
228 | /* in */ | 228 | /* in */ |
229 | __u32 irq; | 229 | __u32 irq; |
230 | }; | 230 | }; |
231 | 231 | ||
232 | /* for KVM_GET_DIRTY_LOG */ | 232 | /* for KVM_GET_DIRTY_LOG */ |
233 | struct kvm_dirty_log { | 233 | struct kvm_dirty_log { |
234 | __u32 slot; | 234 | __u32 slot; |
235 | __u32 padding1; | 235 | __u32 padding1; |
236 | union { | 236 | union { |
237 | void __user *dirty_bitmap; /* one bit per page */ | 237 | void __user *dirty_bitmap; /* one bit per page */ |
238 | __u64 padding2; | 238 | __u64 padding2; |
239 | }; | 239 | }; |
240 | }; | 240 | }; |
241 | 241 | ||
242 | /* for KVM_SET_SIGNAL_MASK */ | 242 | /* for KVM_SET_SIGNAL_MASK */ |
243 | struct kvm_signal_mask { | 243 | struct kvm_signal_mask { |
244 | __u32 len; | 244 | __u32 len; |
245 | __u8 sigset[0]; | 245 | __u8 sigset[0]; |
246 | }; | 246 | }; |
247 | 247 | ||
248 | /* for KVM_TPR_ACCESS_REPORTING */ | 248 | /* for KVM_TPR_ACCESS_REPORTING */ |
249 | struct kvm_tpr_access_ctl { | 249 | struct kvm_tpr_access_ctl { |
250 | __u32 enabled; | 250 | __u32 enabled; |
251 | __u32 flags; | 251 | __u32 flags; |
252 | __u32 reserved[8]; | 252 | __u32 reserved[8]; |
253 | }; | 253 | }; |
254 | 254 | ||
255 | /* for KVM_SET_VAPIC_ADDR */ | 255 | /* for KVM_SET_VAPIC_ADDR */ |
256 | struct kvm_vapic_addr { | 256 | struct kvm_vapic_addr { |
257 | __u64 vapic_addr; | 257 | __u64 vapic_addr; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | /* for KVM_SET_MPSTATE */ | 260 | /* for KVM_SET_MPSTATE */ |
261 | 261 | ||
262 | #define KVM_MP_STATE_RUNNABLE 0 | 262 | #define KVM_MP_STATE_RUNNABLE 0 |
263 | #define KVM_MP_STATE_UNINITIALIZED 1 | 263 | #define KVM_MP_STATE_UNINITIALIZED 1 |
264 | #define KVM_MP_STATE_INIT_RECEIVED 2 | 264 | #define KVM_MP_STATE_INIT_RECEIVED 2 |
265 | #define KVM_MP_STATE_HALTED 3 | 265 | #define KVM_MP_STATE_HALTED 3 |
266 | #define KVM_MP_STATE_SIPI_RECEIVED 4 | 266 | #define KVM_MP_STATE_SIPI_RECEIVED 4 |
267 | 267 | ||
268 | struct kvm_mp_state { | 268 | struct kvm_mp_state { |
269 | __u32 mp_state; | 269 | __u32 mp_state; |
270 | }; | 270 | }; |
271 | 271 | ||
272 | struct kvm_s390_psw { | 272 | struct kvm_s390_psw { |
273 | __u64 mask; | 273 | __u64 mask; |
274 | __u64 addr; | 274 | __u64 addr; |
275 | }; | 275 | }; |
276 | 276 | ||
277 | /* valid values for type in kvm_s390_interrupt */ | 277 | /* valid values for type in kvm_s390_interrupt */ |
278 | #define KVM_S390_SIGP_STOP 0xfffe0000u | 278 | #define KVM_S390_SIGP_STOP 0xfffe0000u |
279 | #define KVM_S390_PROGRAM_INT 0xfffe0001u | 279 | #define KVM_S390_PROGRAM_INT 0xfffe0001u |
280 | #define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u | 280 | #define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u |
281 | #define KVM_S390_RESTART 0xfffe0003u | 281 | #define KVM_S390_RESTART 0xfffe0003u |
282 | #define KVM_S390_INT_VIRTIO 0xffff2603u | 282 | #define KVM_S390_INT_VIRTIO 0xffff2603u |
283 | #define KVM_S390_INT_SERVICE 0xffff2401u | 283 | #define KVM_S390_INT_SERVICE 0xffff2401u |
284 | #define KVM_S390_INT_EMERGENCY 0xffff1201u | 284 | #define KVM_S390_INT_EMERGENCY 0xffff1201u |
285 | 285 | ||
286 | struct kvm_s390_interrupt { | 286 | struct kvm_s390_interrupt { |
287 | __u32 type; | 287 | __u32 type; |
288 | __u32 parm; | 288 | __u32 parm; |
289 | __u64 parm64; | 289 | __u64 parm64; |
290 | }; | 290 | }; |
291 | 291 | ||
292 | /* for KVM_SET_GUEST_DEBUG */ | 292 | /* for KVM_SET_GUEST_DEBUG */ |
293 | 293 | ||
294 | #define KVM_GUESTDBG_ENABLE 0x00000001 | 294 | #define KVM_GUESTDBG_ENABLE 0x00000001 |
295 | #define KVM_GUESTDBG_SINGLESTEP 0x00000002 | 295 | #define KVM_GUESTDBG_SINGLESTEP 0x00000002 |
296 | 296 | ||
297 | struct kvm_guest_debug { | 297 | struct kvm_guest_debug { |
298 | __u32 control; | 298 | __u32 control; |
299 | __u32 pad; | 299 | __u32 pad; |
300 | struct kvm_guest_debug_arch arch; | 300 | struct kvm_guest_debug_arch arch; |
301 | }; | 301 | }; |
302 | 302 | ||
303 | #define KVM_TRC_SHIFT 16 | 303 | #define KVM_TRC_SHIFT 16 |
304 | /* | 304 | /* |
305 | * kvm trace categories | 305 | * kvm trace categories |
306 | */ | 306 | */ |
307 | #define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) | 307 | #define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) |
308 | #define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */ | 308 | #define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */ |
309 | 309 | ||
310 | /* | 310 | /* |
311 | * kvm trace action | 311 | * kvm trace action |
312 | */ | 312 | */ |
313 | #define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) | 313 | #define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) |
314 | #define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) | 314 | #define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) |
315 | #define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) | 315 | #define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) |
316 | 316 | ||
317 | #define KVM_TRC_HEAD_SIZE 12 | 317 | #define KVM_TRC_HEAD_SIZE 12 |
318 | #define KVM_TRC_CYCLE_SIZE 8 | 318 | #define KVM_TRC_CYCLE_SIZE 8 |
319 | #define KVM_TRC_EXTRA_MAX 7 | 319 | #define KVM_TRC_EXTRA_MAX 7 |
320 | 320 | ||
321 | /* This structure represents a single trace buffer record. */ | 321 | /* This structure represents a single trace buffer record. */ |
322 | struct kvm_trace_rec { | 322 | struct kvm_trace_rec { |
323 | /* variable rec_val | 323 | /* variable rec_val |
324 | * is split into: | 324 | * is split into: |
325 | * bits 0 - 27 -> event id | 325 | * bits 0 - 27 -> event id |
326 | * bits 28 -30 -> number of extra data args of size u32 | 326 | * bits 28 -30 -> number of extra data args of size u32 |
327 | * bits 31 -> binary indicator for if tsc is in record | 327 | * bits 31 -> binary indicator for if tsc is in record |
328 | */ | 328 | */ |
329 | __u32 rec_val; | 329 | __u32 rec_val; |
330 | __u32 pid; | 330 | __u32 pid; |
331 | __u32 vcpu_id; | 331 | __u32 vcpu_id; |
332 | union { | 332 | union { |
333 | struct { | 333 | struct { |
334 | __u64 timestamp; | 334 | __u64 timestamp; |
335 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; | 335 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; |
336 | } __attribute__((packed)) timestamp; | 336 | } __attribute__((packed)) timestamp; |
337 | struct { | 337 | struct { |
338 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; | 338 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; |
339 | } notimestamp; | 339 | } notimestamp; |
340 | } u; | 340 | } u; |
341 | }; | 341 | }; |
342 | 342 | ||
343 | #define TRACE_REC_EVENT_ID(val) \ | 343 | #define TRACE_REC_EVENT_ID(val) \ |
344 | (0x0fffffff & (val)) | 344 | (0x0fffffff & (val)) |
345 | #define TRACE_REC_NUM_DATA_ARGS(val) \ | 345 | #define TRACE_REC_NUM_DATA_ARGS(val) \ |
346 | (0x70000000 & ((val) << 28)) | 346 | (0x70000000 & ((val) << 28)) |
347 | #define TRACE_REC_TCS(val) \ | 347 | #define TRACE_REC_TCS(val) \ |
348 | (0x80000000 & ((val) << 31)) | 348 | (0x80000000 & ((val) << 31)) |
349 | 349 | ||
350 | #define KVMIO 0xAE | 350 | #define KVMIO 0xAE |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * ioctls for /dev/kvm fds: | 353 | * ioctls for /dev/kvm fds: |
354 | */ | 354 | */ |
355 | #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) | 355 | #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) |
356 | #define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ | 356 | #define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ |
357 | #define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) | 357 | #define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) |
358 | 358 | ||
359 | #define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06) | 359 | #define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06) |
360 | /* | 360 | /* |
361 | * Check if a kvm extension is available. Argument is extension number, | 361 | * Check if a kvm extension is available. Argument is extension number, |
362 | * return is 1 (yes) or 0 (no, sorry). | 362 | * return is 1 (yes) or 0 (no, sorry). |
363 | */ | 363 | */ |
364 | #define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03) | 364 | #define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03) |
365 | /* | 365 | /* |
366 | * Get size for mmap(vcpu_fd) | 366 | * Get size for mmap(vcpu_fd) |
367 | */ | 367 | */ |
368 | #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ | 368 | #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ |
369 | #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) | 369 | #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) |
370 | /* | 370 | /* |
371 | * ioctls for kvm trace | 371 | * ioctls for kvm trace |
372 | */ | 372 | */ |
373 | #define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) | 373 | #define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) |
374 | #define KVM_TRACE_PAUSE _IO(KVMIO, 0x07) | 374 | #define KVM_TRACE_PAUSE _IO(KVMIO, 0x07) |
375 | #define KVM_TRACE_DISABLE _IO(KVMIO, 0x08) | 375 | #define KVM_TRACE_DISABLE _IO(KVMIO, 0x08) |
376 | /* | 376 | /* |
377 | * Extension capability list. | 377 | * Extension capability list. |
378 | */ | 378 | */ |
379 | #define KVM_CAP_IRQCHIP 0 | 379 | #define KVM_CAP_IRQCHIP 0 |
380 | #define KVM_CAP_HLT 1 | 380 | #define KVM_CAP_HLT 1 |
381 | #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 | 381 | #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 |
382 | #define KVM_CAP_USER_MEMORY 3 | 382 | #define KVM_CAP_USER_MEMORY 3 |
383 | #define KVM_CAP_SET_TSS_ADDR 4 | 383 | #define KVM_CAP_SET_TSS_ADDR 4 |
384 | #define KVM_CAP_VAPIC 6 | 384 | #define KVM_CAP_VAPIC 6 |
385 | #define KVM_CAP_EXT_CPUID 7 | 385 | #define KVM_CAP_EXT_CPUID 7 |
386 | #define KVM_CAP_CLOCKSOURCE 8 | 386 | #define KVM_CAP_CLOCKSOURCE 8 |
387 | #define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */ | 387 | #define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */ |
388 | #define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */ | 388 | #define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */ |
389 | #define KVM_CAP_PIT 11 | 389 | #define KVM_CAP_PIT 11 |
390 | #define KVM_CAP_NOP_IO_DELAY 12 | 390 | #define KVM_CAP_NOP_IO_DELAY 12 |
391 | #define KVM_CAP_PV_MMU 13 | 391 | #define KVM_CAP_PV_MMU 13 |
392 | #define KVM_CAP_MP_STATE 14 | 392 | #define KVM_CAP_MP_STATE 14 |
393 | #define KVM_CAP_COALESCED_MMIO 15 | 393 | #define KVM_CAP_COALESCED_MMIO 15 |
394 | #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ | 394 | #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ |
395 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 395 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
396 | #define KVM_CAP_DEVICE_ASSIGNMENT 17 | 396 | #define KVM_CAP_DEVICE_ASSIGNMENT 17 |
397 | #endif | 397 | #endif |
398 | #define KVM_CAP_IOMMU 18 | 398 | #define KVM_CAP_IOMMU 18 |
399 | #ifdef __KVM_HAVE_MSI | 399 | #ifdef __KVM_HAVE_MSI |
400 | #define KVM_CAP_DEVICE_MSI 20 | 400 | #define KVM_CAP_DEVICE_MSI 20 |
401 | #endif | 401 | #endif |
402 | /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ | 402 | /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ |
403 | #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 | 403 | #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 |
404 | #ifdef __KVM_HAVE_USER_NMI | 404 | #ifdef __KVM_HAVE_USER_NMI |
405 | #define KVM_CAP_USER_NMI 22 | 405 | #define KVM_CAP_USER_NMI 22 |
406 | #endif | 406 | #endif |
407 | #ifdef __KVM_HAVE_GUEST_DEBUG | 407 | #ifdef __KVM_HAVE_GUEST_DEBUG |
408 | #define KVM_CAP_SET_GUEST_DEBUG 23 | 408 | #define KVM_CAP_SET_GUEST_DEBUG 23 |
409 | #endif | 409 | #endif |
410 | #ifdef __KVM_HAVE_PIT | 410 | #ifdef __KVM_HAVE_PIT |
411 | #define KVM_CAP_REINJECT_CONTROL 24 | 411 | #define KVM_CAP_REINJECT_CONTROL 24 |
412 | #endif | 412 | #endif |
413 | #ifdef __KVM_HAVE_IOAPIC | 413 | #ifdef __KVM_HAVE_IOAPIC |
414 | #define KVM_CAP_IRQ_ROUTING 25 | 414 | #define KVM_CAP_IRQ_ROUTING 25 |
415 | #endif | 415 | #endif |
416 | #define KVM_CAP_IRQ_INJECT_STATUS 26 | 416 | #define KVM_CAP_IRQ_INJECT_STATUS 26 |
417 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 417 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
418 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 | 418 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 |
419 | #endif | 419 | #endif |
420 | #ifdef __KVM_HAVE_MSIX | 420 | #ifdef __KVM_HAVE_MSIX |
421 | #define KVM_CAP_DEVICE_MSIX 28 | 421 | #define KVM_CAP_DEVICE_MSIX 28 |
422 | #endif | 422 | #endif |
423 | #define KVM_CAP_ASSIGN_DEV_IRQ 29 | 423 | #define KVM_CAP_ASSIGN_DEV_IRQ 29 |
424 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | 424 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ |
425 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | 425 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 |
426 | #ifdef __KVM_HAVE_MCE | 426 | #ifdef __KVM_HAVE_MCE |
427 | #define KVM_CAP_MCE 31 | 427 | #define KVM_CAP_MCE 31 |
428 | #endif | 428 | #endif |
429 | #define KVM_CAP_IRQFD 32 | 429 | #define KVM_CAP_IRQFD 32 |
430 | #ifdef __KVM_HAVE_PIT | 430 | #ifdef __KVM_HAVE_PIT |
431 | #define KVM_CAP_PIT2 33 | 431 | #define KVM_CAP_PIT2 33 |
432 | #endif | 432 | #endif |
433 | #define KVM_CAP_SET_BOOT_CPU_ID 34 | ||
433 | 434 | ||
434 | #ifdef KVM_CAP_IRQ_ROUTING | 435 | #ifdef KVM_CAP_IRQ_ROUTING |
435 | 436 | ||
436 | struct kvm_irq_routing_irqchip { | 437 | struct kvm_irq_routing_irqchip { |
437 | __u32 irqchip; | 438 | __u32 irqchip; |
438 | __u32 pin; | 439 | __u32 pin; |
439 | }; | 440 | }; |
440 | 441 | ||
441 | struct kvm_irq_routing_msi { | 442 | struct kvm_irq_routing_msi { |
442 | __u32 address_lo; | 443 | __u32 address_lo; |
443 | __u32 address_hi; | 444 | __u32 address_hi; |
444 | __u32 data; | 445 | __u32 data; |
445 | __u32 pad; | 446 | __u32 pad; |
446 | }; | 447 | }; |
447 | 448 | ||
448 | /* gsi routing entry types */ | 449 | /* gsi routing entry types */ |
449 | #define KVM_IRQ_ROUTING_IRQCHIP 1 | 450 | #define KVM_IRQ_ROUTING_IRQCHIP 1 |
450 | #define KVM_IRQ_ROUTING_MSI 2 | 451 | #define KVM_IRQ_ROUTING_MSI 2 |
451 | 452 | ||
452 | struct kvm_irq_routing_entry { | 453 | struct kvm_irq_routing_entry { |
453 | __u32 gsi; | 454 | __u32 gsi; |
454 | __u32 type; | 455 | __u32 type; |
455 | __u32 flags; | 456 | __u32 flags; |
456 | __u32 pad; | 457 | __u32 pad; |
457 | union { | 458 | union { |
458 | struct kvm_irq_routing_irqchip irqchip; | 459 | struct kvm_irq_routing_irqchip irqchip; |
459 | struct kvm_irq_routing_msi msi; | 460 | struct kvm_irq_routing_msi msi; |
460 | __u32 pad[8]; | 461 | __u32 pad[8]; |
461 | } u; | 462 | } u; |
462 | }; | 463 | }; |
463 | 464 | ||
464 | struct kvm_irq_routing { | 465 | struct kvm_irq_routing { |
465 | __u32 nr; | 466 | __u32 nr; |
466 | __u32 flags; | 467 | __u32 flags; |
467 | struct kvm_irq_routing_entry entries[0]; | 468 | struct kvm_irq_routing_entry entries[0]; |
468 | }; | 469 | }; |
469 | 470 | ||
470 | #endif | 471 | #endif |
471 | 472 | ||
472 | #ifdef KVM_CAP_MCE | 473 | #ifdef KVM_CAP_MCE |
473 | /* x86 MCE */ | 474 | /* x86 MCE */ |
474 | struct kvm_x86_mce { | 475 | struct kvm_x86_mce { |
475 | __u64 status; | 476 | __u64 status; |
476 | __u64 addr; | 477 | __u64 addr; |
477 | __u64 misc; | 478 | __u64 misc; |
478 | __u64 mcg_status; | 479 | __u64 mcg_status; |
479 | __u8 bank; | 480 | __u8 bank; |
480 | __u8 pad1[7]; | 481 | __u8 pad1[7]; |
481 | __u64 pad2[3]; | 482 | __u64 pad2[3]; |
482 | }; | 483 | }; |
483 | #endif | 484 | #endif |
484 | 485 | ||
485 | #define KVM_IRQFD_FLAG_DEASSIGN (1 << 0) | 486 | #define KVM_IRQFD_FLAG_DEASSIGN (1 << 0) |
486 | 487 | ||
487 | struct kvm_irqfd { | 488 | struct kvm_irqfd { |
488 | __u32 fd; | 489 | __u32 fd; |
489 | __u32 gsi; | 490 | __u32 gsi; |
490 | __u32 flags; | 491 | __u32 flags; |
491 | __u8 pad[20]; | 492 | __u8 pad[20]; |
492 | }; | 493 | }; |
493 | 494 | ||
494 | /* | 495 | /* |
495 | * ioctls for VM fds | 496 | * ioctls for VM fds |
496 | */ | 497 | */ |
497 | #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) | 498 | #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) |
498 | /* | 499 | /* |
499 | * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns | 500 | * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns |
500 | * a vcpu fd. | 501 | * a vcpu fd. |
501 | */ | 502 | */ |
502 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) | 503 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) |
503 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) | 504 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) |
504 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) | 505 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) |
505 | #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) | 506 | #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) |
506 | #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) | 507 | #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) |
507 | #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\ | 508 | #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\ |
508 | struct kvm_userspace_memory_region) | 509 | struct kvm_userspace_memory_region) |
509 | #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) | 510 | #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) |
510 | /* Device model IOC */ | 511 | /* Device model IOC */ |
511 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) | 512 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) |
512 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) | 513 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) |
513 | #define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) | 514 | #define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) |
514 | #define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) | 515 | #define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) |
515 | #define KVM_CREATE_PIT _IO(KVMIO, 0x64) | 516 | #define KVM_CREATE_PIT _IO(KVMIO, 0x64) |
516 | #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) | 517 | #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) |
517 | #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) | 518 | #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) |
518 | #define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level) | 519 | #define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level) |
519 | #define KVM_REGISTER_COALESCED_MMIO \ | 520 | #define KVM_REGISTER_COALESCED_MMIO \ |
520 | _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) | 521 | _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) |
521 | #define KVM_UNREGISTER_COALESCED_MMIO \ | 522 | #define KVM_UNREGISTER_COALESCED_MMIO \ |
522 | _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) | 523 | _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) |
523 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ | 524 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ |
524 | struct kvm_assigned_pci_dev) | 525 | struct kvm_assigned_pci_dev) |
525 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) | 526 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) |
526 | /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ | 527 | /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ |
527 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ | 528 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ |
528 | struct kvm_assigned_irq) | 529 | struct kvm_assigned_irq) |
529 | #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) | 530 | #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) |
530 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) | 531 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) |
531 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ | 532 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ |
532 | struct kvm_assigned_pci_dev) | 533 | struct kvm_assigned_pci_dev) |
533 | #define KVM_ASSIGN_SET_MSIX_NR \ | 534 | #define KVM_ASSIGN_SET_MSIX_NR \ |
534 | _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) | 535 | _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) |
535 | #define KVM_ASSIGN_SET_MSIX_ENTRY \ | 536 | #define KVM_ASSIGN_SET_MSIX_ENTRY \ |
536 | _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) | 537 | _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) |
537 | #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) | 538 | #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) |
538 | #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) | 539 | #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) |
539 | #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) | 540 | #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) |
541 | #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) | ||
540 | 542 | ||
541 | /* | 543 | /* |
542 | * ioctls for vcpu fds | 544 | * ioctls for vcpu fds |
543 | */ | 545 | */ |
544 | #define KVM_RUN _IO(KVMIO, 0x80) | 546 | #define KVM_RUN _IO(KVMIO, 0x80) |
545 | #define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) | 547 | #define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) |
546 | #define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) | 548 | #define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) |
547 | #define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) | 549 | #define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) |
548 | #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) | 550 | #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) |
549 | #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) | 551 | #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) |
550 | #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) | 552 | #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) |
551 | /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ | 553 | /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ |
552 | #define KVM_DEBUG_GUEST __KVM_DEPRECATED_DEBUG_GUEST | 554 | #define KVM_DEBUG_GUEST __KVM_DEPRECATED_DEBUG_GUEST |
553 | #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) | 555 | #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) |
554 | #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) | 556 | #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) |
555 | #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) | 557 | #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) |
556 | #define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask) | 558 | #define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask) |
557 | #define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu) | 559 | #define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu) |
558 | #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) | 560 | #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) |
559 | #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) | 561 | #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) |
560 | #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) | 562 | #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) |
561 | #define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2) | 563 | #define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2) |
562 | #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) | 564 | #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) |
563 | /* Available with KVM_CAP_VAPIC */ | 565 | /* Available with KVM_CAP_VAPIC */ |
564 | #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) | 566 | #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) |
565 | /* Available with KVM_CAP_VAPIC */ | 567 | /* Available with KVM_CAP_VAPIC */ |
566 | #define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) | 568 | #define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) |
567 | /* valid for virtual machine (for floating interrupt)_and_ vcpu */ | 569 | /* valid for virtual machine (for floating interrupt)_and_ vcpu */ |
568 | #define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt) | 570 | #define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt) |
569 | /* store status for s390 */ | 571 | /* store status for s390 */ |
570 | #define KVM_S390_STORE_STATUS_NOADDR (-1ul) | 572 | #define KVM_S390_STORE_STATUS_NOADDR (-1ul) |
571 | #define KVM_S390_STORE_STATUS_PREFIXED (-2ul) | 573 | #define KVM_S390_STORE_STATUS_PREFIXED (-2ul) |
572 | #define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long) | 574 | #define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long) |
573 | /* initial ipl psw for s390 */ | 575 | /* initial ipl psw for s390 */ |
574 | #define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw) | 576 | #define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw) |
575 | /* initial reset for s390 */ | 577 | /* initial reset for s390 */ |
576 | #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) | 578 | #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) |
577 | #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) | 579 | #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) |
578 | #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) | 580 | #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) |
579 | /* Available with KVM_CAP_NMI */ | 581 | /* Available with KVM_CAP_NMI */ |
580 | #define KVM_NMI _IO(KVMIO, 0x9a) | 582 | #define KVM_NMI _IO(KVMIO, 0x9a) |
581 | /* Available with KVM_CAP_SET_GUEST_DEBUG */ | 583 | /* Available with KVM_CAP_SET_GUEST_DEBUG */ |
582 | #define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) | 584 | #define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) |
583 | /* MCE for x86 */ | 585 | /* MCE for x86 */ |
584 | #define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64) | 586 | #define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64) |
585 | #define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64) | 587 | #define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64) |
586 | #define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce) | 588 | #define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce) |
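The vcpu ioctls above are issued against the file descriptor returned by KVM_CREATE_VCPU. A minimal userspace sketch, not part of this change, showing how one of the listed ioctls (KVM_GET_MP_STATE) is typically called; the helper name and error handling are illustrative:

```c
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: vcpu_fd is assumed to come from
 * ioctl(vm_fd, KVM_CREATE_VCPU, id). */
static int dump_mp_state(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0) {
		perror("KVM_GET_MP_STATE");
		return -1;
	}
	printf("mp_state = %u\n", mp.mp_state);
	return 0;
}
```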
587 | 589 | ||
588 | /* | 590 | /* |
589 | * Deprecated interfaces | 591 | * Deprecated interfaces |
590 | */ | 592 | */ |
591 | struct kvm_breakpoint { | 593 | struct kvm_breakpoint { |
592 | __u32 enabled; | 594 | __u32 enabled; |
593 | __u32 padding; | 595 | __u32 padding; |
594 | __u64 address; | 596 | __u64 address; |
595 | }; | 597 | }; |
596 | 598 | ||
597 | struct kvm_debug_guest { | 599 | struct kvm_debug_guest { |
598 | __u32 enabled; | 600 | __u32 enabled; |
599 | __u32 pad; | 601 | __u32 pad; |
600 | struct kvm_breakpoint breakpoints[4]; | 602 | struct kvm_breakpoint breakpoints[4]; |
601 | __u32 singlestep; | 603 | __u32 singlestep; |
602 | }; | 604 | }; |
603 | 605 | ||
604 | #define __KVM_DEPRECATED_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest) | 606 | #define __KVM_DEPRECATED_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest) |
605 | 607 | ||
606 | #define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *) | 608 | #define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *) |
607 | #define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *) | 609 | #define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *) |
608 | 610 | ||
609 | #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) | 611 | #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) |
610 | #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) | 612 | #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) |
611 | #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) | 613 | #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) |
612 | #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) | 614 | #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) |
613 | #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) | 615 | #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) |
614 | #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) | 616 | #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) |
615 | #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) | 617 | #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) |
616 | #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) | 618 | #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) |
617 | #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) | 619 | #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) |
618 | #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) | 620 | #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) |
619 | #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) | 621 | #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) |
620 | #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) | 622 | #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) |
621 | #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) | 623 | #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) |
622 | #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) | 624 | #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) |
623 | #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) | 625 | #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) |
624 | #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) | 626 | #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) |
625 | #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) | 627 | #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) |
626 | #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) | 628 | #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) |
627 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) | 629 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) |
628 | #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) | 630 | #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) |
629 | #define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) | 631 | #define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) |
630 | #define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) | 632 | #define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) |
631 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) | 633 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) |
632 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) | 634 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) |
633 | 635 | ||
634 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | 636 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) |
635 | 637 | ||
636 | struct kvm_assigned_pci_dev { | 638 | struct kvm_assigned_pci_dev { |
637 | __u32 assigned_dev_id; | 639 | __u32 assigned_dev_id; |
638 | __u32 busnr; | 640 | __u32 busnr; |
639 | __u32 devfn; | 641 | __u32 devfn; |
640 | __u32 flags; | 642 | __u32 flags; |
641 | union { | 643 | union { |
642 | __u32 reserved[12]; | 644 | __u32 reserved[12]; |
643 | }; | 645 | }; |
644 | }; | 646 | }; |
645 | 647 | ||
646 | #define KVM_DEV_IRQ_HOST_INTX (1 << 0) | 648 | #define KVM_DEV_IRQ_HOST_INTX (1 << 0) |
647 | #define KVM_DEV_IRQ_HOST_MSI (1 << 1) | 649 | #define KVM_DEV_IRQ_HOST_MSI (1 << 1) |
648 | #define KVM_DEV_IRQ_HOST_MSIX (1 << 2) | 650 | #define KVM_DEV_IRQ_HOST_MSIX (1 << 2) |
649 | 651 | ||
650 | #define KVM_DEV_IRQ_GUEST_INTX (1 << 8) | 652 | #define KVM_DEV_IRQ_GUEST_INTX (1 << 8) |
651 | #define KVM_DEV_IRQ_GUEST_MSI (1 << 9) | 653 | #define KVM_DEV_IRQ_GUEST_MSI (1 << 9) |
652 | #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) | 654 | #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) |
653 | 655 | ||
654 | #define KVM_DEV_IRQ_HOST_MASK 0x00ff | 656 | #define KVM_DEV_IRQ_HOST_MASK 0x00ff |
655 | #define KVM_DEV_IRQ_GUEST_MASK 0xff00 | 657 | #define KVM_DEV_IRQ_GUEST_MASK 0xff00 |
656 | 658 | ||
657 | struct kvm_assigned_irq { | 659 | struct kvm_assigned_irq { |
658 | __u32 assigned_dev_id; | 660 | __u32 assigned_dev_id; |
659 | __u32 host_irq; | 661 | __u32 host_irq; |
660 | __u32 guest_irq; | 662 | __u32 guest_irq; |
661 | __u32 flags; | 663 | __u32 flags; |
662 | union { | 664 | union { |
663 | struct { | 665 | struct { |
664 | __u32 addr_lo; | 666 | __u32 addr_lo; |
665 | __u32 addr_hi; | 667 | __u32 addr_hi; |
666 | __u32 data; | 668 | __u32 data; |
667 | } guest_msi; | 669 | } guest_msi; |
668 | __u32 reserved[12]; | 670 | __u32 reserved[12]; |
669 | }; | 671 | }; |
670 | }; | 672 | }; |
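The KVM_DEV_IRQ_HOST_* bits occupy the low byte of kvm_assigned_irq.flags and the KVM_DEV_IRQ_GUEST_* bits the high byte, so a single flags word carries both halves. A purely illustrative sketch of splitting a flags word with the masks defined above:

```c
/* Illustrative only: split an assigned-IRQ flags word into its
 * host and guest halves using the masks defined above. */
static inline unsigned int host_irq_type(unsigned int flags)
{
	return flags & KVM_DEV_IRQ_HOST_MASK;	/* e.g. KVM_DEV_IRQ_HOST_MSI */
}

static inline unsigned int guest_irq_type(unsigned int flags)
{
	return flags & KVM_DEV_IRQ_GUEST_MASK;	/* e.g. KVM_DEV_IRQ_GUEST_MSI */
}
```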
671 | 673 | ||
672 | 674 | ||
673 | struct kvm_assigned_msix_nr { | 675 | struct kvm_assigned_msix_nr { |
674 | __u32 assigned_dev_id; | 676 | __u32 assigned_dev_id; |
675 | __u16 entry_nr; | 677 | __u16 entry_nr; |
676 | __u16 padding; | 678 | __u16 padding; |
677 | }; | 679 | }; |
678 | 680 | ||
679 | #define KVM_MAX_MSIX_PER_DEV 256 | 681 | #define KVM_MAX_MSIX_PER_DEV 256 |
680 | struct kvm_assigned_msix_entry { | 682 | struct kvm_assigned_msix_entry { |
681 | __u32 assigned_dev_id; | 683 | __u32 assigned_dev_id; |
682 | __u32 gsi; | 684 | __u32 gsi; |
683 | __u16 entry; /* The index of entry in the MSI-X table */ | 685 | __u16 entry; /* The index of entry in the MSI-X table */ |
684 | __u16 padding[3]; | 686 | __u16 padding[3]; |
685 | }; | 687 | }; |
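Userspace describes an assigned device's MSI-X layout by first setting the number of entries and then each entry's MSI-X table index and GSI. A hedged sketch: the ioctl names KVM_ASSIGN_SET_MSIX_NR and KVM_ASSIGN_SET_MSIX_ENTRY are assumed to be defined elsewhere in this header (they are not in the hunk shown), and the GSI value is made up for illustration:

```c
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: describe a two-entry MSI-X layout for an assigned
 * device; ioctl names are assumed from elsewhere in this header. */
static int setup_msix(int vm_fd, __u32 dev_id)
{
	struct kvm_assigned_msix_nr nr = {
		.assigned_dev_id = dev_id,
		.entry_nr        = 2,
	};
	struct kvm_assigned_msix_entry ent = {
		.assigned_dev_id = dev_id,
		.gsi             = 24,	/* illustrative GSI */
		.entry           = 0,	/* index into the MSI-X table */
	};

	if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr) < 0)
		return -1;
	return ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &ent);
}
```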
686 | 688 | ||
687 | #endif | 689 | #endif |
688 | 690 |
include/linux/kvm_host.h
1 | #ifndef __KVM_HOST_H | 1 | #ifndef __KVM_HOST_H |
2 | #define __KVM_HOST_H | 2 | #define __KVM_HOST_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This work is licensed under the terms of the GNU GPL, version 2. See | 5 | * This work is licensed under the terms of the GNU GPL, version 2. See |
6 | * the COPYING file in the top-level directory. | 6 | * the COPYING file in the top-level directory. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/hardirq.h> | 10 | #include <linux/hardirq.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/preempt.h> | 17 | #include <linux/preempt.h> |
18 | #include <linux/marker.h> | 18 | #include <linux/marker.h> |
19 | #include <linux/msi.h> | 19 | #include <linux/msi.h> |
20 | #include <asm/signal.h> | 20 | #include <asm/signal.h> |
21 | 21 | ||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_para.h> | 23 | #include <linux/kvm_para.h> |
24 | 24 | ||
25 | #include <linux/kvm_types.h> | 25 | #include <linux/kvm_types.h> |
26 | 26 | ||
27 | #include <asm/kvm_host.h> | 27 | #include <asm/kvm_host.h> |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * vcpu->requests bit members | 30 | * vcpu->requests bit members |
31 | */ | 31 | */ |
32 | #define KVM_REQ_TLB_FLUSH 0 | 32 | #define KVM_REQ_TLB_FLUSH 0 |
33 | #define KVM_REQ_MIGRATE_TIMER 1 | 33 | #define KVM_REQ_MIGRATE_TIMER 1 |
34 | #define KVM_REQ_REPORT_TPR_ACCESS 2 | 34 | #define KVM_REQ_REPORT_TPR_ACCESS 2 |
35 | #define KVM_REQ_MMU_RELOAD 3 | 35 | #define KVM_REQ_MMU_RELOAD 3 |
36 | #define KVM_REQ_TRIPLE_FAULT 4 | 36 | #define KVM_REQ_TRIPLE_FAULT 4 |
37 | #define KVM_REQ_PENDING_TIMER 5 | 37 | #define KVM_REQ_PENDING_TIMER 5 |
38 | #define KVM_REQ_UNHALT 6 | 38 | #define KVM_REQ_UNHALT 6 |
39 | #define KVM_REQ_MMU_SYNC 7 | 39 | #define KVM_REQ_MMU_SYNC 7 |
40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 |
41 | #define KVM_REQ_KICK 9 | 41 | #define KVM_REQ_KICK 9 |
42 | 42 | ||
43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
44 | 44 | ||
45 | struct kvm_vcpu; | 45 | struct kvm_vcpu; |
46 | extern struct kmem_cache *kvm_vcpu_cache; | 46 | extern struct kmem_cache *kvm_vcpu_cache; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * It would be nice to use something smarter than a linear search, TBD... | 49 | * It would be nice to use something smarter than a linear search, TBD... |
50 | * Thankfully we don't expect many devices to register (famous last words :), | 50 | * Thankfully we don't expect many devices to register (famous last words :), |
51 | * so until then it will suffice. At least it's abstracted so we can change | 51 | * so until then it will suffice. At least it's abstracted so we can change |
52 | * in one place. | 52 | * in one place. |
53 | */ | 53 | */ |
54 | struct kvm_io_bus { | 54 | struct kvm_io_bus { |
55 | int dev_count; | 55 | int dev_count; |
56 | #define NR_IOBUS_DEVS 6 | 56 | #define NR_IOBUS_DEVS 6 |
57 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | 57 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | void kvm_io_bus_init(struct kvm_io_bus *bus); | 60 | void kvm_io_bus_init(struct kvm_io_bus *bus); |
61 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); | 61 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); |
62 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, | 62 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, |
63 | gpa_t addr, int len, int is_write); | 63 | gpa_t addr, int len, int is_write); |
64 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, | 64 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, |
65 | struct kvm_io_device *dev); | 65 | struct kvm_io_device *dev); |
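As the comment above notes, lookup on a kvm_io_bus is a linear scan over devs[]. A simplified sketch of that scan; the real kvm_io_bus_find_dev() asks each registered device whether it covers the address range, whereas the predicate below is supplied by the caller purely for illustration:

```c
/* Simplified sketch of the linear scan; match() stands in for the
 * per-device range check performed by the real lookup. */
static struct kvm_io_device *bus_scan(struct kvm_io_bus *bus,
				      bool (*match)(struct kvm_io_device *dev))
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (match(bus->devs[i]))
			return bus->devs[i];
	return NULL;
}
```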
66 | 66 | ||
67 | struct kvm_vcpu { | 67 | struct kvm_vcpu { |
68 | struct kvm *kvm; | 68 | struct kvm *kvm; |
69 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 69 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
70 | struct preempt_notifier preempt_notifier; | 70 | struct preempt_notifier preempt_notifier; |
71 | #endif | 71 | #endif |
72 | int vcpu_id; | 72 | int vcpu_id; |
73 | struct mutex mutex; | 73 | struct mutex mutex; |
74 | int cpu; | 74 | int cpu; |
75 | struct kvm_run *run; | 75 | struct kvm_run *run; |
76 | unsigned long requests; | 76 | unsigned long requests; |
77 | unsigned long guest_debug; | 77 | unsigned long guest_debug; |
78 | int fpu_active; | 78 | int fpu_active; |
79 | int guest_fpu_loaded; | 79 | int guest_fpu_loaded; |
80 | wait_queue_head_t wq; | 80 | wait_queue_head_t wq; |
81 | int sigset_active; | 81 | int sigset_active; |
82 | sigset_t sigset; | 82 | sigset_t sigset; |
83 | struct kvm_vcpu_stat stat; | 83 | struct kvm_vcpu_stat stat; |
84 | 84 | ||
85 | #ifdef CONFIG_HAS_IOMEM | 85 | #ifdef CONFIG_HAS_IOMEM |
86 | int mmio_needed; | 86 | int mmio_needed; |
87 | int mmio_read_completed; | 87 | int mmio_read_completed; |
88 | int mmio_is_write; | 88 | int mmio_is_write; |
89 | int mmio_size; | 89 | int mmio_size; |
90 | unsigned char mmio_data[8]; | 90 | unsigned char mmio_data[8]; |
91 | gpa_t mmio_phys_addr; | 91 | gpa_t mmio_phys_addr; |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | struct kvm_vcpu_arch arch; | 94 | struct kvm_vcpu_arch arch; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | struct kvm_memory_slot { | 97 | struct kvm_memory_slot { |
98 | gfn_t base_gfn; | 98 | gfn_t base_gfn; |
99 | unsigned long npages; | 99 | unsigned long npages; |
100 | unsigned long flags; | 100 | unsigned long flags; |
101 | unsigned long *rmap; | 101 | unsigned long *rmap; |
102 | unsigned long *dirty_bitmap; | 102 | unsigned long *dirty_bitmap; |
103 | struct { | 103 | struct { |
104 | unsigned long rmap_pde; | 104 | unsigned long rmap_pde; |
105 | int write_count; | 105 | int write_count; |
106 | } *lpage_info; | 106 | } *lpage_info; |
107 | unsigned long userspace_addr; | 107 | unsigned long userspace_addr; |
108 | int user_alloc; | 108 | int user_alloc; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | struct kvm_kernel_irq_routing_entry { | 111 | struct kvm_kernel_irq_routing_entry { |
112 | u32 gsi; | 112 | u32 gsi; |
113 | u32 type; | 113 | u32 type; |
114 | int (*set)(struct kvm_kernel_irq_routing_entry *e, | 114 | int (*set)(struct kvm_kernel_irq_routing_entry *e, |
115 | struct kvm *kvm, int level); | 115 | struct kvm *kvm, int level); |
116 | union { | 116 | union { |
117 | struct { | 117 | struct { |
118 | unsigned irqchip; | 118 | unsigned irqchip; |
119 | unsigned pin; | 119 | unsigned pin; |
120 | } irqchip; | 120 | } irqchip; |
121 | struct msi_msg msi; | 121 | struct msi_msg msi; |
122 | }; | 122 | }; |
123 | struct list_head link; | 123 | struct list_head link; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | struct kvm { | 126 | struct kvm { |
127 | spinlock_t mmu_lock; | 127 | spinlock_t mmu_lock; |
128 | spinlock_t requests_lock; | 128 | spinlock_t requests_lock; |
129 | struct rw_semaphore slots_lock; | 129 | struct rw_semaphore slots_lock; |
130 | struct mm_struct *mm; /* userspace tied to this vm */ | 130 | struct mm_struct *mm; /* userspace tied to this vm */ |
131 | int nmemslots; | 131 | int nmemslots; |
132 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + | 132 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + |
133 | KVM_PRIVATE_MEM_SLOTS]; | 133 | KVM_PRIVATE_MEM_SLOTS]; |
134 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | ||
135 | u32 bsp_vcpu_id; | ||
134 | struct kvm_vcpu *bsp_vcpu; | 136 | struct kvm_vcpu *bsp_vcpu; |
137 | #endif | ||
135 | struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; | 138 | struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; |
139 | atomic_t online_vcpus; | ||
136 | struct list_head vm_list; | 140 | struct list_head vm_list; |
137 | struct mutex lock; | 141 | struct mutex lock; |
138 | struct kvm_io_bus mmio_bus; | 142 | struct kvm_io_bus mmio_bus; |
139 | struct kvm_io_bus pio_bus; | 143 | struct kvm_io_bus pio_bus; |
140 | #ifdef CONFIG_HAVE_KVM_EVENTFD | 144 | #ifdef CONFIG_HAVE_KVM_EVENTFD |
141 | struct { | 145 | struct { |
142 | spinlock_t lock; | 146 | spinlock_t lock; |
143 | struct list_head items; | 147 | struct list_head items; |
144 | } irqfds; | 148 | } irqfds; |
145 | #endif | 149 | #endif |
146 | struct kvm_vm_stat stat; | 150 | struct kvm_vm_stat stat; |
147 | struct kvm_arch arch; | 151 | struct kvm_arch arch; |
148 | atomic_t users_count; | 152 | atomic_t users_count; |
149 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 153 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
150 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; | 154 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; |
151 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; | 155 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
152 | #endif | 156 | #endif |
153 | 157 | ||
154 | struct mutex irq_lock; | 158 | struct mutex irq_lock; |
155 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 159 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
156 | struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */ | 160 | struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */ |
157 | struct hlist_head mask_notifier_list; | 161 | struct hlist_head mask_notifier_list; |
158 | #endif | 162 | #endif |
159 | 163 | ||
160 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER | 164 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER |
161 | struct mmu_notifier mmu_notifier; | 165 | struct mmu_notifier mmu_notifier; |
162 | unsigned long mmu_notifier_seq; | 166 | unsigned long mmu_notifier_seq; |
163 | long mmu_notifier_count; | 167 | long mmu_notifier_count; |
164 | #endif | 168 | #endif |
165 | }; | 169 | }; |
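With bsp_vcpu_id and online_vcpus added to struct kvm, the boot vcpu is no longer tied to slot 0 of vcpus[], and iteration can be bounded by the online count instead of scanning the whole array. A sketch of walking the online vcpus, assuming vcpus[] is filled front-to-back and online_vcpus is incremented only after the slot has been published:

```c
/* Sketch only: visit every vcpu that has completed creation.
 * Assumes vcpus[] is populated front-to-back and online_vcpus is
 * bumped only after the corresponding slot has been set. */
static void for_each_online_vcpu(struct kvm *kvm,
				 void (*fn)(struct kvm_vcpu *vcpu))
{
	int i;

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		if (kvm->vcpus[i])
			fn(kvm->vcpus[i]);
}
```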
166 | 170 | ||
167 | /* The guest did something we don't support. */ | 171 | /* The guest did something we don't support. */ |
168 | #define pr_unimpl(vcpu, fmt, ...) \ | 172 | #define pr_unimpl(vcpu, fmt, ...) \ |
169 | do { \ | 173 | do { \ |
170 | if (printk_ratelimit()) \ | 174 | if (printk_ratelimit()) \ |
171 | printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ | 175 | printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ |
172 | current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ | 176 | current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ |
173 | } while (0) | 177 | } while (0) |
174 | 178 | ||
175 | #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) | 179 | #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) |
176 | #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) | 180 | #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) |
177 | 181 | ||
178 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); | 182 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); |
179 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | 183 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); |
180 | 184 | ||
181 | void vcpu_load(struct kvm_vcpu *vcpu); | 185 | void vcpu_load(struct kvm_vcpu *vcpu); |
182 | void vcpu_put(struct kvm_vcpu *vcpu); | 186 | void vcpu_put(struct kvm_vcpu *vcpu); |
183 | 187 | ||
184 | int kvm_init(void *opaque, unsigned int vcpu_size, | 188 | int kvm_init(void *opaque, unsigned int vcpu_size, |
185 | struct module *module); | 189 | struct module *module); |
186 | void kvm_exit(void); | 190 | void kvm_exit(void); |
187 | 191 | ||
188 | void kvm_get_kvm(struct kvm *kvm); | 192 | void kvm_get_kvm(struct kvm *kvm); |
189 | void kvm_put_kvm(struct kvm *kvm); | 193 | void kvm_put_kvm(struct kvm *kvm); |
190 | 194 | ||
191 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) | 195 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) |
192 | #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) | 196 | #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) |
193 | static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } | 197 | static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } |
194 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); | 198 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); |
195 | 199 | ||
196 | extern struct page *bad_page; | 200 | extern struct page *bad_page; |
197 | extern pfn_t bad_pfn; | 201 | extern pfn_t bad_pfn; |
198 | 202 | ||
199 | int is_error_page(struct page *page); | 203 | int is_error_page(struct page *page); |
200 | int is_error_pfn(pfn_t pfn); | 204 | int is_error_pfn(pfn_t pfn); |
201 | int kvm_is_error_hva(unsigned long addr); | 205 | int kvm_is_error_hva(unsigned long addr); |
202 | int kvm_set_memory_region(struct kvm *kvm, | 206 | int kvm_set_memory_region(struct kvm *kvm, |
203 | struct kvm_userspace_memory_region *mem, | 207 | struct kvm_userspace_memory_region *mem, |
204 | int user_alloc); | 208 | int user_alloc); |
205 | int __kvm_set_memory_region(struct kvm *kvm, | 209 | int __kvm_set_memory_region(struct kvm *kvm, |
206 | struct kvm_userspace_memory_region *mem, | 210 | struct kvm_userspace_memory_region *mem, |
207 | int user_alloc); | 211 | int user_alloc); |
208 | int kvm_arch_set_memory_region(struct kvm *kvm, | 212 | int kvm_arch_set_memory_region(struct kvm *kvm, |
209 | struct kvm_userspace_memory_region *mem, | 213 | struct kvm_userspace_memory_region *mem, |
210 | struct kvm_memory_slot old, | 214 | struct kvm_memory_slot old, |
211 | int user_alloc); | 215 | int user_alloc); |
212 | void kvm_arch_flush_shadow(struct kvm *kvm); | 216 | void kvm_arch_flush_shadow(struct kvm *kvm); |
213 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); | 217 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); |
214 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 218 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
215 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 219 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
216 | void kvm_release_page_clean(struct page *page); | 220 | void kvm_release_page_clean(struct page *page); |
217 | void kvm_release_page_dirty(struct page *page); | 221 | void kvm_release_page_dirty(struct page *page); |
218 | void kvm_set_page_dirty(struct page *page); | 222 | void kvm_set_page_dirty(struct page *page); |
219 | void kvm_set_page_accessed(struct page *page); | 223 | void kvm_set_page_accessed(struct page *page); |
220 | 224 | ||
221 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); | 225 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); |
222 | void kvm_release_pfn_dirty(pfn_t); | 226 | void kvm_release_pfn_dirty(pfn_t); |
223 | void kvm_release_pfn_clean(pfn_t pfn); | 227 | void kvm_release_pfn_clean(pfn_t pfn); |
224 | void kvm_set_pfn_dirty(pfn_t pfn); | 228 | void kvm_set_pfn_dirty(pfn_t pfn); |
225 | void kvm_set_pfn_accessed(pfn_t pfn); | 229 | void kvm_set_pfn_accessed(pfn_t pfn); |
226 | void kvm_get_pfn(pfn_t pfn); | 230 | void kvm_get_pfn(pfn_t pfn); |
227 | 231 | ||
228 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, | 232 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
229 | int len); | 233 | int len); |
230 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, | 234 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, |
231 | unsigned long len); | 235 | unsigned long len); |
232 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); | 236 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
233 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, | 237 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, |
234 | int offset, int len); | 238 | int offset, int len); |
235 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | 239 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
236 | unsigned long len); | 240 | unsigned long len); |
237 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | 241 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); |
238 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); | 242 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
239 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); | 243 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
240 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); | 244 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
241 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); | 245 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
242 | 246 | ||
243 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); | 247 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
244 | void kvm_resched(struct kvm_vcpu *vcpu); | 248 | void kvm_resched(struct kvm_vcpu *vcpu); |
245 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); | 249 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); |
246 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | 250 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); |
247 | void kvm_flush_remote_tlbs(struct kvm *kvm); | 251 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
248 | void kvm_reload_remote_mmus(struct kvm *kvm); | 252 | void kvm_reload_remote_mmus(struct kvm *kvm); |
249 | 253 | ||
250 | long kvm_arch_dev_ioctl(struct file *filp, | 254 | long kvm_arch_dev_ioctl(struct file *filp, |
251 | unsigned int ioctl, unsigned long arg); | 255 | unsigned int ioctl, unsigned long arg); |
252 | long kvm_arch_vcpu_ioctl(struct file *filp, | 256 | long kvm_arch_vcpu_ioctl(struct file *filp, |
253 | unsigned int ioctl, unsigned long arg); | 257 | unsigned int ioctl, unsigned long arg); |
254 | 258 | ||
255 | int kvm_dev_ioctl_check_extension(long ext); | 259 | int kvm_dev_ioctl_check_extension(long ext); |
256 | 260 | ||
257 | int kvm_get_dirty_log(struct kvm *kvm, | 261 | int kvm_get_dirty_log(struct kvm *kvm, |
258 | struct kvm_dirty_log *log, int *is_dirty); | 262 | struct kvm_dirty_log *log, int *is_dirty); |
259 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 263 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
260 | struct kvm_dirty_log *log); | 264 | struct kvm_dirty_log *log); |
261 | 265 | ||
262 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 266 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
263 | struct | 267 | struct |
264 | kvm_userspace_memory_region *mem, | 268 | kvm_userspace_memory_region *mem, |
265 | int user_alloc); | 269 | int user_alloc); |
266 | long kvm_arch_vm_ioctl(struct file *filp, | 270 | long kvm_arch_vm_ioctl(struct file *filp, |
267 | unsigned int ioctl, unsigned long arg); | 271 | unsigned int ioctl, unsigned long arg); |
268 | 272 | ||
269 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | 273 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
270 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | 274 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
271 | 275 | ||
272 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 276 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
273 | struct kvm_translation *tr); | 277 | struct kvm_translation *tr); |
274 | 278 | ||
275 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | 279 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
276 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | 280 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
277 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 281 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
278 | struct kvm_sregs *sregs); | 282 | struct kvm_sregs *sregs); |
279 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 283 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
280 | struct kvm_sregs *sregs); | 284 | struct kvm_sregs *sregs); |
281 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 285 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
282 | struct kvm_mp_state *mp_state); | 286 | struct kvm_mp_state *mp_state); |
283 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 287 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
284 | struct kvm_mp_state *mp_state); | 288 | struct kvm_mp_state *mp_state); |
285 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 289 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
286 | struct kvm_guest_debug *dbg); | 290 | struct kvm_guest_debug *dbg); |
287 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); | 291 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
288 | 292 | ||
289 | int kvm_arch_init(void *opaque); | 293 | int kvm_arch_init(void *opaque); |
290 | void kvm_arch_exit(void); | 294 | void kvm_arch_exit(void); |
291 | 295 | ||
292 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); | 296 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); |
293 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); | 297 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); |
294 | 298 | ||
295 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); | 299 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); |
296 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | 300 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
297 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | 301 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); |
298 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); | 302 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); |
299 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); | 303 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); |
300 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); | 304 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
301 | 305 | ||
302 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); | 306 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); |
303 | void kvm_arch_hardware_enable(void *garbage); | 307 | void kvm_arch_hardware_enable(void *garbage); |
304 | void kvm_arch_hardware_disable(void *garbage); | 308 | void kvm_arch_hardware_disable(void *garbage); |
305 | int kvm_arch_hardware_setup(void); | 309 | int kvm_arch_hardware_setup(void); |
306 | void kvm_arch_hardware_unsetup(void); | 310 | void kvm_arch_hardware_unsetup(void); |
307 | void kvm_arch_check_processor_compat(void *rtn); | 311 | void kvm_arch_check_processor_compat(void *rtn); |
308 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | 312 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
309 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 313 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
310 | 314 | ||
311 | void kvm_free_physmem(struct kvm *kvm); | 315 | void kvm_free_physmem(struct kvm *kvm); |
312 | 316 | ||
313 | struct kvm *kvm_arch_create_vm(void); | 317 | struct kvm *kvm_arch_create_vm(void); |
314 | void kvm_arch_destroy_vm(struct kvm *kvm); | 318 | void kvm_arch_destroy_vm(struct kvm *kvm); |
315 | void kvm_free_all_assigned_devices(struct kvm *kvm); | 319 | void kvm_free_all_assigned_devices(struct kvm *kvm); |
316 | void kvm_arch_sync_events(struct kvm *kvm); | 320 | void kvm_arch_sync_events(struct kvm *kvm); |
317 | 321 | ||
318 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | 322 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
319 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v); | 323 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v); |
320 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); | 324 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
321 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 325 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
322 | 326 | ||
323 | int kvm_is_mmio_pfn(pfn_t pfn); | 327 | int kvm_is_mmio_pfn(pfn_t pfn); |
324 | 328 | ||
325 | struct kvm_irq_ack_notifier { | 329 | struct kvm_irq_ack_notifier { |
326 | struct hlist_node link; | 330 | struct hlist_node link; |
327 | unsigned gsi; | 331 | unsigned gsi; |
328 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | 332 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
329 | }; | 333 | }; |
330 | 334 | ||
331 | #define KVM_ASSIGNED_MSIX_PENDING 0x1 | 335 | #define KVM_ASSIGNED_MSIX_PENDING 0x1 |
332 | struct kvm_guest_msix_entry { | 336 | struct kvm_guest_msix_entry { |
333 | u32 vector; | 337 | u32 vector; |
334 | u16 entry; | 338 | u16 entry; |
335 | u16 flags; | 339 | u16 flags; |
336 | }; | 340 | }; |
337 | 341 | ||
338 | struct kvm_assigned_dev_kernel { | 342 | struct kvm_assigned_dev_kernel { |
339 | struct kvm_irq_ack_notifier ack_notifier; | 343 | struct kvm_irq_ack_notifier ack_notifier; |
340 | struct work_struct interrupt_work; | 344 | struct work_struct interrupt_work; |
341 | struct list_head list; | 345 | struct list_head list; |
342 | int assigned_dev_id; | 346 | int assigned_dev_id; |
343 | int host_busnr; | 347 | int host_busnr; |
344 | int host_devfn; | 348 | int host_devfn; |
345 | unsigned int entries_nr; | 349 | unsigned int entries_nr; |
346 | int host_irq; | 350 | int host_irq; |
347 | bool host_irq_disabled; | 351 | bool host_irq_disabled; |
348 | struct msix_entry *host_msix_entries; | 352 | struct msix_entry *host_msix_entries; |
349 | int guest_irq; | 353 | int guest_irq; |
350 | struct kvm_guest_msix_entry *guest_msix_entries; | 354 | struct kvm_guest_msix_entry *guest_msix_entries; |
351 | unsigned long irq_requested_type; | 355 | unsigned long irq_requested_type; |
352 | int irq_source_id; | 356 | int irq_source_id; |
353 | int flags; | 357 | int flags; |
354 | struct pci_dev *dev; | 358 | struct pci_dev *dev; |
355 | struct kvm *kvm; | 359 | struct kvm *kvm; |
356 | spinlock_t assigned_dev_lock; | 360 | spinlock_t assigned_dev_lock; |
357 | }; | 361 | }; |
358 | 362 | ||
359 | struct kvm_irq_mask_notifier { | 363 | struct kvm_irq_mask_notifier { |
360 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); | 364 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); |
361 | int irq; | 365 | int irq; |
362 | struct hlist_node link; | 366 | struct hlist_node link; |
363 | }; | 367 | }; |
364 | 368 | ||
365 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, | 369 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, |
366 | struct kvm_irq_mask_notifier *kimn); | 370 | struct kvm_irq_mask_notifier *kimn); |
367 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | 371 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, |
368 | struct kvm_irq_mask_notifier *kimn); | 372 | struct kvm_irq_mask_notifier *kimn); |
369 | void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); | 373 | void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); |
370 | 374 | ||
371 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); | 375 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); |
372 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); | 376 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
373 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | 377 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
374 | struct kvm_irq_ack_notifier *kian); | 378 | struct kvm_irq_ack_notifier *kian); |
375 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | 379 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, |
376 | struct kvm_irq_ack_notifier *kian); | 380 | struct kvm_irq_ack_notifier *kian); |
377 | int kvm_request_irq_source_id(struct kvm *kvm); | 381 | int kvm_request_irq_source_id(struct kvm *kvm); |
378 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | 382 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
379 | 383 | ||
380 | /* For vcpu->arch.iommu_flags */ | 384 | /* For vcpu->arch.iommu_flags */ |
381 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 | 385 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 |
382 | 386 | ||
383 | #ifdef CONFIG_IOMMU_API | 387 | #ifdef CONFIG_IOMMU_API |
384 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, | 388 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, |
385 | unsigned long npages); | 389 | unsigned long npages); |
386 | int kvm_iommu_map_guest(struct kvm *kvm); | 390 | int kvm_iommu_map_guest(struct kvm *kvm); |
387 | int kvm_iommu_unmap_guest(struct kvm *kvm); | 391 | int kvm_iommu_unmap_guest(struct kvm *kvm); |
388 | int kvm_assign_device(struct kvm *kvm, | 392 | int kvm_assign_device(struct kvm *kvm, |
389 | struct kvm_assigned_dev_kernel *assigned_dev); | 393 | struct kvm_assigned_dev_kernel *assigned_dev); |
390 | int kvm_deassign_device(struct kvm *kvm, | 394 | int kvm_deassign_device(struct kvm *kvm, |
391 | struct kvm_assigned_dev_kernel *assigned_dev); | 395 | struct kvm_assigned_dev_kernel *assigned_dev); |
392 | #else /* CONFIG_IOMMU_API */ | 396 | #else /* CONFIG_IOMMU_API */ |
393 | static inline int kvm_iommu_map_pages(struct kvm *kvm, | 397 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
394 | gfn_t base_gfn, | 398 | gfn_t base_gfn, |
395 | unsigned long npages) | 399 | unsigned long npages) |
396 | { | 400 | { |
397 | return 0; | 401 | return 0; |
398 | } | 402 | } |
399 | 403 | ||
400 | static inline int kvm_iommu_map_guest(struct kvm *kvm) | 404 | static inline int kvm_iommu_map_guest(struct kvm *kvm) |
401 | { | 405 | { |
402 | return -ENODEV; | 406 | return -ENODEV; |
403 | } | 407 | } |
404 | 408 | ||
405 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) | 409 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) |
406 | { | 410 | { |
407 | return 0; | 411 | return 0; |
408 | } | 412 | } |
409 | 413 | ||
410 | static inline int kvm_assign_device(struct kvm *kvm, | 414 | static inline int kvm_assign_device(struct kvm *kvm, |
411 | struct kvm_assigned_dev_kernel *assigned_dev) | 415 | struct kvm_assigned_dev_kernel *assigned_dev) |
412 | { | 416 | { |
413 | return 0; | 417 | return 0; |
414 | } | 418 | } |
415 | 419 | ||
416 | static inline int kvm_deassign_device(struct kvm *kvm, | 420 | static inline int kvm_deassign_device(struct kvm *kvm, |
417 | struct kvm_assigned_dev_kernel *assigned_dev) | 421 | struct kvm_assigned_dev_kernel *assigned_dev) |
418 | { | 422 | { |
419 | return 0; | 423 | return 0; |
420 | } | 424 | } |
421 | #endif /* CONFIG_IOMMU_API */ | 425 | #endif /* CONFIG_IOMMU_API */ |
422 | 426 | ||
423 | static inline void kvm_guest_enter(void) | 427 | static inline void kvm_guest_enter(void) |
424 | { | 428 | { |
425 | account_system_vtime(current); | 429 | account_system_vtime(current); |
426 | current->flags |= PF_VCPU; | 430 | current->flags |= PF_VCPU; |
427 | } | 431 | } |
428 | 432 | ||
429 | static inline void kvm_guest_exit(void) | 433 | static inline void kvm_guest_exit(void) |
430 | { | 434 | { |
431 | account_system_vtime(current); | 435 | account_system_vtime(current); |
432 | current->flags &= ~PF_VCPU; | 436 | current->flags &= ~PF_VCPU; |
433 | } | 437 | } |
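kvm_guest_enter() and kvm_guest_exit() bracket the time a task spends running guest code so it can be accounted separately via PF_VCPU. A greatly simplified calling pattern; the real per-arch vcpu_run loop handles exits and re-entry, which is omitted here:

```c
/* Greatly simplified: only shows where the accounting hooks sit
 * around the arch-specific world switch. */
static void run_guest_once(void)
{
	kvm_guest_enter();	/* task now accounted as guest time */
	/* ... arch-specific entry into and exit from the guest ... */
	kvm_guest_exit();	/* back to normal host accounting */
}
```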
434 | 438 | ||
435 | static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) | 439 | static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) |
436 | { | 440 | { |
437 | return slot - kvm->memslots; | 441 | return slot - kvm->memslots; |
438 | } | 442 | } |
439 | 443 | ||
440 | static inline gpa_t gfn_to_gpa(gfn_t gfn) | 444 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
441 | { | 445 | { |
442 | return (gpa_t)gfn << PAGE_SHIFT; | 446 | return (gpa_t)gfn << PAGE_SHIFT; |
443 | } | 447 | } |
444 | 448 | ||
445 | static inline hpa_t pfn_to_hpa(pfn_t pfn) | 449 | static inline hpa_t pfn_to_hpa(pfn_t pfn) |
446 | { | 450 | { |
447 | return (hpa_t)pfn << PAGE_SHIFT; | 451 | return (hpa_t)pfn << PAGE_SHIFT; |
448 | } | 452 | } |
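gfn_to_gpa() and pfn_to_hpa() are plain shifts by PAGE_SHIFT. A tiny worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

```c
/* Illustrative only, assuming PAGE_SHIFT == 12: frame 0x1234 maps to
 * byte address 0x1234000 in both the guest- and host-physical cases. */
static inline void page_shift_example(void)
{
	WARN_ON(gfn_to_gpa(0x1234) != 0x1234000ULL);
	WARN_ON(pfn_to_hpa(0x1234) != 0x1234000ULL);
}
```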
449 | 453 | ||
450 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) | 454 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) |
451 | { | 455 | { |
452 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); | 456 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); |
453 | } | 457 | } |
454 | 458 | ||
455 | enum kvm_stat_kind { | 459 | enum kvm_stat_kind { |
456 | KVM_STAT_VM, | 460 | KVM_STAT_VM, |
457 | KVM_STAT_VCPU, | 461 | KVM_STAT_VCPU, |
458 | }; | 462 | }; |
459 | 463 | ||
460 | struct kvm_stats_debugfs_item { | 464 | struct kvm_stats_debugfs_item { |
461 | const char *name; | 465 | const char *name; |
462 | int offset; | 466 | int offset; |
463 | enum kvm_stat_kind kind; | 467 | enum kvm_stat_kind kind; |
464 | struct dentry *dentry; | 468 | struct dentry *dentry; |
465 | }; | 469 | }; |
466 | extern struct kvm_stats_debugfs_item debugfs_entries[]; | 470 | extern struct kvm_stats_debugfs_item debugfs_entries[]; |
467 | extern struct dentry *kvm_debugfs_dir; | 471 | extern struct dentry *kvm_debugfs_dir; |
468 | 472 | ||
469 | #define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ | 473 | #define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ |
470 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 474 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
471 | vcpu, 5, d1, d2, d3, d4, d5) | 475 | vcpu, 5, d1, d2, d3, d4, d5) |
472 | #define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ | 476 | #define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ |
473 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 477 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
474 | vcpu, 4, d1, d2, d3, d4, 0) | 478 | vcpu, 4, d1, d2, d3, d4, 0) |
475 | #define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ | 479 | #define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ |
476 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 480 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
477 | vcpu, 3, d1, d2, d3, 0, 0) | 481 | vcpu, 3, d1, d2, d3, 0, 0) |
478 | #define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ | 482 | #define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ |
479 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 483 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
480 | vcpu, 2, d1, d2, 0, 0, 0) | 484 | vcpu, 2, d1, d2, 0, 0, 0) |
481 | #define KVMTRACE_1D(evt, vcpu, d1, name) \ | 485 | #define KVMTRACE_1D(evt, vcpu, d1, name) \ |
482 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 486 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
483 | vcpu, 1, d1, 0, 0, 0, 0) | 487 | vcpu, 1, d1, 0, 0, 0, 0) |
484 | #define KVMTRACE_0D(evt, vcpu, name) \ | 488 | #define KVMTRACE_0D(evt, vcpu, name) \ |
485 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 489 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
486 | vcpu, 0, 0, 0, 0, 0, 0) | 490 | vcpu, 0, 0, 0, 0, 0, 0) |
487 | 491 | ||
488 | #ifdef CONFIG_KVM_TRACE | 492 | #ifdef CONFIG_KVM_TRACE |
489 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg); | 493 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg); |
490 | void kvm_trace_cleanup(void); | 494 | void kvm_trace_cleanup(void); |
491 | #else | 495 | #else |
492 | static inline | 496 | static inline |
493 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) | 497 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) |
494 | { | 498 | { |
495 | return -EINVAL; | 499 | return -EINVAL; |
496 | } | 500 | } |
497 | #define kvm_trace_cleanup() ((void)0) | 501 | #define kvm_trace_cleanup() ((void)0) |
498 | #endif | 502 | #endif |
499 | 503 | ||
500 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER | 504 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER |
501 | static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) | 505 | static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) |
502 | { | 506 | { |
503 | if (unlikely(vcpu->kvm->mmu_notifier_count)) | 507 | if (unlikely(vcpu->kvm->mmu_notifier_count)) |
504 | return 1; | 508 | return 1; |
505 | /* | 509 | /* |
506 | * Both reads happen under the mmu_lock and both values are | 510 | * Both reads happen under the mmu_lock and both values are |
507 | * modified under mmu_lock, so there's no need for smp_rmb() | 511 | * modified under mmu_lock, so there's no need for smp_rmb() |
508 | * here in between; otherwise mmu_notifier_count would need to be | 512 | * here in between; otherwise mmu_notifier_count would need to be |
509 | * read before mmu_notifier_seq, see the | 513 | * read before mmu_notifier_seq, see the |
510 | * mmu_notifier_invalidate_range_end write side. | 514 | * mmu_notifier_invalidate_range_end write side. |
511 | */ | 515 | */ |
512 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) | 516 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) |
513 | return 1; | 517 | return 1; |
514 | return 0; | 518 | return 0; |
515 | } | 519 | } |
516 | #endif | 520 | #endif |
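mmu_notifier_retry() is meant to be paired with a snapshot of mmu_notifier_seq taken before a potentially sleeping page lookup. A condensed sketch of that caller pattern under KVM_ARCH_WANT_MMU_NOTIFIER; the helper name and the shadow-map step are placeholders, not functions from this tree:

```c
/* Condensed sketch of the snapshot-then-retry pattern. */
static int fault_in_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;			/* caller retries the fault */
	}
	/* ... install the pfn into the shadow structures here ... */
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}
```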
517 | 521 | ||
518 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 522 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
519 | 523 | ||
520 | #define KVM_MAX_IRQ_ROUTES 1024 | 524 | #define KVM_MAX_IRQ_ROUTES 1024 |
521 | 525 | ||
522 | int kvm_setup_default_irq_routing(struct kvm *kvm); | 526 | int kvm_setup_default_irq_routing(struct kvm *kvm); |
523 | int kvm_set_irq_routing(struct kvm *kvm, | 527 | int kvm_set_irq_routing(struct kvm *kvm, |
524 | const struct kvm_irq_routing_entry *entries, | 528 | const struct kvm_irq_routing_entry *entries, |
525 | unsigned nr, | 529 | unsigned nr, |
526 | unsigned flags); | 530 | unsigned flags); |
527 | void kvm_free_irq_routing(struct kvm *kvm); | 531 | void kvm_free_irq_routing(struct kvm *kvm); |
528 | 532 | ||
529 | #else | 533 | #else |
530 | 534 | ||
531 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} | 535 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} |
532 | 536 | ||
533 | #endif | 537 | #endif |
534 | 538 | ||
535 | #ifdef CONFIG_HAVE_KVM_EVENTFD | 539 | #ifdef CONFIG_HAVE_KVM_EVENTFD |
536 | 540 | ||
537 | void kvm_irqfd_init(struct kvm *kvm); | 541 | void kvm_irqfd_init(struct kvm *kvm); |
538 | int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); | 542 | int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); |
539 | void kvm_irqfd_release(struct kvm *kvm); | 543 | void kvm_irqfd_release(struct kvm *kvm); |
540 | 544 | ||
541 | #else | 545 | #else |
542 | 546 | ||
543 | static inline void kvm_irqfd_init(struct kvm *kvm) {} | 547 | static inline void kvm_irqfd_init(struct kvm *kvm) {} |
544 | static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) | 548 | static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) |
545 | { | 549 | { |
546 | return -EINVAL; | 550 | return -EINVAL; |
547 | } | 551 | } |
548 | 552 | ||
549 | static inline void kvm_irqfd_release(struct kvm *kvm) {} | 553 | static inline void kvm_irqfd_release(struct kvm *kvm) {} |
550 | 554 | ||
551 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ | 555 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ |
552 | 556 | ||
557 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | ||
553 | static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) | 558 | static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) |
554 | { | 559 | { |
555 | return vcpu->kvm->bsp_vcpu == vcpu; | 560 | return vcpu->kvm->bsp_vcpu == vcpu; |
556 | } | 561 | } |
562 | #endif | ||
557 | #endif | 563 | #endif |
558 | 564 |
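kvm_vcpu_is_bsp() replaces the old assumption that vcpu 0 is the boot processor: the BSP is whichever vcpu kvm->bsp_vcpu points at, which in turn follows the configurable bsp_vcpu_id. A hedged sketch of how arch reset code might use it to pick an initial MP state; the actual reset paths differ from this simplification:

```c
/* Hedged sketch: key the initial MP state off the BSP check instead
 * of assuming vcpu_id 0 is the boot processor. */
static void init_mp_state(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp)
{
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	mp->mp_state = kvm_vcpu_is_bsp(vcpu) ? KVM_MP_STATE_RUNNABLE
					     : KVM_MP_STATE_UNINITIALIZED;
#else
	mp->mp_state = KVM_MP_STATE_RUNNABLE;
#endif
}
```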
virt/kvm/Kconfig
1 | # KVM common configuration items and defaults | 1 | # KVM common configuration items and defaults |
2 | 2 | ||
3 | config HAVE_KVM | 3 | config HAVE_KVM |
4 | bool | 4 | bool |
5 | 5 | ||
6 | config HAVE_KVM_IRQCHIP | 6 | config HAVE_KVM_IRQCHIP |
7 | bool | 7 | bool |
8 | 8 | ||
9 | config HAVE_KVM_EVENTFD | 9 | config HAVE_KVM_EVENTFD |
10 | bool | 10 | bool |
11 | select EVENTFD | 11 | select EVENTFD |
12 | |||
13 | config KVM_APIC_ARCHITECTURE | ||
14 | bool | ||
12 | 15 |
virt/kvm/kvm_main.c
1 | /* | 1 | /* |
2 | * Kernel-based Virtual Machine driver for Linux | 2 | * Kernel-based Virtual Machine driver for Linux |
3 | * | 3 | * |
4 | * This module enables machines with Intel VT-x extensions to run virtual | 4 | * This module enables machines with Intel VT-x extensions to run virtual |
5 | * machines without emulation or binary translation. | 5 | * machines without emulation or binary translation. |
6 | * | 6 | * |
7 | * Copyright (C) 2006 Qumranet, Inc. | 7 | * Copyright (C) 2006 Qumranet, Inc. |
8 | * | 8 | * |
9 | * Authors: | 9 | * Authors: |
10 | * Avi Kivity <avi@qumranet.com> | 10 | * Avi Kivity <avi@qumranet.com> |
11 | * Yaniv Kamay <yaniv@qumranet.com> | 11 | * Yaniv Kamay <yaniv@qumranet.com> |
12 | * | 12 | * |
13 | * This work is licensed under the terms of the GNU GPL, version 2. See | 13 | * This work is licensed under the terms of the GNU GPL, version 2. See |
14 | * the COPYING file in the top-level directory. | 14 | * the COPYING file in the top-level directory. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "iodev.h" | 18 | #include "iodev.h" |
19 | 19 | ||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/kvm.h> | 21 | #include <linux/kvm.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/miscdevice.h> | 27 | #include <linux/miscdevice.h> |
28 | #include <linux/vmalloc.h> | 28 | #include <linux/vmalloc.h> |
29 | #include <linux/reboot.h> | 29 | #include <linux/reboot.h> |
30 | #include <linux/debugfs.h> | 30 | #include <linux/debugfs.h> |
31 | #include <linux/highmem.h> | 31 | #include <linux/highmem.h> |
32 | #include <linux/file.h> | 32 | #include <linux/file.h> |
33 | #include <linux/sysdev.h> | 33 | #include <linux/sysdev.h> |
34 | #include <linux/cpu.h> | 34 | #include <linux/cpu.h> |
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/cpumask.h> | 36 | #include <linux/cpumask.h> |
37 | #include <linux/smp.h> | 37 | #include <linux/smp.h> |
38 | #include <linux/anon_inodes.h> | 38 | #include <linux/anon_inodes.h> |
39 | #include <linux/profile.h> | 39 | #include <linux/profile.h> |
40 | #include <linux/kvm_para.h> | 40 | #include <linux/kvm_para.h> |
41 | #include <linux/pagemap.h> | 41 | #include <linux/pagemap.h> |
42 | #include <linux/mman.h> | 42 | #include <linux/mman.h> |
43 | #include <linux/swap.h> | 43 | #include <linux/swap.h> |
44 | #include <linux/bitops.h> | 44 | #include <linux/bitops.h> |
45 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
46 | 46 | ||
47 | #include <asm/processor.h> | 47 | #include <asm/processor.h> |
48 | #include <asm/io.h> | 48 | #include <asm/io.h> |
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
51 | 51 | ||
52 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 52 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
53 | #include "coalesced_mmio.h" | 53 | #include "coalesced_mmio.h" |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | 56 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT |
57 | #include <linux/pci.h> | 57 | #include <linux/pci.h> |
58 | #include <linux/interrupt.h> | 58 | #include <linux/interrupt.h> |
59 | #include "irq.h" | 59 | #include "irq.h" |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | MODULE_AUTHOR("Qumranet"); | 62 | MODULE_AUTHOR("Qumranet"); |
63 | MODULE_LICENSE("GPL"); | 63 | MODULE_LICENSE("GPL"); |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Ordering of locks: | 66 | * Ordering of locks: |
67 | * | 67 | * |
68 | * kvm->lock --> kvm->irq_lock | 68 | * kvm->lock --> kvm->irq_lock |
69 | */ | 69 | */ |
70 | 70 | ||
71 | DEFINE_SPINLOCK(kvm_lock); | 71 | DEFINE_SPINLOCK(kvm_lock); |
72 | LIST_HEAD(vm_list); | 72 | LIST_HEAD(vm_list); |
73 | 73 | ||
74 | static cpumask_var_t cpus_hardware_enabled; | 74 | static cpumask_var_t cpus_hardware_enabled; |
75 | 75 | ||
76 | struct kmem_cache *kvm_vcpu_cache; | 76 | struct kmem_cache *kvm_vcpu_cache; |
77 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); | 77 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); |
78 | 78 | ||
79 | static __read_mostly struct preempt_ops kvm_preempt_ops; | 79 | static __read_mostly struct preempt_ops kvm_preempt_ops; |
80 | 80 | ||
81 | struct dentry *kvm_debugfs_dir; | 81 | struct dentry *kvm_debugfs_dir; |
82 | 82 | ||
83 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | 83 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, |
84 | unsigned long arg); | 84 | unsigned long arg); |
85 | 85 | ||
86 | static bool kvm_rebooting; | 86 | static bool kvm_rebooting; |
87 | 87 | ||
88 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | 88 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT |
89 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, | 89 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, |
90 | int assigned_dev_id) | 90 | int assigned_dev_id) |
91 | { | 91 | { |
92 | struct list_head *ptr; | 92 | struct list_head *ptr; |
93 | struct kvm_assigned_dev_kernel *match; | 93 | struct kvm_assigned_dev_kernel *match; |
94 | 94 | ||
95 | list_for_each(ptr, head) { | 95 | list_for_each(ptr, head) { |
96 | match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); | 96 | match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); |
97 | if (match->assigned_dev_id == assigned_dev_id) | 97 | if (match->assigned_dev_id == assigned_dev_id) |
98 | return match; | 98 | return match; |
99 | } | 99 | } |
100 | return NULL; | 100 | return NULL; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int find_index_from_host_irq(struct kvm_assigned_dev_kernel | 103 | static int find_index_from_host_irq(struct kvm_assigned_dev_kernel |
104 | *assigned_dev, int irq) | 104 | *assigned_dev, int irq) |
105 | { | 105 | { |
106 | int i, index; | 106 | int i, index; |
107 | struct msix_entry *host_msix_entries; | 107 | struct msix_entry *host_msix_entries; |
108 | 108 | ||
109 | host_msix_entries = assigned_dev->host_msix_entries; | 109 | host_msix_entries = assigned_dev->host_msix_entries; |
110 | 110 | ||
111 | index = -1; | 111 | index = -1; |
112 | for (i = 0; i < assigned_dev->entries_nr; i++) | 112 | for (i = 0; i < assigned_dev->entries_nr; i++) |
113 | if (irq == host_msix_entries[i].vector) { | 113 | if (irq == host_msix_entries[i].vector) { |
114 | index = i; | 114 | index = i; |
115 | break; | 115 | break; |
116 | } | 116 | } |
117 | if (index < 0) { | 117 | if (index < 0) { |
118 | printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n"); | 118 | printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n"); |
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | return index; | 122 | return index; |
123 | } | 123 | } |
124 | 124 | ||
125 | static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) | 125 | static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) |
126 | { | 126 | { |
127 | struct kvm_assigned_dev_kernel *assigned_dev; | 127 | struct kvm_assigned_dev_kernel *assigned_dev; |
128 | struct kvm *kvm; | 128 | struct kvm *kvm; |
129 | int i; | 129 | int i; |
130 | 130 | ||
131 | assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, | 131 | assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, |
132 | interrupt_work); | 132 | interrupt_work); |
133 | kvm = assigned_dev->kvm; | 133 | kvm = assigned_dev->kvm; |
134 | 134 | ||
135 | mutex_lock(&kvm->irq_lock); | 135 | mutex_lock(&kvm->irq_lock); |
136 | spin_lock_irq(&assigned_dev->assigned_dev_lock); | 136 | spin_lock_irq(&assigned_dev->assigned_dev_lock); |
137 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | 137 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { |
138 | struct kvm_guest_msix_entry *guest_entries = | 138 | struct kvm_guest_msix_entry *guest_entries = |
139 | assigned_dev->guest_msix_entries; | 139 | assigned_dev->guest_msix_entries; |
140 | for (i = 0; i < assigned_dev->entries_nr; i++) { | 140 | for (i = 0; i < assigned_dev->entries_nr; i++) { |
141 | if (!(guest_entries[i].flags & | 141 | if (!(guest_entries[i].flags & |
142 | KVM_ASSIGNED_MSIX_PENDING)) | 142 | KVM_ASSIGNED_MSIX_PENDING)) |
143 | continue; | 143 | continue; |
144 | guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING; | 144 | guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING; |
145 | kvm_set_irq(assigned_dev->kvm, | 145 | kvm_set_irq(assigned_dev->kvm, |
146 | assigned_dev->irq_source_id, | 146 | assigned_dev->irq_source_id, |
147 | guest_entries[i].vector, 1); | 147 | guest_entries[i].vector, 1); |
148 | } | 148 | } |
149 | } else | 149 | } else |
150 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, | 150 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, |
151 | assigned_dev->guest_irq, 1); | 151 | assigned_dev->guest_irq, 1); |
152 | 152 | ||
153 | spin_unlock_irq(&assigned_dev->assigned_dev_lock); | 153 | spin_unlock_irq(&assigned_dev->assigned_dev_lock); |
154 | mutex_unlock(&assigned_dev->kvm->irq_lock); | 154 | mutex_unlock(&assigned_dev->kvm->irq_lock); |
155 | } | 155 | } |
156 | 156 | ||
157 | static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) | 157 | static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) |
158 | { | 158 | { |
159 | unsigned long flags; | 159 | unsigned long flags; |
160 | struct kvm_assigned_dev_kernel *assigned_dev = | 160 | struct kvm_assigned_dev_kernel *assigned_dev = |
161 | (struct kvm_assigned_dev_kernel *) dev_id; | 161 | (struct kvm_assigned_dev_kernel *) dev_id; |
162 | 162 | ||
163 | spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags); | 163 | spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags); |
164 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | 164 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { |
165 | int index = find_index_from_host_irq(assigned_dev, irq); | 165 | int index = find_index_from_host_irq(assigned_dev, irq); |
166 | if (index < 0) | 166 | if (index < 0) |
167 | goto out; | 167 | goto out; |
168 | assigned_dev->guest_msix_entries[index].flags |= | 168 | assigned_dev->guest_msix_entries[index].flags |= |
169 | KVM_ASSIGNED_MSIX_PENDING; | 169 | KVM_ASSIGNED_MSIX_PENDING; |
170 | } | 170 | } |
171 | 171 | ||
172 | schedule_work(&assigned_dev->interrupt_work); | 172 | schedule_work(&assigned_dev->interrupt_work); |
173 | 173 | ||
174 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { | 174 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { |
175 | disable_irq_nosync(irq); | 175 | disable_irq_nosync(irq); |
176 | assigned_dev->host_irq_disabled = true; | 176 | assigned_dev->host_irq_disabled = true; |
177 | } | 177 | } |
178 | 178 | ||
179 | out: | 179 | out: |
180 | spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags); | 180 | spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags); |
181 | return IRQ_HANDLED; | 181 | return IRQ_HANDLED; |
182 | } | 182 | } |
183 | 183 | ||
184 | /* Ack the irq line for an assigned device */ | 184 | /* Ack the irq line for an assigned device */ |
185 | static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | 185 | static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) |
186 | { | 186 | { |
187 | struct kvm_assigned_dev_kernel *dev; | 187 | struct kvm_assigned_dev_kernel *dev; |
188 | unsigned long flags; | 188 | unsigned long flags; |
189 | 189 | ||
190 | if (kian->gsi == -1) | 190 | if (kian->gsi == -1) |
191 | return; | 191 | return; |
192 | 192 | ||
193 | dev = container_of(kian, struct kvm_assigned_dev_kernel, | 193 | dev = container_of(kian, struct kvm_assigned_dev_kernel, |
194 | ack_notifier); | 194 | ack_notifier); |
195 | 195 | ||
196 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); | 196 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); |
197 | 197 | ||
198 | /* The guest irq may be shared so this ack may be | 198 | /* The guest irq may be shared so this ack may be |
199 | * from another device. | 199 | * from another device. |
200 | */ | 200 | */ |
201 | spin_lock_irqsave(&dev->assigned_dev_lock, flags); | 201 | spin_lock_irqsave(&dev->assigned_dev_lock, flags); |
202 | if (dev->host_irq_disabled) { | 202 | if (dev->host_irq_disabled) { |
203 | enable_irq(dev->host_irq); | 203 | enable_irq(dev->host_irq); |
204 | dev->host_irq_disabled = false; | 204 | dev->host_irq_disabled = false; |
205 | } | 205 | } |
206 | spin_unlock_irqrestore(&dev->assigned_dev_lock, flags); | 206 | spin_unlock_irqrestore(&dev->assigned_dev_lock, flags); |
207 | } | 207 | } |
208 | 208 | ||
209 | static void deassign_guest_irq(struct kvm *kvm, | 209 | static void deassign_guest_irq(struct kvm *kvm, |
210 | struct kvm_assigned_dev_kernel *assigned_dev) | 210 | struct kvm_assigned_dev_kernel *assigned_dev) |
211 | { | 211 | { |
212 | kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); | 212 | kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); |
213 | assigned_dev->ack_notifier.gsi = -1; | 213 | assigned_dev->ack_notifier.gsi = -1; |
214 | 214 | ||
215 | if (assigned_dev->irq_source_id != -1) | 215 | if (assigned_dev->irq_source_id != -1) |
216 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); | 216 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); |
217 | assigned_dev->irq_source_id = -1; | 217 | assigned_dev->irq_source_id = -1; |
218 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK); | 218 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK); |
219 | } | 219 | } |
220 | 220 | ||
221 | /* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */ | 221 | /* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */ |
222 | static void deassign_host_irq(struct kvm *kvm, | 222 | static void deassign_host_irq(struct kvm *kvm, |
223 | struct kvm_assigned_dev_kernel *assigned_dev) | 223 | struct kvm_assigned_dev_kernel *assigned_dev) |
224 | { | 224 | { |
225 | /* | 225 | /* |
226 | * In kvm_free_device_irq, cancel_work_sync() returns true if: | 226 | * In kvm_free_device_irq, cancel_work_sync() returns true if: |
227 | * 1. the work is scheduled and then cancelled. | 227 | * 1. the work is scheduled and then cancelled. |
228 | * 2. the work callback is executed. | 228 | * 2. the work callback is executed. |
229 | * | 229 | * |
230 | * The first case ensures that the irq is disabled and no more events | 230 | * The first case ensures that the irq is disabled and no more events |
231 | * will occur. But in the second case, the irq may be enabled (e.g. | 231 | * will occur. But in the second case, the irq may be enabled (e.g. |
232 | * for MSI). So we disable the irq here to prevent further events. | 232 | * for MSI). So we disable the irq here to prevent further events. |
233 | * | 233 | * |
234 | * Note that this may result in a nested disable if the interrupt type is | 234 | * Note that this may result in a nested disable if the interrupt type is |
235 | * INTx, but that is fine since we are going to free it. | 235 | * INTx, but that is fine since we are going to free it. |
236 | * | 236 | * |
237 | * If this function is part of VM destruction, please ensure that at this | 237 | * If this function is part of VM destruction, please ensure that at this |
238 | * point the kvm state is still valid, since we may also have to wait for | 238 | * point the kvm state is still valid, since we may also have to wait for |
239 | * interrupt_work to complete. | 239 | * interrupt_work to complete. |
240 | */ | 240 | */ |
241 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { | 241 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { |
242 | int i; | 242 | int i; |
243 | for (i = 0; i < assigned_dev->entries_nr; i++) | 243 | for (i = 0; i < assigned_dev->entries_nr; i++) |
244 | disable_irq_nosync(assigned_dev-> | 244 | disable_irq_nosync(assigned_dev-> |
245 | host_msix_entries[i].vector); | 245 | host_msix_entries[i].vector); |
246 | 246 | ||
247 | cancel_work_sync(&assigned_dev->interrupt_work); | 247 | cancel_work_sync(&assigned_dev->interrupt_work); |
248 | 248 | ||
249 | for (i = 0; i < assigned_dev->entries_nr; i++) | 249 | for (i = 0; i < assigned_dev->entries_nr; i++) |
250 | free_irq(assigned_dev->host_msix_entries[i].vector, | 250 | free_irq(assigned_dev->host_msix_entries[i].vector, |
251 | (void *)assigned_dev); | 251 | (void *)assigned_dev); |
252 | 252 | ||
253 | assigned_dev->entries_nr = 0; | 253 | assigned_dev->entries_nr = 0; |
254 | kfree(assigned_dev->host_msix_entries); | 254 | kfree(assigned_dev->host_msix_entries); |
255 | kfree(assigned_dev->guest_msix_entries); | 255 | kfree(assigned_dev->guest_msix_entries); |
256 | pci_disable_msix(assigned_dev->dev); | 256 | pci_disable_msix(assigned_dev->dev); |
257 | } else { | 257 | } else { |
258 | /* Deal with MSI and INTx */ | 258 | /* Deal with MSI and INTx */ |
259 | disable_irq_nosync(assigned_dev->host_irq); | 259 | disable_irq_nosync(assigned_dev->host_irq); |
260 | cancel_work_sync(&assigned_dev->interrupt_work); | 260 | cancel_work_sync(&assigned_dev->interrupt_work); |
261 | 261 | ||
262 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); | 262 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); |
263 | 263 | ||
264 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI) | 264 | if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI) |
265 | pci_disable_msi(assigned_dev->dev); | 265 | pci_disable_msi(assigned_dev->dev); |
266 | } | 266 | } |
267 | 267 | ||
268 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK); | 268 | assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK); |
269 | } | 269 | } |
270 | 270 | ||
271 | static int kvm_deassign_irq(struct kvm *kvm, | 271 | static int kvm_deassign_irq(struct kvm *kvm, |
272 | struct kvm_assigned_dev_kernel *assigned_dev, | 272 | struct kvm_assigned_dev_kernel *assigned_dev, |
273 | unsigned long irq_requested_type) | 273 | unsigned long irq_requested_type) |
274 | { | 274 | { |
275 | unsigned long guest_irq_type, host_irq_type; | 275 | unsigned long guest_irq_type, host_irq_type; |
276 | 276 | ||
277 | if (!irqchip_in_kernel(kvm)) | 277 | if (!irqchip_in_kernel(kvm)) |
278 | return -EINVAL; | 278 | return -EINVAL; |
279 | /* no irq assignment to deassign */ | 279 | /* no irq assignment to deassign */ |
280 | if (!assigned_dev->irq_requested_type) | 280 | if (!assigned_dev->irq_requested_type) |
281 | return -ENXIO; | 281 | return -ENXIO; |
282 | 282 | ||
283 | host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK; | 283 | host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK; |
284 | guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK; | 284 | guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK; |
285 | 285 | ||
286 | if (host_irq_type) | 286 | if (host_irq_type) |
287 | deassign_host_irq(kvm, assigned_dev); | 287 | deassign_host_irq(kvm, assigned_dev); |
288 | if (guest_irq_type) | 288 | if (guest_irq_type) |
289 | deassign_guest_irq(kvm, assigned_dev); | 289 | deassign_guest_irq(kvm, assigned_dev); |
290 | 290 | ||
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
293 | 293 | ||
294 | static void kvm_free_assigned_irq(struct kvm *kvm, | 294 | static void kvm_free_assigned_irq(struct kvm *kvm, |
295 | struct kvm_assigned_dev_kernel *assigned_dev) | 295 | struct kvm_assigned_dev_kernel *assigned_dev) |
296 | { | 296 | { |
297 | kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); | 297 | kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); |
298 | } | 298 | } |
299 | 299 | ||
300 | static void kvm_free_assigned_device(struct kvm *kvm, | 300 | static void kvm_free_assigned_device(struct kvm *kvm, |
301 | struct kvm_assigned_dev_kernel | 301 | struct kvm_assigned_dev_kernel |
302 | *assigned_dev) | 302 | *assigned_dev) |
303 | { | 303 | { |
304 | kvm_free_assigned_irq(kvm, assigned_dev); | 304 | kvm_free_assigned_irq(kvm, assigned_dev); |
305 | 305 | ||
306 | pci_reset_function(assigned_dev->dev); | 306 | pci_reset_function(assigned_dev->dev); |
307 | 307 | ||
308 | pci_release_regions(assigned_dev->dev); | 308 | pci_release_regions(assigned_dev->dev); |
309 | pci_disable_device(assigned_dev->dev); | 309 | pci_disable_device(assigned_dev->dev); |
310 | pci_dev_put(assigned_dev->dev); | 310 | pci_dev_put(assigned_dev->dev); |
311 | 311 | ||
312 | list_del(&assigned_dev->list); | 312 | list_del(&assigned_dev->list); |
313 | kfree(assigned_dev); | 313 | kfree(assigned_dev); |
314 | } | 314 | } |
315 | 315 | ||
316 | void kvm_free_all_assigned_devices(struct kvm *kvm) | 316 | void kvm_free_all_assigned_devices(struct kvm *kvm) |
317 | { | 317 | { |
318 | struct list_head *ptr, *ptr2; | 318 | struct list_head *ptr, *ptr2; |
319 | struct kvm_assigned_dev_kernel *assigned_dev; | 319 | struct kvm_assigned_dev_kernel *assigned_dev; |
320 | 320 | ||
321 | list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { | 321 | list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { |
322 | assigned_dev = list_entry(ptr, | 322 | assigned_dev = list_entry(ptr, |
323 | struct kvm_assigned_dev_kernel, | 323 | struct kvm_assigned_dev_kernel, |
324 | list); | 324 | list); |
325 | 325 | ||
326 | kvm_free_assigned_device(kvm, assigned_dev); | 326 | kvm_free_assigned_device(kvm, assigned_dev); |
327 | } | 327 | } |
328 | } | 328 | } |
329 | 329 | ||
330 | static int assigned_device_enable_host_intx(struct kvm *kvm, | 330 | static int assigned_device_enable_host_intx(struct kvm *kvm, |
331 | struct kvm_assigned_dev_kernel *dev) | 331 | struct kvm_assigned_dev_kernel *dev) |
332 | { | 332 | { |
333 | dev->host_irq = dev->dev->irq; | 333 | dev->host_irq = dev->dev->irq; |
334 | /* Even though this is PCI, we don't want to use shared | 334 | /* Even though this is PCI, we don't want to use shared |
335 | * interrupts. Sharing host devices with guest-assigned devices | 335 | * interrupts. Sharing host devices with guest-assigned devices |
336 | * on the same interrupt line is not a happy situation: there | 336 | * on the same interrupt line is not a happy situation: there |
337 | * are going to be long delays in accepting, acking, etc. | 337 | * are going to be long delays in accepting, acking, etc. |
338 | */ | 338 | */ |
339 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, | 339 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, |
340 | 0, "kvm_assigned_intx_device", (void *)dev)) | 340 | 0, "kvm_assigned_intx_device", (void *)dev)) |
341 | return -EIO; | 341 | return -EIO; |
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
345 | #ifdef __KVM_HAVE_MSI | 345 | #ifdef __KVM_HAVE_MSI |
346 | static int assigned_device_enable_host_msi(struct kvm *kvm, | 346 | static int assigned_device_enable_host_msi(struct kvm *kvm, |
347 | struct kvm_assigned_dev_kernel *dev) | 347 | struct kvm_assigned_dev_kernel *dev) |
348 | { | 348 | { |
349 | int r; | 349 | int r; |
350 | 350 | ||
351 | if (!dev->dev->msi_enabled) { | 351 | if (!dev->dev->msi_enabled) { |
352 | r = pci_enable_msi(dev->dev); | 352 | r = pci_enable_msi(dev->dev); |
353 | if (r) | 353 | if (r) |
354 | return r; | 354 | return r; |
355 | } | 355 | } |
356 | 356 | ||
357 | dev->host_irq = dev->dev->irq; | 357 | dev->host_irq = dev->dev->irq; |
358 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0, | 358 | if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0, |
359 | "kvm_assigned_msi_device", (void *)dev)) { | 359 | "kvm_assigned_msi_device", (void *)dev)) { |
360 | pci_disable_msi(dev->dev); | 360 | pci_disable_msi(dev->dev); |
361 | return -EIO; | 361 | return -EIO; |
362 | } | 362 | } |
363 | 363 | ||
364 | return 0; | 364 | return 0; |
365 | } | 365 | } |
366 | #endif | 366 | #endif |
367 | 367 | ||
368 | #ifdef __KVM_HAVE_MSIX | 368 | #ifdef __KVM_HAVE_MSIX |
369 | static int assigned_device_enable_host_msix(struct kvm *kvm, | 369 | static int assigned_device_enable_host_msix(struct kvm *kvm, |
370 | struct kvm_assigned_dev_kernel *dev) | 370 | struct kvm_assigned_dev_kernel *dev) |
371 | { | 371 | { |
372 | int i, r = -EINVAL; | 372 | int i, r = -EINVAL; |
373 | 373 | ||
374 | /* host_msix_entries and guest_msix_entries should have been | 374 | /* host_msix_entries and guest_msix_entries should have been |
375 | * initialized */ | 375 | * initialized */ |
376 | if (dev->entries_nr == 0) | 376 | if (dev->entries_nr == 0) |
377 | return r; | 377 | return r; |
378 | 378 | ||
379 | r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); | 379 | r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); |
380 | if (r) | 380 | if (r) |
381 | return r; | 381 | return r; |
382 | 382 | ||
383 | for (i = 0; i < dev->entries_nr; i++) { | 383 | for (i = 0; i < dev->entries_nr; i++) { |
384 | r = request_irq(dev->host_msix_entries[i].vector, | 384 | r = request_irq(dev->host_msix_entries[i].vector, |
385 | kvm_assigned_dev_intr, 0, | 385 | kvm_assigned_dev_intr, 0, |
386 | "kvm_assigned_msix_device", | 386 | "kvm_assigned_msix_device", |
387 | (void *)dev); | 387 | (void *)dev); |
388 | /* FIXME: free requested_irq's on failure */ | 388 | /* FIXME: free requested_irq's on failure */ |
389 | if (r) | 389 | if (r) |
390 | return r; | 390 | return r; |
391 | } | 391 | } |
392 | 392 | ||
393 | return 0; | 393 | return 0; |
394 | } | 394 | } |
395 | 395 | ||
396 | #endif | 396 | #endif |
397 | 397 | ||
398 | static int assigned_device_enable_guest_intx(struct kvm *kvm, | 398 | static int assigned_device_enable_guest_intx(struct kvm *kvm, |
399 | struct kvm_assigned_dev_kernel *dev, | 399 | struct kvm_assigned_dev_kernel *dev, |
400 | struct kvm_assigned_irq *irq) | 400 | struct kvm_assigned_irq *irq) |
401 | { | 401 | { |
402 | dev->guest_irq = irq->guest_irq; | 402 | dev->guest_irq = irq->guest_irq; |
403 | dev->ack_notifier.gsi = irq->guest_irq; | 403 | dev->ack_notifier.gsi = irq->guest_irq; |
404 | return 0; | 404 | return 0; |
405 | } | 405 | } |
406 | 406 | ||
407 | #ifdef __KVM_HAVE_MSI | 407 | #ifdef __KVM_HAVE_MSI |
408 | static int assigned_device_enable_guest_msi(struct kvm *kvm, | 408 | static int assigned_device_enable_guest_msi(struct kvm *kvm, |
409 | struct kvm_assigned_dev_kernel *dev, | 409 | struct kvm_assigned_dev_kernel *dev, |
410 | struct kvm_assigned_irq *irq) | 410 | struct kvm_assigned_irq *irq) |
411 | { | 411 | { |
412 | dev->guest_irq = irq->guest_irq; | 412 | dev->guest_irq = irq->guest_irq; |
413 | dev->ack_notifier.gsi = -1; | 413 | dev->ack_notifier.gsi = -1; |
414 | dev->host_irq_disabled = false; | 414 | dev->host_irq_disabled = false; |
415 | return 0; | 415 | return 0; |
416 | } | 416 | } |
417 | #endif | 417 | #endif |
418 | #ifdef __KVM_HAVE_MSIX | 418 | #ifdef __KVM_HAVE_MSIX |
419 | static int assigned_device_enable_guest_msix(struct kvm *kvm, | 419 | static int assigned_device_enable_guest_msix(struct kvm *kvm, |
420 | struct kvm_assigned_dev_kernel *dev, | 420 | struct kvm_assigned_dev_kernel *dev, |
421 | struct kvm_assigned_irq *irq) | 421 | struct kvm_assigned_irq *irq) |
422 | { | 422 | { |
423 | dev->guest_irq = irq->guest_irq; | 423 | dev->guest_irq = irq->guest_irq; |
424 | dev->ack_notifier.gsi = -1; | 424 | dev->ack_notifier.gsi = -1; |
425 | dev->host_irq_disabled = false; | 425 | dev->host_irq_disabled = false; |
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | #endif | 428 | #endif |
429 | 429 | ||
430 | static int assign_host_irq(struct kvm *kvm, | 430 | static int assign_host_irq(struct kvm *kvm, |
431 | struct kvm_assigned_dev_kernel *dev, | 431 | struct kvm_assigned_dev_kernel *dev, |
432 | __u32 host_irq_type) | 432 | __u32 host_irq_type) |
433 | { | 433 | { |
434 | int r = -EEXIST; | 434 | int r = -EEXIST; |
435 | 435 | ||
436 | if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) | 436 | if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) |
437 | return r; | 437 | return r; |
438 | 438 | ||
439 | switch (host_irq_type) { | 439 | switch (host_irq_type) { |
440 | case KVM_DEV_IRQ_HOST_INTX: | 440 | case KVM_DEV_IRQ_HOST_INTX: |
441 | r = assigned_device_enable_host_intx(kvm, dev); | 441 | r = assigned_device_enable_host_intx(kvm, dev); |
442 | break; | 442 | break; |
443 | #ifdef __KVM_HAVE_MSI | 443 | #ifdef __KVM_HAVE_MSI |
444 | case KVM_DEV_IRQ_HOST_MSI: | 444 | case KVM_DEV_IRQ_HOST_MSI: |
445 | r = assigned_device_enable_host_msi(kvm, dev); | 445 | r = assigned_device_enable_host_msi(kvm, dev); |
446 | break; | 446 | break; |
447 | #endif | 447 | #endif |
448 | #ifdef __KVM_HAVE_MSIX | 448 | #ifdef __KVM_HAVE_MSIX |
449 | case KVM_DEV_IRQ_HOST_MSIX: | 449 | case KVM_DEV_IRQ_HOST_MSIX: |
450 | r = assigned_device_enable_host_msix(kvm, dev); | 450 | r = assigned_device_enable_host_msix(kvm, dev); |
451 | break; | 451 | break; |
452 | #endif | 452 | #endif |
453 | default: | 453 | default: |
454 | r = -EINVAL; | 454 | r = -EINVAL; |
455 | } | 455 | } |
456 | 456 | ||
457 | if (!r) | 457 | if (!r) |
458 | dev->irq_requested_type |= host_irq_type; | 458 | dev->irq_requested_type |= host_irq_type; |
459 | 459 | ||
460 | return r; | 460 | return r; |
461 | } | 461 | } |
462 | 462 | ||
463 | static int assign_guest_irq(struct kvm *kvm, | 463 | static int assign_guest_irq(struct kvm *kvm, |
464 | struct kvm_assigned_dev_kernel *dev, | 464 | struct kvm_assigned_dev_kernel *dev, |
465 | struct kvm_assigned_irq *irq, | 465 | struct kvm_assigned_irq *irq, |
466 | unsigned long guest_irq_type) | 466 | unsigned long guest_irq_type) |
467 | { | 467 | { |
468 | int id; | 468 | int id; |
469 | int r = -EEXIST; | 469 | int r = -EEXIST; |
470 | 470 | ||
471 | if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK) | 471 | if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK) |
472 | return r; | 472 | return r; |
473 | 473 | ||
474 | id = kvm_request_irq_source_id(kvm); | 474 | id = kvm_request_irq_source_id(kvm); |
475 | if (id < 0) | 475 | if (id < 0) |
476 | return id; | 476 | return id; |
477 | 477 | ||
478 | dev->irq_source_id = id; | 478 | dev->irq_source_id = id; |
479 | 479 | ||
480 | switch (guest_irq_type) { | 480 | switch (guest_irq_type) { |
481 | case KVM_DEV_IRQ_GUEST_INTX: | 481 | case KVM_DEV_IRQ_GUEST_INTX: |
482 | r = assigned_device_enable_guest_intx(kvm, dev, irq); | 482 | r = assigned_device_enable_guest_intx(kvm, dev, irq); |
483 | break; | 483 | break; |
484 | #ifdef __KVM_HAVE_MSI | 484 | #ifdef __KVM_HAVE_MSI |
485 | case KVM_DEV_IRQ_GUEST_MSI: | 485 | case KVM_DEV_IRQ_GUEST_MSI: |
486 | r = assigned_device_enable_guest_msi(kvm, dev, irq); | 486 | r = assigned_device_enable_guest_msi(kvm, dev, irq); |
487 | break; | 487 | break; |
488 | #endif | 488 | #endif |
489 | #ifdef __KVM_HAVE_MSIX | 489 | #ifdef __KVM_HAVE_MSIX |
490 | case KVM_DEV_IRQ_GUEST_MSIX: | 490 | case KVM_DEV_IRQ_GUEST_MSIX: |
491 | r = assigned_device_enable_guest_msix(kvm, dev, irq); | 491 | r = assigned_device_enable_guest_msix(kvm, dev, irq); |
492 | break; | 492 | break; |
493 | #endif | 493 | #endif |
494 | default: | 494 | default: |
495 | r = -EINVAL; | 495 | r = -EINVAL; |
496 | } | 496 | } |
497 | 497 | ||
498 | if (!r) { | 498 | if (!r) { |
499 | dev->irq_requested_type |= guest_irq_type; | 499 | dev->irq_requested_type |= guest_irq_type; |
500 | kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier); | 500 | kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier); |
501 | } else | 501 | } else |
502 | kvm_free_irq_source_id(kvm, dev->irq_source_id); | 502 | kvm_free_irq_source_id(kvm, dev->irq_source_id); |
503 | 503 | ||
504 | return r; | 504 | return r; |
505 | } | 505 | } |
506 | 506 | ||
507 | /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */ | 507 | /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */ |
508 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, | 508 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, |
509 | struct kvm_assigned_irq *assigned_irq) | 509 | struct kvm_assigned_irq *assigned_irq) |
510 | { | 510 | { |
511 | int r = -EINVAL; | 511 | int r = -EINVAL; |
512 | struct kvm_assigned_dev_kernel *match; | 512 | struct kvm_assigned_dev_kernel *match; |
513 | unsigned long host_irq_type, guest_irq_type; | 513 | unsigned long host_irq_type, guest_irq_type; |
514 | 514 | ||
515 | if (!capable(CAP_SYS_RAWIO)) | 515 | if (!capable(CAP_SYS_RAWIO)) |
516 | return -EPERM; | 516 | return -EPERM; |
517 | 517 | ||
518 | if (!irqchip_in_kernel(kvm)) | 518 | if (!irqchip_in_kernel(kvm)) |
519 | return r; | 519 | return r; |
520 | 520 | ||
521 | mutex_lock(&kvm->lock); | 521 | mutex_lock(&kvm->lock); |
522 | r = -ENODEV; | 522 | r = -ENODEV; |
523 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 523 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
524 | assigned_irq->assigned_dev_id); | 524 | assigned_irq->assigned_dev_id); |
525 | if (!match) | 525 | if (!match) |
526 | goto out; | 526 | goto out; |
527 | 527 | ||
528 | host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK); | 528 | host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK); |
529 | guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK); | 529 | guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK); |
530 | 530 | ||
531 | r = -EINVAL; | 531 | r = -EINVAL; |
532 | /* can only assign one type at a time */ | 532 | /* can only assign one type at a time */ |
533 | if (hweight_long(host_irq_type) > 1) | 533 | if (hweight_long(host_irq_type) > 1) |
534 | goto out; | 534 | goto out; |
535 | if (hweight_long(guest_irq_type) > 1) | 535 | if (hweight_long(guest_irq_type) > 1) |
536 | goto out; | 536 | goto out; |
537 | if (host_irq_type == 0 && guest_irq_type == 0) | 537 | if (host_irq_type == 0 && guest_irq_type == 0) |
538 | goto out; | 538 | goto out; |
539 | 539 | ||
540 | r = 0; | 540 | r = 0; |
541 | if (host_irq_type) | 541 | if (host_irq_type) |
542 | r = assign_host_irq(kvm, match, host_irq_type); | 542 | r = assign_host_irq(kvm, match, host_irq_type); |
543 | if (r) | 543 | if (r) |
544 | goto out; | 544 | goto out; |
545 | 545 | ||
546 | if (guest_irq_type) | 546 | if (guest_irq_type) |
547 | r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type); | 547 | r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type); |
548 | out: | 548 | out: |
549 | mutex_unlock(&kvm->lock); | 549 | mutex_unlock(&kvm->lock); |
550 | return r; | 550 | return r; |
551 | } | 551 | } |
552 | 552 | ||
553 | static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, | 553 | static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm, |
554 | struct kvm_assigned_irq | 554 | struct kvm_assigned_irq |
555 | *assigned_irq) | 555 | *assigned_irq) |
556 | { | 556 | { |
557 | int r = -ENODEV; | 557 | int r = -ENODEV; |
558 | struct kvm_assigned_dev_kernel *match; | 558 | struct kvm_assigned_dev_kernel *match; |
559 | 559 | ||
560 | mutex_lock(&kvm->lock); | 560 | mutex_lock(&kvm->lock); |
561 | 561 | ||
562 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 562 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
563 | assigned_irq->assigned_dev_id); | 563 | assigned_irq->assigned_dev_id); |
564 | if (!match) | 564 | if (!match) |
565 | goto out; | 565 | goto out; |
566 | 566 | ||
567 | r = kvm_deassign_irq(kvm, match, assigned_irq->flags); | 567 | r = kvm_deassign_irq(kvm, match, assigned_irq->flags); |
568 | out: | 568 | out: |
569 | mutex_unlock(&kvm->lock); | 569 | mutex_unlock(&kvm->lock); |
570 | return r; | 570 | return r; |
571 | } | 571 | } |
572 | 572 | ||
573 | static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | 573 | static int kvm_vm_ioctl_assign_device(struct kvm *kvm, |
574 | struct kvm_assigned_pci_dev *assigned_dev) | 574 | struct kvm_assigned_pci_dev *assigned_dev) |
575 | { | 575 | { |
576 | int r = 0; | 576 | int r = 0; |
577 | struct kvm_assigned_dev_kernel *match; | 577 | struct kvm_assigned_dev_kernel *match; |
578 | struct pci_dev *dev; | 578 | struct pci_dev *dev; |
579 | 579 | ||
580 | down_read(&kvm->slots_lock); | 580 | down_read(&kvm->slots_lock); |
581 | mutex_lock(&kvm->lock); | 581 | mutex_lock(&kvm->lock); |
582 | 582 | ||
583 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 583 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
584 | assigned_dev->assigned_dev_id); | 584 | assigned_dev->assigned_dev_id); |
585 | if (match) { | 585 | if (match) { |
586 | /* device already assigned */ | 586 | /* device already assigned */ |
587 | r = -EEXIST; | 587 | r = -EEXIST; |
588 | goto out; | 588 | goto out; |
589 | } | 589 | } |
590 | 590 | ||
591 | match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL); | 591 | match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL); |
592 | if (match == NULL) { | 592 | if (match == NULL) { |
593 | printk(KERN_INFO "%s: Couldn't allocate memory\n", | 593 | printk(KERN_INFO "%s: Couldn't allocate memory\n", |
594 | __func__); | 594 | __func__); |
595 | r = -ENOMEM; | 595 | r = -ENOMEM; |
596 | goto out; | 596 | goto out; |
597 | } | 597 | } |
598 | dev = pci_get_bus_and_slot(assigned_dev->busnr, | 598 | dev = pci_get_bus_and_slot(assigned_dev->busnr, |
599 | assigned_dev->devfn); | 599 | assigned_dev->devfn); |
600 | if (!dev) { | 600 | if (!dev) { |
601 | printk(KERN_INFO "%s: host device not found\n", __func__); | 601 | printk(KERN_INFO "%s: host device not found\n", __func__); |
602 | r = -EINVAL; | 602 | r = -EINVAL; |
603 | goto out_free; | 603 | goto out_free; |
604 | } | 604 | } |
605 | if (pci_enable_device(dev)) { | 605 | if (pci_enable_device(dev)) { |
606 | printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); | 606 | printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); |
607 | r = -EBUSY; | 607 | r = -EBUSY; |
608 | goto out_put; | 608 | goto out_put; |
609 | } | 609 | } |
610 | r = pci_request_regions(dev, "kvm_assigned_device"); | 610 | r = pci_request_regions(dev, "kvm_assigned_device"); |
611 | if (r) { | 611 | if (r) { |
612 | printk(KERN_INFO "%s: Could not get access to device regions\n", | 612 | printk(KERN_INFO "%s: Could not get access to device regions\n", |
613 | __func__); | 613 | __func__); |
614 | goto out_disable; | 614 | goto out_disable; |
615 | } | 615 | } |
616 | 616 | ||
617 | pci_reset_function(dev); | 617 | pci_reset_function(dev); |
618 | 618 | ||
619 | match->assigned_dev_id = assigned_dev->assigned_dev_id; | 619 | match->assigned_dev_id = assigned_dev->assigned_dev_id; |
620 | match->host_busnr = assigned_dev->busnr; | 620 | match->host_busnr = assigned_dev->busnr; |
621 | match->host_devfn = assigned_dev->devfn; | 621 | match->host_devfn = assigned_dev->devfn; |
622 | match->flags = assigned_dev->flags; | 622 | match->flags = assigned_dev->flags; |
623 | match->dev = dev; | 623 | match->dev = dev; |
624 | spin_lock_init(&match->assigned_dev_lock); | 624 | spin_lock_init(&match->assigned_dev_lock); |
625 | match->irq_source_id = -1; | 625 | match->irq_source_id = -1; |
626 | match->kvm = kvm; | 626 | match->kvm = kvm; |
627 | match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; | 627 | match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; |
628 | INIT_WORK(&match->interrupt_work, | 628 | INIT_WORK(&match->interrupt_work, |
629 | kvm_assigned_dev_interrupt_work_handler); | 629 | kvm_assigned_dev_interrupt_work_handler); |
630 | 630 | ||
631 | list_add(&match->list, &kvm->arch.assigned_dev_head); | 631 | list_add(&match->list, &kvm->arch.assigned_dev_head); |
632 | 632 | ||
633 | if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { | 633 | if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { |
634 | if (!kvm->arch.iommu_domain) { | 634 | if (!kvm->arch.iommu_domain) { |
635 | r = kvm_iommu_map_guest(kvm); | 635 | r = kvm_iommu_map_guest(kvm); |
636 | if (r) | 636 | if (r) |
637 | goto out_list_del; | 637 | goto out_list_del; |
638 | } | 638 | } |
639 | r = kvm_assign_device(kvm, match); | 639 | r = kvm_assign_device(kvm, match); |
640 | if (r) | 640 | if (r) |
641 | goto out_list_del; | 641 | goto out_list_del; |
642 | } | 642 | } |
643 | 643 | ||
644 | out: | 644 | out: |
645 | mutex_unlock(&kvm->lock); | 645 | mutex_unlock(&kvm->lock); |
646 | up_read(&kvm->slots_lock); | 646 | up_read(&kvm->slots_lock); |
647 | return r; | 647 | return r; |
648 | out_list_del: | 648 | out_list_del: |
649 | list_del(&match->list); | 649 | list_del(&match->list); |
650 | pci_release_regions(dev); | 650 | pci_release_regions(dev); |
651 | out_disable: | 651 | out_disable: |
652 | pci_disable_device(dev); | 652 | pci_disable_device(dev); |
653 | out_put: | 653 | out_put: |
654 | pci_dev_put(dev); | 654 | pci_dev_put(dev); |
655 | out_free: | 655 | out_free: |
656 | kfree(match); | 656 | kfree(match); |
657 | mutex_unlock(&kvm->lock); | 657 | mutex_unlock(&kvm->lock); |
658 | up_read(&kvm->slots_lock); | 658 | up_read(&kvm->slots_lock); |
659 | return r; | 659 | return r; |
660 | } | 660 | } |
661 | #endif | 661 | #endif |
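For reference, a minimal userspace sketch of driving the assignment path above from a VMM. The ioctl names KVM_ASSIGN_PCI_DEVICE and KVM_ASSIGN_DEV_IRQ and the headers are assumptions not shown in this diff; the structure fields mirror the ones the handlers read (assigned_dev_id, busnr, devfn, flags, guest_irq).

/* Illustrative only: assumes <linux/kvm.h> exposes these ioctls and structures. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_assign_device(int vm_fd, __u32 dev_id, __u8 bus, __u8 devfn,
                                 __u32 guest_irq)
{
        struct kvm_assigned_pci_dev dev = {
                .assigned_dev_id = dev_id,
                .busnr           = bus,
                .devfn           = devfn,
                .flags           = KVM_DEV_ASSIGN_ENABLE_IOMMU,
        };
        struct kvm_assigned_irq irq = {
                .assigned_dev_id = dev_id,
                .guest_irq       = guest_irq,
                /* one host type and one guest type per call, as checked above */
                .flags           = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
        };

        if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)     /* assumed ioctl name */
                return -1;
        return ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);         /* assumed ioctl name */
}

The single-type-per-call restriction in the flags above matches the hweight_long() checks performed in kvm_vm_ioctl_assign_irq().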
662 | 662 | ||
663 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT | 663 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT |
664 | static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, | 664 | static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, |
665 | struct kvm_assigned_pci_dev *assigned_dev) | 665 | struct kvm_assigned_pci_dev *assigned_dev) |
666 | { | 666 | { |
667 | int r = 0; | 667 | int r = 0; |
668 | struct kvm_assigned_dev_kernel *match; | 668 | struct kvm_assigned_dev_kernel *match; |
669 | 669 | ||
670 | mutex_lock(&kvm->lock); | 670 | mutex_lock(&kvm->lock); |
671 | 671 | ||
672 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 672 | match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
673 | assigned_dev->assigned_dev_id); | 673 | assigned_dev->assigned_dev_id); |
674 | if (!match) { | 674 | if (!match) { |
675 | printk(KERN_INFO "%s: device hasn't been assigned before, " | 675 | printk(KERN_INFO "%s: device hasn't been assigned before, " |
676 | "so cannot be deassigned\n", __func__); | 676 | "so cannot be deassigned\n", __func__); |
677 | r = -EINVAL; | 677 | r = -EINVAL; |
678 | goto out; | 678 | goto out; |
679 | } | 679 | } |
680 | 680 | ||
681 | if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) | 681 | if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) |
682 | kvm_deassign_device(kvm, match); | 682 | kvm_deassign_device(kvm, match); |
683 | 683 | ||
684 | kvm_free_assigned_device(kvm, match); | 684 | kvm_free_assigned_device(kvm, match); |
685 | 685 | ||
686 | out: | 686 | out: |
687 | mutex_unlock(&kvm->lock); | 687 | mutex_unlock(&kvm->lock); |
688 | return r; | 688 | return r; |
689 | } | 689 | } |
690 | #endif | 690 | #endif |
691 | 691 | ||
692 | static inline int valid_vcpu(int n) | ||
693 | { | ||
694 | return likely(n >= 0 && n < KVM_MAX_VCPUS); | ||
695 | } | ||
696 | |||
697 | inline int kvm_is_mmio_pfn(pfn_t pfn) | 692 | inline int kvm_is_mmio_pfn(pfn_t pfn) |
698 | { | 693 | { |
699 | if (pfn_valid(pfn)) { | 694 | if (pfn_valid(pfn)) { |
700 | struct page *page = compound_head(pfn_to_page(pfn)); | 695 | struct page *page = compound_head(pfn_to_page(pfn)); |
701 | return PageReserved(page); | 696 | return PageReserved(page); |
702 | } | 697 | } |
703 | 698 | ||
704 | return true; | 699 | return true; |
705 | } | 700 | } |
706 | 701 | ||
707 | /* | 702 | /* |
708 | * Switches to the specified vcpu, until a matching vcpu_put() | 703 | * Switches to the specified vcpu, until a matching vcpu_put() |
709 | */ | 704 | */ |
710 | void vcpu_load(struct kvm_vcpu *vcpu) | 705 | void vcpu_load(struct kvm_vcpu *vcpu) |
711 | { | 706 | { |
712 | int cpu; | 707 | int cpu; |
713 | 708 | ||
714 | mutex_lock(&vcpu->mutex); | 709 | mutex_lock(&vcpu->mutex); |
715 | cpu = get_cpu(); | 710 | cpu = get_cpu(); |
716 | preempt_notifier_register(&vcpu->preempt_notifier); | 711 | preempt_notifier_register(&vcpu->preempt_notifier); |
717 | kvm_arch_vcpu_load(vcpu, cpu); | 712 | kvm_arch_vcpu_load(vcpu, cpu); |
718 | put_cpu(); | 713 | put_cpu(); |
719 | } | 714 | } |
720 | 715 | ||
721 | void vcpu_put(struct kvm_vcpu *vcpu) | 716 | void vcpu_put(struct kvm_vcpu *vcpu) |
722 | { | 717 | { |
723 | preempt_disable(); | 718 | preempt_disable(); |
724 | kvm_arch_vcpu_put(vcpu); | 719 | kvm_arch_vcpu_put(vcpu); |
725 | preempt_notifier_unregister(&vcpu->preempt_notifier); | 720 | preempt_notifier_unregister(&vcpu->preempt_notifier); |
726 | preempt_enable(); | 721 | preempt_enable(); |
727 | mutex_unlock(&vcpu->mutex); | 722 | mutex_unlock(&vcpu->mutex); |
728 | } | 723 | } |
729 | 724 | ||
730 | static void ack_flush(void *_completed) | 725 | static void ack_flush(void *_completed) |
731 | { | 726 | { |
732 | } | 727 | } |
733 | 728 | ||
734 | static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) | 729 | static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) |
735 | { | 730 | { |
736 | int i, cpu, me; | 731 | int i, cpu, me; |
737 | cpumask_var_t cpus; | 732 | cpumask_var_t cpus; |
738 | bool called = true; | 733 | bool called = true; |
739 | struct kvm_vcpu *vcpu; | 734 | struct kvm_vcpu *vcpu; |
740 | 735 | ||
741 | if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) | 736 | if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) |
742 | cpumask_clear(cpus); | 737 | cpumask_clear(cpus); |
743 | 738 | ||
744 | me = get_cpu(); | 739 | me = get_cpu(); |
745 | spin_lock(&kvm->requests_lock); | 740 | spin_lock(&kvm->requests_lock); |
746 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 741 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
747 | vcpu = kvm->vcpus[i]; | 742 | vcpu = kvm->vcpus[i]; |
748 | if (!vcpu) | 743 | if (!vcpu) |
749 | continue; | 744 | continue; |
750 | if (test_and_set_bit(req, &vcpu->requests)) | 745 | if (test_and_set_bit(req, &vcpu->requests)) |
751 | continue; | 746 | continue; |
752 | cpu = vcpu->cpu; | 747 | cpu = vcpu->cpu; |
753 | if (cpus != NULL && cpu != -1 && cpu != me) | 748 | if (cpus != NULL && cpu != -1 && cpu != me) |
754 | cpumask_set_cpu(cpu, cpus); | 749 | cpumask_set_cpu(cpu, cpus); |
755 | } | 750 | } |
756 | if (unlikely(cpus == NULL)) | 751 | if (unlikely(cpus == NULL)) |
757 | smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); | 752 | smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); |
758 | else if (!cpumask_empty(cpus)) | 753 | else if (!cpumask_empty(cpus)) |
759 | smp_call_function_many(cpus, ack_flush, NULL, 1); | 754 | smp_call_function_many(cpus, ack_flush, NULL, 1); |
760 | else | 755 | else |
761 | called = false; | 756 | called = false; |
762 | spin_unlock(&kvm->requests_lock); | 757 | spin_unlock(&kvm->requests_lock); |
763 | put_cpu(); | 758 | put_cpu(); |
764 | free_cpumask_var(cpus); | 759 | free_cpumask_var(cpus); |
765 | return called; | 760 | return called; |
766 | } | 761 | } |
767 | 762 | ||
768 | void kvm_flush_remote_tlbs(struct kvm *kvm) | 763 | void kvm_flush_remote_tlbs(struct kvm *kvm) |
769 | { | 764 | { |
770 | if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) | 765 | if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) |
771 | ++kvm->stat.remote_tlb_flush; | 766 | ++kvm->stat.remote_tlb_flush; |
772 | } | 767 | } |
773 | 768 | ||
774 | void kvm_reload_remote_mmus(struct kvm *kvm) | 769 | void kvm_reload_remote_mmus(struct kvm *kvm) |
775 | { | 770 | { |
776 | make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); | 771 | make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); |
777 | } | 772 | } |
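A rough sketch of the consumer side of these requests, not part of this change: each vcpu is expected to test-and-clear the bit that make_all_cpus_request() set via test_and_set_bit() before re-entering the guest. The helper and the arch-side flush below are illustrative names only.

/* Illustrative consumer of the vcpu->requests bits set by make_all_cpus_request(). */
static inline bool example_check_request(struct kvm_vcpu *vcpu, unsigned int req)
{
        /* Pairs with the test_and_set_bit() in make_all_cpus_request(). */
        return test_and_clear_bit(req, &vcpu->requests);
}

/*
 * In an arch run loop one might then do (sketch; the flush helper is hypothetical):
 *
 *      if (example_check_request(vcpu, KVM_REQ_TLB_FLUSH))
 *              arch_flush_guest_tlb(vcpu);
 */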
778 | 773 | ||
779 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | 774 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) |
780 | { | 775 | { |
781 | struct page *page; | 776 | struct page *page; |
782 | int r; | 777 | int r; |
783 | 778 | ||
784 | mutex_init(&vcpu->mutex); | 779 | mutex_init(&vcpu->mutex); |
785 | vcpu->cpu = -1; | 780 | vcpu->cpu = -1; |
786 | vcpu->kvm = kvm; | 781 | vcpu->kvm = kvm; |
787 | vcpu->vcpu_id = id; | 782 | vcpu->vcpu_id = id; |
788 | init_waitqueue_head(&vcpu->wq); | 783 | init_waitqueue_head(&vcpu->wq); |
789 | 784 | ||
790 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 785 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
791 | if (!page) { | 786 | if (!page) { |
792 | r = -ENOMEM; | 787 | r = -ENOMEM; |
793 | goto fail; | 788 | goto fail; |
794 | } | 789 | } |
795 | vcpu->run = page_address(page); | 790 | vcpu->run = page_address(page); |
796 | 791 | ||
797 | r = kvm_arch_vcpu_init(vcpu); | 792 | r = kvm_arch_vcpu_init(vcpu); |
798 | if (r < 0) | 793 | if (r < 0) |
799 | goto fail_free_run; | 794 | goto fail_free_run; |
800 | return 0; | 795 | return 0; |
801 | 796 | ||
802 | fail_free_run: | 797 | fail_free_run: |
803 | free_page((unsigned long)vcpu->run); | 798 | free_page((unsigned long)vcpu->run); |
804 | fail: | 799 | fail: |
805 | return r; | 800 | return r; |
806 | } | 801 | } |
807 | EXPORT_SYMBOL_GPL(kvm_vcpu_init); | 802 | EXPORT_SYMBOL_GPL(kvm_vcpu_init); |
808 | 803 | ||
809 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) | 804 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) |
810 | { | 805 | { |
811 | kvm_arch_vcpu_uninit(vcpu); | 806 | kvm_arch_vcpu_uninit(vcpu); |
812 | free_page((unsigned long)vcpu->run); | 807 | free_page((unsigned long)vcpu->run); |
813 | } | 808 | } |
814 | EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); | 809 | EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); |
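A minimal sketch of how arch code might pair these helpers during vcpu creation and teardown; the allocation strategy and function names here are assumptions (real arch code embeds struct kvm_vcpu inside a larger arch-specific structure).

/* Sketch only: assumes a bare struct kvm_vcpu allocation for illustration. */
static struct kvm_vcpu *example_create_vcpu(struct kvm *kvm, unsigned id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
        int r;

        if (!vcpu)
                return ERR_PTR(-ENOMEM);

        r = kvm_vcpu_init(vcpu, kvm, id);       /* sets vcpu_id, allocates vcpu->run */
        if (r) {
                kfree(vcpu);
                return ERR_PTR(r);
        }
        return vcpu;
}

static void example_destroy_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_uninit(vcpu);                  /* arch uninit + frees the vcpu->run page */
        kfree(vcpu);
}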
815 | 810 | ||
816 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 811 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
817 | static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) | 812 | static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) |
818 | { | 813 | { |
819 | return container_of(mn, struct kvm, mmu_notifier); | 814 | return container_of(mn, struct kvm, mmu_notifier); |
820 | } | 815 | } |
821 | 816 | ||
822 | static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, | 817 | static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, |
823 | struct mm_struct *mm, | 818 | struct mm_struct *mm, |
824 | unsigned long address) | 819 | unsigned long address) |
825 | { | 820 | { |
826 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 821 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
827 | int need_tlb_flush; | 822 | int need_tlb_flush; |
828 | 823 | ||
829 | /* | 824 | /* |
830 | * When ->invalidate_page runs, the linux pte has been zapped | 825 | * When ->invalidate_page runs, the linux pte has been zapped |
831 | * already but the page is still allocated until | 826 | * already but the page is still allocated until |
832 | * ->invalidate_page returns. So if we increase the sequence | 827 | * ->invalidate_page returns. So if we increase the sequence |
833 | * here the kvm page fault will notice if the spte can't be | 828 | * here the kvm page fault will notice if the spte can't be |
834 | * established because the page is going to be freed. If | 829 | * established because the page is going to be freed. If |
835 | * instead the kvm page fault establishes the spte before | 830 | * instead the kvm page fault establishes the spte before |
836 | * ->invalidate_page runs, kvm_unmap_hva will release it | 831 | * ->invalidate_page runs, kvm_unmap_hva will release it |
837 | * before returning. | 832 | * before returning. |
838 | * | 833 | * |
839 | * The sequence increase only needs to be seen at spin_unlock | 834 | * The sequence increase only needs to be seen at spin_unlock |
840 | * time, and not at spin_lock time. | 835 | * time, and not at spin_lock time. |
841 | * | 836 | * |
842 | * Increasing the sequence after the spin_unlock would be | 837 | * Increasing the sequence after the spin_unlock would be |
843 | * unsafe because the kvm page fault could then establish the | 838 | * unsafe because the kvm page fault could then establish the |
844 | * pte after kvm_unmap_hva returned, without noticing the page | 839 | * pte after kvm_unmap_hva returned, without noticing the page |
845 | * is going to be freed. | 840 | * is going to be freed. |
846 | */ | 841 | */ |
847 | spin_lock(&kvm->mmu_lock); | 842 | spin_lock(&kvm->mmu_lock); |
848 | kvm->mmu_notifier_seq++; | 843 | kvm->mmu_notifier_seq++; |
849 | need_tlb_flush = kvm_unmap_hva(kvm, address); | 844 | need_tlb_flush = kvm_unmap_hva(kvm, address); |
850 | spin_unlock(&kvm->mmu_lock); | 845 | spin_unlock(&kvm->mmu_lock); |
851 | 846 | ||
852 | /* we have to flush the tlb before the pages can be freed */ | 847 | /* we have to flush the tlb before the pages can be freed */ |
853 | if (need_tlb_flush) | 848 | if (need_tlb_flush) |
854 | kvm_flush_remote_tlbs(kvm); | 849 | kvm_flush_remote_tlbs(kvm); |
855 | 850 | ||
856 | } | 851 | } |
857 | 852 | ||
858 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 853 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
859 | struct mm_struct *mm, | 854 | struct mm_struct *mm, |
860 | unsigned long start, | 855 | unsigned long start, |
861 | unsigned long end) | 856 | unsigned long end) |
862 | { | 857 | { |
863 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 858 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
864 | int need_tlb_flush = 0; | 859 | int need_tlb_flush = 0; |
865 | 860 | ||
866 | spin_lock(&kvm->mmu_lock); | 861 | spin_lock(&kvm->mmu_lock); |
867 | /* | 862 | /* |
868 | * The count increase must become visible at unlock time as no | 863 | * The count increase must become visible at unlock time as no |
869 | * spte can be established without taking the mmu_lock and | 864 | * spte can be established without taking the mmu_lock and |
870 | * count is also read inside the mmu_lock critical section. | 865 | * count is also read inside the mmu_lock critical section. |
871 | */ | 866 | */ |
872 | kvm->mmu_notifier_count++; | 867 | kvm->mmu_notifier_count++; |
873 | for (; start < end; start += PAGE_SIZE) | 868 | for (; start < end; start += PAGE_SIZE) |
874 | need_tlb_flush |= kvm_unmap_hva(kvm, start); | 869 | need_tlb_flush |= kvm_unmap_hva(kvm, start); |
875 | spin_unlock(&kvm->mmu_lock); | 870 | spin_unlock(&kvm->mmu_lock); |
876 | 871 | ||
877 | /* we have to flush the tlb before the pages can be freed */ | 872 | /* we have to flush the tlb before the pages can be freed */ |
878 | if (need_tlb_flush) | 873 | if (need_tlb_flush) |
879 | kvm_flush_remote_tlbs(kvm); | 874 | kvm_flush_remote_tlbs(kvm); |
880 | } | 875 | } |
881 | 876 | ||
882 | static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, | 877 | static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, |
883 | struct mm_struct *mm, | 878 | struct mm_struct *mm, |
884 | unsigned long start, | 879 | unsigned long start, |
885 | unsigned long end) | 880 | unsigned long end) |
886 | { | 881 | { |
887 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 882 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
888 | 883 | ||
889 | spin_lock(&kvm->mmu_lock); | 884 | spin_lock(&kvm->mmu_lock); |
890 | /* | 885 | /* |
891 | * This sequence increase will notify the kvm page fault that | 886 | * This sequence increase will notify the kvm page fault that |
892 | * the page that is going to be mapped in the spte could have | 887 | * the page that is going to be mapped in the spte could have |
893 | * been freed. | 888 | * been freed. |
894 | */ | 889 | */ |
895 | kvm->mmu_notifier_seq++; | 890 | kvm->mmu_notifier_seq++; |
896 | /* | 891 | /* |
897 | * The above sequence increase must be visible before the | 892 | * The above sequence increase must be visible before the |
898 | * below count decrease but both values are read by the kvm | 893 | * below count decrease but both values are read by the kvm |
899 | * page fault under mmu_lock spinlock so we don't need to add | 894 | * page fault under mmu_lock spinlock so we don't need to add |
900 | * a smb_wmb() here in between the two. | 895 | * a smb_wmb() here in between the two. |
901 | */ | 896 | */ |
902 | kvm->mmu_notifier_count--; | 897 | kvm->mmu_notifier_count--; |
903 | spin_unlock(&kvm->mmu_lock); | 898 | spin_unlock(&kvm->mmu_lock); |
904 | 899 | ||
905 | BUG_ON(kvm->mmu_notifier_count < 0); | 900 | BUG_ON(kvm->mmu_notifier_count < 0); |
906 | } | 901 | } |
907 | 902 | ||
908 | static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, | 903 | static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, |
909 | struct mm_struct *mm, | 904 | struct mm_struct *mm, |
910 | unsigned long address) | 905 | unsigned long address) |
911 | { | 906 | { |
912 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 907 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
913 | int young; | 908 | int young; |
914 | 909 | ||
915 | spin_lock(&kvm->mmu_lock); | 910 | spin_lock(&kvm->mmu_lock); |
916 | young = kvm_age_hva(kvm, address); | 911 | young = kvm_age_hva(kvm, address); |
917 | spin_unlock(&kvm->mmu_lock); | 912 | spin_unlock(&kvm->mmu_lock); |
918 | 913 | ||
919 | if (young) | 914 | if (young) |
920 | kvm_flush_remote_tlbs(kvm); | 915 | kvm_flush_remote_tlbs(kvm); |
921 | 916 | ||
922 | return young; | 917 | return young; |
923 | } | 918 | } |
924 | 919 | ||
925 | static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | 920 | static void kvm_mmu_notifier_release(struct mmu_notifier *mn, |
926 | struct mm_struct *mm) | 921 | struct mm_struct *mm) |
927 | { | 922 | { |
928 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 923 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
929 | kvm_arch_flush_shadow(kvm); | 924 | kvm_arch_flush_shadow(kvm); |
930 | } | 925 | } |
931 | 926 | ||
932 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | 927 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
933 | .invalidate_page = kvm_mmu_notifier_invalidate_page, | 928 | .invalidate_page = kvm_mmu_notifier_invalidate_page, |
934 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, | 929 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
935 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, | 930 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
936 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, | 931 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |
937 | .release = kvm_mmu_notifier_release, | 932 | .release = kvm_mmu_notifier_release, |
938 | }; | 933 | }; |
939 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ | 934 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ |
940 | 935 | ||
941 | static struct kvm *kvm_create_vm(void) | 936 | static struct kvm *kvm_create_vm(void) |
942 | { | 937 | { |
943 | struct kvm *kvm = kvm_arch_create_vm(); | 938 | struct kvm *kvm = kvm_arch_create_vm(); |
944 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 939 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
945 | struct page *page; | 940 | struct page *page; |
946 | #endif | 941 | #endif |
947 | 942 | ||
948 | if (IS_ERR(kvm)) | 943 | if (IS_ERR(kvm)) |
949 | goto out; | 944 | goto out; |
950 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 945 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
951 | INIT_LIST_HEAD(&kvm->irq_routing); | 946 | INIT_LIST_HEAD(&kvm->irq_routing); |
952 | INIT_HLIST_HEAD(&kvm->mask_notifier_list); | 947 | INIT_HLIST_HEAD(&kvm->mask_notifier_list); |
953 | #endif | 948 | #endif |
954 | 949 | ||
955 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 950 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
956 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 951 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
957 | if (!page) { | 952 | if (!page) { |
958 | kfree(kvm); | 953 | kfree(kvm); |
959 | return ERR_PTR(-ENOMEM); | 954 | return ERR_PTR(-ENOMEM); |
960 | } | 955 | } |
961 | kvm->coalesced_mmio_ring = | 956 | kvm->coalesced_mmio_ring = |
962 | (struct kvm_coalesced_mmio_ring *)page_address(page); | 957 | (struct kvm_coalesced_mmio_ring *)page_address(page); |
963 | #endif | 958 | #endif |
964 | 959 | ||
965 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 960 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
966 | { | 961 | { |
967 | int err; | 962 | int err; |
968 | kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; | 963 | kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; |
969 | err = mmu_notifier_register(&kvm->mmu_notifier, current->mm); | 964 | err = mmu_notifier_register(&kvm->mmu_notifier, current->mm); |
970 | if (err) { | 965 | if (err) { |
971 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 966 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
972 | put_page(page); | 967 | put_page(page); |
973 | #endif | 968 | #endif |
974 | kfree(kvm); | 969 | kfree(kvm); |
975 | return ERR_PTR(err); | 970 | return ERR_PTR(err); |
976 | } | 971 | } |
977 | } | 972 | } |
978 | #endif | 973 | #endif |
979 | 974 | ||
980 | kvm->mm = current->mm; | 975 | kvm->mm = current->mm; |
981 | atomic_inc(&kvm->mm->mm_count); | 976 | atomic_inc(&kvm->mm->mm_count); |
982 | spin_lock_init(&kvm->mmu_lock); | 977 | spin_lock_init(&kvm->mmu_lock); |
983 | spin_lock_init(&kvm->requests_lock); | 978 | spin_lock_init(&kvm->requests_lock); |
984 | kvm_io_bus_init(&kvm->pio_bus); | 979 | kvm_io_bus_init(&kvm->pio_bus); |
985 | kvm_irqfd_init(kvm); | 980 | kvm_irqfd_init(kvm); |
986 | mutex_init(&kvm->lock); | 981 | mutex_init(&kvm->lock); |
987 | mutex_init(&kvm->irq_lock); | 982 | mutex_init(&kvm->irq_lock); |
988 | kvm_io_bus_init(&kvm->mmio_bus); | 983 | kvm_io_bus_init(&kvm->mmio_bus); |
989 | init_rwsem(&kvm->slots_lock); | 984 | init_rwsem(&kvm->slots_lock); |
990 | atomic_set(&kvm->users_count, 1); | 985 | atomic_set(&kvm->users_count, 1); |
991 | spin_lock(&kvm_lock); | 986 | spin_lock(&kvm_lock); |
992 | list_add(&kvm->vm_list, &vm_list); | 987 | list_add(&kvm->vm_list, &vm_list); |
993 | spin_unlock(&kvm_lock); | 988 | spin_unlock(&kvm_lock); |
994 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 989 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
995 | kvm_coalesced_mmio_init(kvm); | 990 | kvm_coalesced_mmio_init(kvm); |
996 | #endif | 991 | #endif |
997 | out: | 992 | out: |
998 | return kvm; | 993 | return kvm; |
999 | } | 994 | } |
1000 | 995 | ||
1001 | /* | 996 | /* |
1002 | * Free any memory in @free but not in @dont. | 997 | * Free any memory in @free but not in @dont. |
1003 | */ | 998 | */ |
1004 | static void kvm_free_physmem_slot(struct kvm_memory_slot *free, | 999 | static void kvm_free_physmem_slot(struct kvm_memory_slot *free, |
1005 | struct kvm_memory_slot *dont) | 1000 | struct kvm_memory_slot *dont) |
1006 | { | 1001 | { |
1007 | if (!dont || free->rmap != dont->rmap) | 1002 | if (!dont || free->rmap != dont->rmap) |
1008 | vfree(free->rmap); | 1003 | vfree(free->rmap); |
1009 | 1004 | ||
1010 | if (!dont || free->dirty_bitmap != dont->dirty_bitmap) | 1005 | if (!dont || free->dirty_bitmap != dont->dirty_bitmap) |
1011 | vfree(free->dirty_bitmap); | 1006 | vfree(free->dirty_bitmap); |
1012 | 1007 | ||
1013 | if (!dont || free->lpage_info != dont->lpage_info) | 1008 | if (!dont || free->lpage_info != dont->lpage_info) |
1014 | vfree(free->lpage_info); | 1009 | vfree(free->lpage_info); |
1015 | 1010 | ||
1016 | free->npages = 0; | 1011 | free->npages = 0; |
1017 | free->dirty_bitmap = NULL; | 1012 | free->dirty_bitmap = NULL; |
1018 | free->rmap = NULL; | 1013 | free->rmap = NULL; |
1019 | free->lpage_info = NULL; | 1014 | free->lpage_info = NULL; |
1020 | } | 1015 | } |
1021 | 1016 | ||
1022 | void kvm_free_physmem(struct kvm *kvm) | 1017 | void kvm_free_physmem(struct kvm *kvm) |
1023 | { | 1018 | { |
1024 | int i; | 1019 | int i; |
1025 | 1020 | ||
1026 | for (i = 0; i < kvm->nmemslots; ++i) | 1021 | for (i = 0; i < kvm->nmemslots; ++i) |
1027 | kvm_free_physmem_slot(&kvm->memslots[i], NULL); | 1022 | kvm_free_physmem_slot(&kvm->memslots[i], NULL); |
1028 | } | 1023 | } |
1029 | 1024 | ||
1030 | static void kvm_destroy_vm(struct kvm *kvm) | 1025 | static void kvm_destroy_vm(struct kvm *kvm) |
1031 | { | 1026 | { |
1032 | struct mm_struct *mm = kvm->mm; | 1027 | struct mm_struct *mm = kvm->mm; |
1033 | 1028 | ||
1034 | kvm_arch_sync_events(kvm); | 1029 | kvm_arch_sync_events(kvm); |
1035 | spin_lock(&kvm_lock); | 1030 | spin_lock(&kvm_lock); |
1036 | list_del(&kvm->vm_list); | 1031 | list_del(&kvm->vm_list); |
1037 | spin_unlock(&kvm_lock); | 1032 | spin_unlock(&kvm_lock); |
1038 | kvm_free_irq_routing(kvm); | 1033 | kvm_free_irq_routing(kvm); |
1039 | kvm_io_bus_destroy(&kvm->pio_bus); | 1034 | kvm_io_bus_destroy(&kvm->pio_bus); |
1040 | kvm_io_bus_destroy(&kvm->mmio_bus); | 1035 | kvm_io_bus_destroy(&kvm->mmio_bus); |
1041 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 1036 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
1042 | if (kvm->coalesced_mmio_ring != NULL) | 1037 | if (kvm->coalesced_mmio_ring != NULL) |
1043 | free_page((unsigned long)kvm->coalesced_mmio_ring); | 1038 | free_page((unsigned long)kvm->coalesced_mmio_ring); |
1044 | #endif | 1039 | #endif |
1045 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 1040 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
1046 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); | 1041 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); |
1047 | #else | 1042 | #else |
1048 | kvm_arch_flush_shadow(kvm); | 1043 | kvm_arch_flush_shadow(kvm); |
1049 | #endif | 1044 | #endif |
1050 | kvm_arch_destroy_vm(kvm); | 1045 | kvm_arch_destroy_vm(kvm); |
1051 | mmdrop(mm); | 1046 | mmdrop(mm); |
1052 | } | 1047 | } |
1053 | 1048 | ||
1054 | void kvm_get_kvm(struct kvm *kvm) | 1049 | void kvm_get_kvm(struct kvm *kvm) |
1055 | { | 1050 | { |
1056 | atomic_inc(&kvm->users_count); | 1051 | atomic_inc(&kvm->users_count); |
1057 | } | 1052 | } |
1058 | EXPORT_SYMBOL_GPL(kvm_get_kvm); | 1053 | EXPORT_SYMBOL_GPL(kvm_get_kvm); |
1059 | 1054 | ||
1060 | void kvm_put_kvm(struct kvm *kvm) | 1055 | void kvm_put_kvm(struct kvm *kvm) |
1061 | { | 1056 | { |
1062 | if (atomic_dec_and_test(&kvm->users_count)) | 1057 | if (atomic_dec_and_test(&kvm->users_count)) |
1063 | kvm_destroy_vm(kvm); | 1058 | kvm_destroy_vm(kvm); |
1064 | } | 1059 | } |
1065 | EXPORT_SYMBOL_GPL(kvm_put_kvm); | 1060 | EXPORT_SYMBOL_GPL(kvm_put_kvm); |
1066 | 1061 | ||
1067 | 1062 | ||
1068 | static int kvm_vm_release(struct inode *inode, struct file *filp) | 1063 | static int kvm_vm_release(struct inode *inode, struct file *filp) |
1069 | { | 1064 | { |
1070 | struct kvm *kvm = filp->private_data; | 1065 | struct kvm *kvm = filp->private_data; |
1071 | 1066 | ||
1072 | kvm_irqfd_release(kvm); | 1067 | kvm_irqfd_release(kvm); |
1073 | 1068 | ||
1074 | kvm_put_kvm(kvm); | 1069 | kvm_put_kvm(kvm); |
1075 | return 0; | 1070 | return 0; |
1076 | } | 1071 | } |
1077 | 1072 | ||
1078 | /* | 1073 | /* |
1079 | * Allocate some memory and give it an address in the guest physical address | 1074 | * Allocate some memory and give it an address in the guest physical address |
1080 | * space. | 1075 | * space. |
1081 | * | 1076 | * |
1082 | * Discontiguous memory is allowed, mostly for framebuffers. | 1077 | * Discontiguous memory is allowed, mostly for framebuffers. |
1083 | * | 1078 | * |
1084 | * Must be called holding mmap_sem for write. | 1079 | * Must be called holding mmap_sem for write. |
1085 | */ | 1080 | */ |
1086 | int __kvm_set_memory_region(struct kvm *kvm, | 1081 | int __kvm_set_memory_region(struct kvm *kvm, |
1087 | struct kvm_userspace_memory_region *mem, | 1082 | struct kvm_userspace_memory_region *mem, |
1088 | int user_alloc) | 1083 | int user_alloc) |
1089 | { | 1084 | { |
1090 | int r; | 1085 | int r; |
1091 | gfn_t base_gfn; | 1086 | gfn_t base_gfn; |
1092 | unsigned long npages, ugfn; | 1087 | unsigned long npages, ugfn; |
1093 | unsigned long largepages, i; | 1088 | unsigned long largepages, i; |
1094 | struct kvm_memory_slot *memslot; | 1089 | struct kvm_memory_slot *memslot; |
1095 | struct kvm_memory_slot old, new; | 1090 | struct kvm_memory_slot old, new; |
1096 | 1091 | ||
1097 | r = -EINVAL; | 1092 | r = -EINVAL; |
1098 | /* General sanity checks */ | 1093 | /* General sanity checks */ |
1099 | if (mem->memory_size & (PAGE_SIZE - 1)) | 1094 | if (mem->memory_size & (PAGE_SIZE - 1)) |
1100 | goto out; | 1095 | goto out; |
1101 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) | 1096 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
1102 | goto out; | 1097 | goto out; |
1103 | if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) | 1098 | if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) |
1104 | goto out; | 1099 | goto out; |
1105 | if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) | 1100 | if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
1106 | goto out; | 1101 | goto out; |
1107 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) | 1102 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
1108 | goto out; | 1103 | goto out; |
1109 | 1104 | ||
1110 | memslot = &kvm->memslots[mem->slot]; | 1105 | memslot = &kvm->memslots[mem->slot]; |
1111 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; | 1106 | base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; |
1112 | npages = mem->memory_size >> PAGE_SHIFT; | 1107 | npages = mem->memory_size >> PAGE_SHIFT; |
1113 | 1108 | ||
1114 | if (!npages) | 1109 | if (!npages) |
1115 | mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; | 1110 | mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; |
1116 | 1111 | ||
1117 | new = old = *memslot; | 1112 | new = old = *memslot; |
1118 | 1113 | ||
1119 | new.base_gfn = base_gfn; | 1114 | new.base_gfn = base_gfn; |
1120 | new.npages = npages; | 1115 | new.npages = npages; |
1121 | new.flags = mem->flags; | 1116 | new.flags = mem->flags; |
1122 | 1117 | ||
1123 | /* Disallow changing a memory slot's size. */ | 1118 | /* Disallow changing a memory slot's size. */ |
1124 | r = -EINVAL; | 1119 | r = -EINVAL; |
1125 | if (npages && old.npages && npages != old.npages) | 1120 | if (npages && old.npages && npages != old.npages) |
1126 | goto out_free; | 1121 | goto out_free; |
1127 | 1122 | ||
1128 | /* Check for overlaps */ | 1123 | /* Check for overlaps */ |
1129 | r = -EEXIST; | 1124 | r = -EEXIST; |
1130 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 1125 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
1131 | struct kvm_memory_slot *s = &kvm->memslots[i]; | 1126 | struct kvm_memory_slot *s = &kvm->memslots[i]; |
1132 | 1127 | ||
1133 | if (s == memslot || !s->npages) | 1128 | if (s == memslot || !s->npages) |
1134 | continue; | 1129 | continue; |
1135 | if (!((base_gfn + npages <= s->base_gfn) || | 1130 | if (!((base_gfn + npages <= s->base_gfn) || |
1136 | (base_gfn >= s->base_gfn + s->npages))) | 1131 | (base_gfn >= s->base_gfn + s->npages))) |
1137 | goto out_free; | 1132 | goto out_free; |
1138 | } | 1133 | } |
1139 | 1134 | ||
1140 | /* Free page dirty bitmap if unneeded */ | 1135 | /* Free page dirty bitmap if unneeded */ |
1141 | if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) | 1136 | if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) |
1142 | new.dirty_bitmap = NULL; | 1137 | new.dirty_bitmap = NULL; |
1143 | 1138 | ||
1144 | r = -ENOMEM; | 1139 | r = -ENOMEM; |
1145 | 1140 | ||
1146 | /* Allocate if a slot is being created */ | 1141 | /* Allocate if a slot is being created */ |
1147 | #ifndef CONFIG_S390 | 1142 | #ifndef CONFIG_S390 |
1148 | if (npages && !new.rmap) { | 1143 | if (npages && !new.rmap) { |
1149 | new.rmap = vmalloc(npages * sizeof(struct page *)); | 1144 | new.rmap = vmalloc(npages * sizeof(struct page *)); |
1150 | 1145 | ||
1151 | if (!new.rmap) | 1146 | if (!new.rmap) |
1152 | goto out_free; | 1147 | goto out_free; |
1153 | 1148 | ||
1154 | memset(new.rmap, 0, npages * sizeof(*new.rmap)); | 1149 | memset(new.rmap, 0, npages * sizeof(*new.rmap)); |
1155 | 1150 | ||
1156 | new.user_alloc = user_alloc; | 1151 | new.user_alloc = user_alloc; |
1157 | /* | 1152 | /* |
1158 | * hva_to_rmmap() serializes with the mmu_lock and to be | 1153 | * hva_to_rmmap() serializes with the mmu_lock and to be |
1159 | * safe it has to ignore memslots with !user_alloc && | 1154 | * safe it has to ignore memslots with !user_alloc && |
1160 | * !userspace_addr. | 1155 | * !userspace_addr. |
1161 | */ | 1156 | */ |
1162 | if (user_alloc) | 1157 | if (user_alloc) |
1163 | new.userspace_addr = mem->userspace_addr; | 1158 | new.userspace_addr = mem->userspace_addr; |
1164 | else | 1159 | else |
1165 | new.userspace_addr = 0; | 1160 | new.userspace_addr = 0; |
1166 | } | 1161 | } |
1167 | if (npages && !new.lpage_info) { | 1162 | if (npages && !new.lpage_info) { |
1168 | largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE; | 1163 | largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE; |
1169 | largepages -= base_gfn / KVM_PAGES_PER_HPAGE; | 1164 | largepages -= base_gfn / KVM_PAGES_PER_HPAGE; |
1170 | 1165 | ||
1171 | new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info)); | 1166 | new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info)); |
1172 | 1167 | ||
1173 | if (!new.lpage_info) | 1168 | if (!new.lpage_info) |
1174 | goto out_free; | 1169 | goto out_free; |
1175 | 1170 | ||
1176 | memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info)); | 1171 | memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info)); |
1177 | 1172 | ||
1178 | if (base_gfn % KVM_PAGES_PER_HPAGE) | 1173 | if (base_gfn % KVM_PAGES_PER_HPAGE) |
1179 | new.lpage_info[0].write_count = 1; | 1174 | new.lpage_info[0].write_count = 1; |
1180 | if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE) | 1175 | if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE) |
1181 | new.lpage_info[largepages-1].write_count = 1; | 1176 | new.lpage_info[largepages-1].write_count = 1; |
1182 | ugfn = new.userspace_addr >> PAGE_SHIFT; | 1177 | ugfn = new.userspace_addr >> PAGE_SHIFT; |
1183 | /* | 1178 | /* |
1184 | * If the gfn and userspace address are not aligned wrt each | 1179 | * If the gfn and userspace address are not aligned wrt each |
1185 | * other, disable large page support for this slot | 1180 | * other, disable large page support for this slot |
1186 | */ | 1181 | */ |
1187 | if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1)) | 1182 | if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1)) |
1188 | for (i = 0; i < largepages; ++i) | 1183 | for (i = 0; i < largepages; ++i) |
1189 | new.lpage_info[i].write_count = 1; | 1184 | new.lpage_info[i].write_count = 1; |
1190 | } | 1185 | } |
1191 | 1186 | ||
1192 | /* Allocate page dirty bitmap if needed */ | 1187 | /* Allocate page dirty bitmap if needed */ |
1193 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { | 1188 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { |
1194 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; | 1189 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; |
1195 | 1190 | ||
1196 | new.dirty_bitmap = vmalloc(dirty_bytes); | 1191 | new.dirty_bitmap = vmalloc(dirty_bytes); |
1197 | if (!new.dirty_bitmap) | 1192 | if (!new.dirty_bitmap) |
1198 | goto out_free; | 1193 | goto out_free; |
1199 | memset(new.dirty_bitmap, 0, dirty_bytes); | 1194 | memset(new.dirty_bitmap, 0, dirty_bytes); |
1200 | if (old.npages) | 1195 | if (old.npages) |
1201 | kvm_arch_flush_shadow(kvm); | 1196 | kvm_arch_flush_shadow(kvm); |
1202 | } | 1197 | } |
1203 | #endif /* not defined CONFIG_S390 */ | 1198 | #endif /* not defined CONFIG_S390 */ |
1204 | 1199 | ||
1205 | if (!npages) | 1200 | if (!npages) |
1206 | kvm_arch_flush_shadow(kvm); | 1201 | kvm_arch_flush_shadow(kvm); |
1207 | 1202 | ||
1208 | spin_lock(&kvm->mmu_lock); | 1203 | spin_lock(&kvm->mmu_lock); |
1209 | if (mem->slot >= kvm->nmemslots) | 1204 | if (mem->slot >= kvm->nmemslots) |
1210 | kvm->nmemslots = mem->slot + 1; | 1205 | kvm->nmemslots = mem->slot + 1; |
1211 | 1206 | ||
1212 | *memslot = new; | 1207 | *memslot = new; |
1213 | spin_unlock(&kvm->mmu_lock); | 1208 | spin_unlock(&kvm->mmu_lock); |
1214 | 1209 | ||
1215 | r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); | 1210 | r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); |
1216 | if (r) { | 1211 | if (r) { |
1217 | spin_lock(&kvm->mmu_lock); | 1212 | spin_lock(&kvm->mmu_lock); |
1218 | *memslot = old; | 1213 | *memslot = old; |
1219 | spin_unlock(&kvm->mmu_lock); | 1214 | spin_unlock(&kvm->mmu_lock); |
1220 | goto out_free; | 1215 | goto out_free; |
1221 | } | 1216 | } |
1222 | 1217 | ||
1223 | kvm_free_physmem_slot(&old, npages ? &new : NULL); | 1218 | kvm_free_physmem_slot(&old, npages ? &new : NULL); |
1224 | /* Slot deletion case: we have to update the current slot */ | 1219 | /* Slot deletion case: we have to update the current slot */ |
1225 | spin_lock(&kvm->mmu_lock); | 1220 | spin_lock(&kvm->mmu_lock); |
1226 | if (!npages) | 1221 | if (!npages) |
1227 | *memslot = old; | 1222 | *memslot = old; |
1228 | spin_unlock(&kvm->mmu_lock); | 1223 | spin_unlock(&kvm->mmu_lock); |
1229 | #ifdef CONFIG_DMAR | 1224 | #ifdef CONFIG_DMAR |
1230 | /* map the pages in iommu page table */ | 1225 | /* map the pages in iommu page table */ |
1231 | r = kvm_iommu_map_pages(kvm, base_gfn, npages); | 1226 | r = kvm_iommu_map_pages(kvm, base_gfn, npages); |
1232 | if (r) | 1227 | if (r) |
1233 | goto out; | 1228 | goto out; |
1234 | #endif | 1229 | #endif |
1235 | return 0; | 1230 | return 0; |
1236 | 1231 | ||
1237 | out_free: | 1232 | out_free: |
1238 | kvm_free_physmem_slot(&new, &old); | 1233 | kvm_free_physmem_slot(&new, &old); |
1239 | out: | 1234 | out: |
1240 | return r; | 1235 | return r; |
1241 | 1236 | ||
1242 | } | 1237 | } |
1243 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); | 1238 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
1244 | 1239 | ||
1245 | int kvm_set_memory_region(struct kvm *kvm, | 1240 | int kvm_set_memory_region(struct kvm *kvm, |
1246 | struct kvm_userspace_memory_region *mem, | 1241 | struct kvm_userspace_memory_region *mem, |
1247 | int user_alloc) | 1242 | int user_alloc) |
1248 | { | 1243 | { |
1249 | int r; | 1244 | int r; |
1250 | 1245 | ||
1251 | down_write(&kvm->slots_lock); | 1246 | down_write(&kvm->slots_lock); |
1252 | r = __kvm_set_memory_region(kvm, mem, user_alloc); | 1247 | r = __kvm_set_memory_region(kvm, mem, user_alloc); |
1253 | up_write(&kvm->slots_lock); | 1248 | up_write(&kvm->slots_lock); |
1254 | return r; | 1249 | return r; |
1255 | } | 1250 | } |
1256 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); | 1251 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
1257 | 1252 | ||
1258 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 1253 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
1259 | struct | 1254 | struct |
1260 | kvm_userspace_memory_region *mem, | 1255 | kvm_userspace_memory_region *mem, |
1261 | int user_alloc) | 1256 | int user_alloc) |
1262 | { | 1257 | { |
1263 | if (mem->slot >= KVM_MEMORY_SLOTS) | 1258 | if (mem->slot >= KVM_MEMORY_SLOTS) |
1264 | return -EINVAL; | 1259 | return -EINVAL; |
1265 | return kvm_set_memory_region(kvm, mem, user_alloc); | 1260 | return kvm_set_memory_region(kvm, mem, user_alloc); |
1266 | } | 1261 | } |
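/*
 * A minimal userspace sketch of reaching the checks above through the
 * KVM_SET_USER_MEMORY_REGION vm ioctl.  The /dev/kvm open, the
 * KVM_CREATE_VM call, the 16 MiB size and slot 0 are illustrative
 * assumptions; only the ioctl name and the struct fields come from the
 * handler above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    int vm = kvm < 0 ? -1 : ioctl(kvm, KVM_CREATE_VM, 0);
    size_t size = 16 << 20;                 /* page aligned, as required above */
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct kvm_userspace_memory_region region = {
        .slot            = 0,               /* must be < KVM_MEMORY_SLOTS */
        .flags           = 0,               /* or KVM_MEM_LOG_DIRTY_PAGES */
        .guest_phys_addr = 0,               /* page aligned, checked above */
        .memory_size     = size,
        .userspace_addr  = (unsigned long)mem,
    };

    if (vm < 0 || mem == MAP_FAILED ||
        ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
        perror("KVM_SET_USER_MEMORY_REGION");
        return EXIT_FAILURE;
    }
    return 0;
}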
1267 | 1262 | ||
1268 | int kvm_get_dirty_log(struct kvm *kvm, | 1263 | int kvm_get_dirty_log(struct kvm *kvm, |
1269 | struct kvm_dirty_log *log, int *is_dirty) | 1264 | struct kvm_dirty_log *log, int *is_dirty) |
1270 | { | 1265 | { |
1271 | struct kvm_memory_slot *memslot; | 1266 | struct kvm_memory_slot *memslot; |
1272 | int r, i; | 1267 | int r, i; |
1273 | int n; | 1268 | int n; |
1274 | unsigned long any = 0; | 1269 | unsigned long any = 0; |
1275 | 1270 | ||
1276 | r = -EINVAL; | 1271 | r = -EINVAL; |
1277 | if (log->slot >= KVM_MEMORY_SLOTS) | 1272 | if (log->slot >= KVM_MEMORY_SLOTS) |
1278 | goto out; | 1273 | goto out; |
1279 | 1274 | ||
1280 | memslot = &kvm->memslots[log->slot]; | 1275 | memslot = &kvm->memslots[log->slot]; |
1281 | r = -ENOENT; | 1276 | r = -ENOENT; |
1282 | if (!memslot->dirty_bitmap) | 1277 | if (!memslot->dirty_bitmap) |
1283 | goto out; | 1278 | goto out; |
1284 | 1279 | ||
1285 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 1280 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
1286 | 1281 | ||
1287 | for (i = 0; !any && i < n/sizeof(long); ++i) | 1282 | for (i = 0; !any && i < n/sizeof(long); ++i) |
1288 | any = memslot->dirty_bitmap[i]; | 1283 | any = memslot->dirty_bitmap[i]; |
1289 | 1284 | ||
1290 | r = -EFAULT; | 1285 | r = -EFAULT; |
1291 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) | 1286 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) |
1292 | goto out; | 1287 | goto out; |
1293 | 1288 | ||
1294 | if (any) | 1289 | if (any) |
1295 | *is_dirty = 1; | 1290 | *is_dirty = 1; |
1296 | 1291 | ||
1297 | r = 0; | 1292 | r = 0; |
1298 | out: | 1293 | out: |
1299 | return r; | 1294 | return r; |
1300 | } | 1295 | } |
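/*
 * A minimal userspace sketch of consuming the bitmap filled in by
 * kvm_get_dirty_log(), via the KVM_GET_DIRTY_LOG vm ioctl.  vm_fd, the
 * slot number and npages are assumed to come from earlier setup; the
 * byte count mirrors the ALIGN(npages, BITS_PER_LONG) / 8 computation above.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, unsigned int slot, unsigned long npages)
{
    size_t bits = sizeof(unsigned long) * 8;    /* BITS_PER_LONG */
    size_t bytes = (npages + bits - 1) / bits * sizeof(unsigned long);
    unsigned long *bitmap = calloc(1, bytes);   /* one bit per page in the slot */
    struct kvm_dirty_log log = {
        .slot = slot,                           /* must be < KVM_MEMORY_SLOTS */
        .dirty_bitmap = bitmap,
    };
    int ret;

    if (!bitmap)
        return -1;
    ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
    /* On success, bit i set means page (base_gfn + i) was written. */
    free(bitmap);
    return ret;
}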
1301 | 1296 | ||
1302 | int is_error_page(struct page *page) | 1297 | int is_error_page(struct page *page) |
1303 | { | 1298 | { |
1304 | return page == bad_page; | 1299 | return page == bad_page; |
1305 | } | 1300 | } |
1306 | EXPORT_SYMBOL_GPL(is_error_page); | 1301 | EXPORT_SYMBOL_GPL(is_error_page); |
1307 | 1302 | ||
1308 | int is_error_pfn(pfn_t pfn) | 1303 | int is_error_pfn(pfn_t pfn) |
1309 | { | 1304 | { |
1310 | return pfn == bad_pfn; | 1305 | return pfn == bad_pfn; |
1311 | } | 1306 | } |
1312 | EXPORT_SYMBOL_GPL(is_error_pfn); | 1307 | EXPORT_SYMBOL_GPL(is_error_pfn); |
1313 | 1308 | ||
1314 | static inline unsigned long bad_hva(void) | 1309 | static inline unsigned long bad_hva(void) |
1315 | { | 1310 | { |
1316 | return PAGE_OFFSET; | 1311 | return PAGE_OFFSET; |
1317 | } | 1312 | } |
1318 | 1313 | ||
1319 | int kvm_is_error_hva(unsigned long addr) | 1314 | int kvm_is_error_hva(unsigned long addr) |
1320 | { | 1315 | { |
1321 | return addr == bad_hva(); | 1316 | return addr == bad_hva(); |
1322 | } | 1317 | } |
1323 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); | 1318 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); |
1324 | 1319 | ||
1325 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) | 1320 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) |
1326 | { | 1321 | { |
1327 | int i; | 1322 | int i; |
1328 | 1323 | ||
1329 | for (i = 0; i < kvm->nmemslots; ++i) { | 1324 | for (i = 0; i < kvm->nmemslots; ++i) { |
1330 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; | 1325 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; |
1331 | 1326 | ||
1332 | if (gfn >= memslot->base_gfn | 1327 | if (gfn >= memslot->base_gfn |
1333 | && gfn < memslot->base_gfn + memslot->npages) | 1328 | && gfn < memslot->base_gfn + memslot->npages) |
1334 | return memslot; | 1329 | return memslot; |
1335 | } | 1330 | } |
1336 | return NULL; | 1331 | return NULL; |
1337 | } | 1332 | } |
1338 | EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); | 1333 | EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); |
1339 | 1334 | ||
1340 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | 1335 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
1341 | { | 1336 | { |
1342 | gfn = unalias_gfn(kvm, gfn); | 1337 | gfn = unalias_gfn(kvm, gfn); |
1343 | return gfn_to_memslot_unaliased(kvm, gfn); | 1338 | return gfn_to_memslot_unaliased(kvm, gfn); |
1344 | } | 1339 | } |
1345 | 1340 | ||
1346 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) | 1341 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
1347 | { | 1342 | { |
1348 | int i; | 1343 | int i; |
1349 | 1344 | ||
1350 | gfn = unalias_gfn(kvm, gfn); | 1345 | gfn = unalias_gfn(kvm, gfn); |
1351 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 1346 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
1352 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; | 1347 | struct kvm_memory_slot *memslot = &kvm->memslots[i]; |
1353 | 1348 | ||
1354 | if (gfn >= memslot->base_gfn | 1349 | if (gfn >= memslot->base_gfn |
1355 | && gfn < memslot->base_gfn + memslot->npages) | 1350 | && gfn < memslot->base_gfn + memslot->npages) |
1356 | return 1; | 1351 | return 1; |
1357 | } | 1352 | } |
1358 | return 0; | 1353 | return 0; |
1359 | } | 1354 | } |
1360 | EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); | 1355 | EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); |
1361 | 1356 | ||
1362 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) | 1357 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) |
1363 | { | 1358 | { |
1364 | struct kvm_memory_slot *slot; | 1359 | struct kvm_memory_slot *slot; |
1365 | 1360 | ||
1366 | gfn = unalias_gfn(kvm, gfn); | 1361 | gfn = unalias_gfn(kvm, gfn); |
1367 | slot = gfn_to_memslot_unaliased(kvm, gfn); | 1362 | slot = gfn_to_memslot_unaliased(kvm, gfn); |
1368 | if (!slot) | 1363 | if (!slot) |
1369 | return bad_hva(); | 1364 | return bad_hva(); |
1370 | return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); | 1365 | return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); |
1371 | } | 1366 | } |
1372 | EXPORT_SYMBOL_GPL(gfn_to_hva); | 1367 | EXPORT_SYMBOL_GPL(gfn_to_hva); |
1373 | 1368 | ||
1374 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) | 1369 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) |
1375 | { | 1370 | { |
1376 | struct page *page[1]; | 1371 | struct page *page[1]; |
1377 | unsigned long addr; | 1372 | unsigned long addr; |
1378 | int npages; | 1373 | int npages; |
1379 | pfn_t pfn; | 1374 | pfn_t pfn; |
1380 | 1375 | ||
1381 | might_sleep(); | 1376 | might_sleep(); |
1382 | 1377 | ||
1383 | addr = gfn_to_hva(kvm, gfn); | 1378 | addr = gfn_to_hva(kvm, gfn); |
1384 | if (kvm_is_error_hva(addr)) { | 1379 | if (kvm_is_error_hva(addr)) { |
1385 | get_page(bad_page); | 1380 | get_page(bad_page); |
1386 | return page_to_pfn(bad_page); | 1381 | return page_to_pfn(bad_page); |
1387 | } | 1382 | } |
1388 | 1383 | ||
1389 | npages = get_user_pages_fast(addr, 1, 1, page); | 1384 | npages = get_user_pages_fast(addr, 1, 1, page); |
1390 | 1385 | ||
1391 | if (unlikely(npages != 1)) { | 1386 | if (unlikely(npages != 1)) { |
1392 | struct vm_area_struct *vma; | 1387 | struct vm_area_struct *vma; |
1393 | 1388 | ||
1394 | down_read(&current->mm->mmap_sem); | 1389 | down_read(&current->mm->mmap_sem); |
1395 | vma = find_vma(current->mm, addr); | 1390 | vma = find_vma(current->mm, addr); |
1396 | 1391 | ||
1397 | if (vma == NULL || addr < vma->vm_start || | 1392 | if (vma == NULL || addr < vma->vm_start || |
1398 | !(vma->vm_flags & VM_PFNMAP)) { | 1393 | !(vma->vm_flags & VM_PFNMAP)) { |
1399 | up_read(&current->mm->mmap_sem); | 1394 | up_read(&current->mm->mmap_sem); |
1400 | get_page(bad_page); | 1395 | get_page(bad_page); |
1401 | return page_to_pfn(bad_page); | 1396 | return page_to_pfn(bad_page); |
1402 | } | 1397 | } |
1403 | 1398 | ||
1404 | pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 1399 | pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
1405 | up_read(&current->mm->mmap_sem); | 1400 | up_read(&current->mm->mmap_sem); |
1406 | BUG_ON(!kvm_is_mmio_pfn(pfn)); | 1401 | BUG_ON(!kvm_is_mmio_pfn(pfn)); |
1407 | } else | 1402 | } else |
1408 | pfn = page_to_pfn(page[0]); | 1403 | pfn = page_to_pfn(page[0]); |
1409 | 1404 | ||
1410 | return pfn; | 1405 | return pfn; |
1411 | } | 1406 | } |
1412 | 1407 | ||
1413 | EXPORT_SYMBOL_GPL(gfn_to_pfn); | 1408 | EXPORT_SYMBOL_GPL(gfn_to_pfn); |
1414 | 1409 | ||
1415 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) | 1410 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
1416 | { | 1411 | { |
1417 | pfn_t pfn; | 1412 | pfn_t pfn; |
1418 | 1413 | ||
1419 | pfn = gfn_to_pfn(kvm, gfn); | 1414 | pfn = gfn_to_pfn(kvm, gfn); |
1420 | if (!kvm_is_mmio_pfn(pfn)) | 1415 | if (!kvm_is_mmio_pfn(pfn)) |
1421 | return pfn_to_page(pfn); | 1416 | return pfn_to_page(pfn); |
1422 | 1417 | ||
1423 | WARN_ON(kvm_is_mmio_pfn(pfn)); | 1418 | WARN_ON(kvm_is_mmio_pfn(pfn)); |
1424 | 1419 | ||
1425 | get_page(bad_page); | 1420 | get_page(bad_page); |
1426 | return bad_page; | 1421 | return bad_page; |
1427 | } | 1422 | } |
1428 | 1423 | ||
1429 | EXPORT_SYMBOL_GPL(gfn_to_page); | 1424 | EXPORT_SYMBOL_GPL(gfn_to_page); |
1430 | 1425 | ||
1431 | void kvm_release_page_clean(struct page *page) | 1426 | void kvm_release_page_clean(struct page *page) |
1432 | { | 1427 | { |
1433 | kvm_release_pfn_clean(page_to_pfn(page)); | 1428 | kvm_release_pfn_clean(page_to_pfn(page)); |
1434 | } | 1429 | } |
1435 | EXPORT_SYMBOL_GPL(kvm_release_page_clean); | 1430 | EXPORT_SYMBOL_GPL(kvm_release_page_clean); |
1436 | 1431 | ||
1437 | void kvm_release_pfn_clean(pfn_t pfn) | 1432 | void kvm_release_pfn_clean(pfn_t pfn) |
1438 | { | 1433 | { |
1439 | if (!kvm_is_mmio_pfn(pfn)) | 1434 | if (!kvm_is_mmio_pfn(pfn)) |
1440 | put_page(pfn_to_page(pfn)); | 1435 | put_page(pfn_to_page(pfn)); |
1441 | } | 1436 | } |
1442 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); | 1437 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
1443 | 1438 | ||
1444 | void kvm_release_page_dirty(struct page *page) | 1439 | void kvm_release_page_dirty(struct page *page) |
1445 | { | 1440 | { |
1446 | kvm_release_pfn_dirty(page_to_pfn(page)); | 1441 | kvm_release_pfn_dirty(page_to_pfn(page)); |
1447 | } | 1442 | } |
1448 | EXPORT_SYMBOL_GPL(kvm_release_page_dirty); | 1443 | EXPORT_SYMBOL_GPL(kvm_release_page_dirty); |
1449 | 1444 | ||
1450 | void kvm_release_pfn_dirty(pfn_t pfn) | 1445 | void kvm_release_pfn_dirty(pfn_t pfn) |
1451 | { | 1446 | { |
1452 | kvm_set_pfn_dirty(pfn); | 1447 | kvm_set_pfn_dirty(pfn); |
1453 | kvm_release_pfn_clean(pfn); | 1448 | kvm_release_pfn_clean(pfn); |
1454 | } | 1449 | } |
1455 | EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); | 1450 | EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); |
1456 | 1451 | ||
1457 | void kvm_set_page_dirty(struct page *page) | 1452 | void kvm_set_page_dirty(struct page *page) |
1458 | { | 1453 | { |
1459 | kvm_set_pfn_dirty(page_to_pfn(page)); | 1454 | kvm_set_pfn_dirty(page_to_pfn(page)); |
1460 | } | 1455 | } |
1461 | EXPORT_SYMBOL_GPL(kvm_set_page_dirty); | 1456 | EXPORT_SYMBOL_GPL(kvm_set_page_dirty); |
1462 | 1457 | ||
1463 | void kvm_set_pfn_dirty(pfn_t pfn) | 1458 | void kvm_set_pfn_dirty(pfn_t pfn) |
1464 | { | 1459 | { |
1465 | if (!kvm_is_mmio_pfn(pfn)) { | 1460 | if (!kvm_is_mmio_pfn(pfn)) { |
1466 | struct page *page = pfn_to_page(pfn); | 1461 | struct page *page = pfn_to_page(pfn); |
1467 | if (!PageReserved(page)) | 1462 | if (!PageReserved(page)) |
1468 | SetPageDirty(page); | 1463 | SetPageDirty(page); |
1469 | } | 1464 | } |
1470 | } | 1465 | } |
1471 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); | 1466 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
1472 | 1467 | ||
1473 | void kvm_set_pfn_accessed(pfn_t pfn) | 1468 | void kvm_set_pfn_accessed(pfn_t pfn) |
1474 | { | 1469 | { |
1475 | if (!kvm_is_mmio_pfn(pfn)) | 1470 | if (!kvm_is_mmio_pfn(pfn)) |
1476 | mark_page_accessed(pfn_to_page(pfn)); | 1471 | mark_page_accessed(pfn_to_page(pfn)); |
1477 | } | 1472 | } |
1478 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); | 1473 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
1479 | 1474 | ||
1480 | void kvm_get_pfn(pfn_t pfn) | 1475 | void kvm_get_pfn(pfn_t pfn) |
1481 | { | 1476 | { |
1482 | if (!kvm_is_mmio_pfn(pfn)) | 1477 | if (!kvm_is_mmio_pfn(pfn)) |
1483 | get_page(pfn_to_page(pfn)); | 1478 | get_page(pfn_to_page(pfn)); |
1484 | } | 1479 | } |
1485 | EXPORT_SYMBOL_GPL(kvm_get_pfn); | 1480 | EXPORT_SYMBOL_GPL(kvm_get_pfn); |
1486 | 1481 | ||
1487 | static int next_segment(unsigned long len, int offset) | 1482 | static int next_segment(unsigned long len, int offset) |
1488 | { | 1483 | { |
1489 | if (len > PAGE_SIZE - offset) | 1484 | if (len > PAGE_SIZE - offset) |
1490 | return PAGE_SIZE - offset; | 1485 | return PAGE_SIZE - offset; |
1491 | else | 1486 | else |
1492 | return len; | 1487 | return len; |
1493 | } | 1488 | } |
1494 | 1489 | ||
1495 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, | 1490 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
1496 | int len) | 1491 | int len) |
1497 | { | 1492 | { |
1498 | int r; | 1493 | int r; |
1499 | unsigned long addr; | 1494 | unsigned long addr; |
1500 | 1495 | ||
1501 | addr = gfn_to_hva(kvm, gfn); | 1496 | addr = gfn_to_hva(kvm, gfn); |
1502 | if (kvm_is_error_hva(addr)) | 1497 | if (kvm_is_error_hva(addr)) |
1503 | return -EFAULT; | 1498 | return -EFAULT; |
1504 | r = copy_from_user(data, (void __user *)addr + offset, len); | 1499 | r = copy_from_user(data, (void __user *)addr + offset, len); |
1505 | if (r) | 1500 | if (r) |
1506 | return -EFAULT; | 1501 | return -EFAULT; |
1507 | return 0; | 1502 | return 0; |
1508 | } | 1503 | } |
1509 | EXPORT_SYMBOL_GPL(kvm_read_guest_page); | 1504 | EXPORT_SYMBOL_GPL(kvm_read_guest_page); |
1510 | 1505 | ||
1511 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) | 1506 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) |
1512 | { | 1507 | { |
1513 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1508 | gfn_t gfn = gpa >> PAGE_SHIFT; |
1514 | int seg; | 1509 | int seg; |
1515 | int offset = offset_in_page(gpa); | 1510 | int offset = offset_in_page(gpa); |
1516 | int ret; | 1511 | int ret; |
1517 | 1512 | ||
1518 | while ((seg = next_segment(len, offset)) != 0) { | 1513 | while ((seg = next_segment(len, offset)) != 0) { |
1519 | ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); | 1514 | ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); |
1520 | if (ret < 0) | 1515 | if (ret < 0) |
1521 | return ret; | 1516 | return ret; |
1522 | offset = 0; | 1517 | offset = 0; |
1523 | len -= seg; | 1518 | len -= seg; |
1524 | data += seg; | 1519 | data += seg; |
1525 | ++gfn; | 1520 | ++gfn; |
1526 | } | 1521 | } |
1527 | return 0; | 1522 | return 0; |
1528 | } | 1523 | } |
1529 | EXPORT_SYMBOL_GPL(kvm_read_guest); | 1524 | EXPORT_SYMBOL_GPL(kvm_read_guest); |
1530 | 1525 | ||
1531 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, | 1526 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, |
1532 | unsigned long len) | 1527 | unsigned long len) |
1533 | { | 1528 | { |
1534 | int r; | 1529 | int r; |
1535 | unsigned long addr; | 1530 | unsigned long addr; |
1536 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1531 | gfn_t gfn = gpa >> PAGE_SHIFT; |
1537 | int offset = offset_in_page(gpa); | 1532 | int offset = offset_in_page(gpa); |
1538 | 1533 | ||
1539 | addr = gfn_to_hva(kvm, gfn); | 1534 | addr = gfn_to_hva(kvm, gfn); |
1540 | if (kvm_is_error_hva(addr)) | 1535 | if (kvm_is_error_hva(addr)) |
1541 | return -EFAULT; | 1536 | return -EFAULT; |
1542 | pagefault_disable(); | 1537 | pagefault_disable(); |
1543 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); | 1538 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
1544 | pagefault_enable(); | 1539 | pagefault_enable(); |
1545 | if (r) | 1540 | if (r) |
1546 | return -EFAULT; | 1541 | return -EFAULT; |
1547 | return 0; | 1542 | return 0; |
1548 | } | 1543 | } |
1549 | EXPORT_SYMBOL(kvm_read_guest_atomic); | 1544 | EXPORT_SYMBOL(kvm_read_guest_atomic); |
1550 | 1545 | ||
1551 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, | 1546 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, |
1552 | int offset, int len) | 1547 | int offset, int len) |
1553 | { | 1548 | { |
1554 | int r; | 1549 | int r; |
1555 | unsigned long addr; | 1550 | unsigned long addr; |
1556 | 1551 | ||
1557 | addr = gfn_to_hva(kvm, gfn); | 1552 | addr = gfn_to_hva(kvm, gfn); |
1558 | if (kvm_is_error_hva(addr)) | 1553 | if (kvm_is_error_hva(addr)) |
1559 | return -EFAULT; | 1554 | return -EFAULT; |
1560 | r = copy_to_user((void __user *)addr + offset, data, len); | 1555 | r = copy_to_user((void __user *)addr + offset, data, len); |
1561 | if (r) | 1556 | if (r) |
1562 | return -EFAULT; | 1557 | return -EFAULT; |
1563 | mark_page_dirty(kvm, gfn); | 1558 | mark_page_dirty(kvm, gfn); |
1564 | return 0; | 1559 | return 0; |
1565 | } | 1560 | } |
1566 | EXPORT_SYMBOL_GPL(kvm_write_guest_page); | 1561 | EXPORT_SYMBOL_GPL(kvm_write_guest_page); |
1567 | 1562 | ||
1568 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | 1563 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
1569 | unsigned long len) | 1564 | unsigned long len) |
1570 | { | 1565 | { |
1571 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1566 | gfn_t gfn = gpa >> PAGE_SHIFT; |
1572 | int seg; | 1567 | int seg; |
1573 | int offset = offset_in_page(gpa); | 1568 | int offset = offset_in_page(gpa); |
1574 | int ret; | 1569 | int ret; |
1575 | 1570 | ||
1576 | while ((seg = next_segment(len, offset)) != 0) { | 1571 | while ((seg = next_segment(len, offset)) != 0) { |
1577 | ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); | 1572 | ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); |
1578 | if (ret < 0) | 1573 | if (ret < 0) |
1579 | return ret; | 1574 | return ret; |
1580 | offset = 0; | 1575 | offset = 0; |
1581 | len -= seg; | 1576 | len -= seg; |
1582 | data += seg; | 1577 | data += seg; |
1583 | ++gfn; | 1578 | ++gfn; |
1584 | } | 1579 | } |
1585 | return 0; | 1580 | return 0; |
1586 | } | 1581 | } |
1587 | 1582 | ||
1588 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) | 1583 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) |
1589 | { | 1584 | { |
1590 | return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len); | 1585 | return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len); |
1591 | } | 1586 | } |
1592 | EXPORT_SYMBOL_GPL(kvm_clear_guest_page); | 1587 | EXPORT_SYMBOL_GPL(kvm_clear_guest_page); |
1593 | 1588 | ||
1594 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) | 1589 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) |
1595 | { | 1590 | { |
1596 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1591 | gfn_t gfn = gpa >> PAGE_SHIFT; |
1597 | int seg; | 1592 | int seg; |
1598 | int offset = offset_in_page(gpa); | 1593 | int offset = offset_in_page(gpa); |
1599 | int ret; | 1594 | int ret; |
1600 | 1595 | ||
1601 | while ((seg = next_segment(len, offset)) != 0) { | 1596 | while ((seg = next_segment(len, offset)) != 0) { |
1602 | ret = kvm_clear_guest_page(kvm, gfn, offset, seg); | 1597 | ret = kvm_clear_guest_page(kvm, gfn, offset, seg); |
1603 | if (ret < 0) | 1598 | if (ret < 0) |
1604 | return ret; | 1599 | return ret; |
1605 | offset = 0; | 1600 | offset = 0; |
1606 | len -= seg; | 1601 | len -= seg; |
1607 | ++gfn; | 1602 | ++gfn; |
1608 | } | 1603 | } |
1609 | return 0; | 1604 | return 0; |
1610 | } | 1605 | } |
1611 | EXPORT_SYMBOL_GPL(kvm_clear_guest); | 1606 | EXPORT_SYMBOL_GPL(kvm_clear_guest); |
1612 | 1607 | ||
1613 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | 1608 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn) |
1614 | { | 1609 | { |
1615 | struct kvm_memory_slot *memslot; | 1610 | struct kvm_memory_slot *memslot; |
1616 | 1611 | ||
1617 | gfn = unalias_gfn(kvm, gfn); | 1612 | gfn = unalias_gfn(kvm, gfn); |
1618 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | 1613 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
1619 | if (memslot && memslot->dirty_bitmap) { | 1614 | if (memslot && memslot->dirty_bitmap) { |
1620 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1615 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
1621 | 1616 | ||
1622 | /* avoid RMW */ | 1617 | /* avoid RMW */ |
1623 | if (!test_bit(rel_gfn, memslot->dirty_bitmap)) | 1618 | if (!test_bit(rel_gfn, memslot->dirty_bitmap)) |
1624 | set_bit(rel_gfn, memslot->dirty_bitmap); | 1619 | set_bit(rel_gfn, memslot->dirty_bitmap); |
1625 | } | 1620 | } |
1626 | } | 1621 | } |
1627 | 1622 | ||
1628 | /* | 1623 | /* |
1629 | * The vCPU has executed a HLT instruction with in-kernel mode enabled. | 1624 | * The vCPU has executed a HLT instruction with in-kernel mode enabled. |
1630 | */ | 1625 | */ |
1631 | void kvm_vcpu_block(struct kvm_vcpu *vcpu) | 1626 | void kvm_vcpu_block(struct kvm_vcpu *vcpu) |
1632 | { | 1627 | { |
1633 | DEFINE_WAIT(wait); | 1628 | DEFINE_WAIT(wait); |
1634 | 1629 | ||
1635 | for (;;) { | 1630 | for (;;) { |
1636 | prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); | 1631 | prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); |
1637 | 1632 | ||
1638 | if ((kvm_arch_interrupt_allowed(vcpu) && | 1633 | if ((kvm_arch_interrupt_allowed(vcpu) && |
1639 | kvm_cpu_has_interrupt(vcpu)) || | 1634 | kvm_cpu_has_interrupt(vcpu)) || |
1640 | kvm_arch_vcpu_runnable(vcpu)) { | 1635 | kvm_arch_vcpu_runnable(vcpu)) { |
1641 | set_bit(KVM_REQ_UNHALT, &vcpu->requests); | 1636 | set_bit(KVM_REQ_UNHALT, &vcpu->requests); |
1642 | break; | 1637 | break; |
1643 | } | 1638 | } |
1644 | if (kvm_cpu_has_pending_timer(vcpu)) | 1639 | if (kvm_cpu_has_pending_timer(vcpu)) |
1645 | break; | 1640 | break; |
1646 | if (signal_pending(current)) | 1641 | if (signal_pending(current)) |
1647 | break; | 1642 | break; |
1648 | 1643 | ||
1649 | vcpu_put(vcpu); | 1644 | vcpu_put(vcpu); |
1650 | schedule(); | 1645 | schedule(); |
1651 | vcpu_load(vcpu); | 1646 | vcpu_load(vcpu); |
1652 | } | 1647 | } |
1653 | 1648 | ||
1654 | finish_wait(&vcpu->wq, &wait); | 1649 | finish_wait(&vcpu->wq, &wait); |
1655 | } | 1650 | } |
1656 | 1651 | ||
1657 | void kvm_resched(struct kvm_vcpu *vcpu) | 1652 | void kvm_resched(struct kvm_vcpu *vcpu) |
1658 | { | 1653 | { |
1659 | if (!need_resched()) | 1654 | if (!need_resched()) |
1660 | return; | 1655 | return; |
1661 | cond_resched(); | 1656 | cond_resched(); |
1662 | } | 1657 | } |
1663 | EXPORT_SYMBOL_GPL(kvm_resched); | 1658 | EXPORT_SYMBOL_GPL(kvm_resched); |
1664 | 1659 | ||
1665 | static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1660 | static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1666 | { | 1661 | { |
1667 | struct kvm_vcpu *vcpu = vma->vm_file->private_data; | 1662 | struct kvm_vcpu *vcpu = vma->vm_file->private_data; |
1668 | struct page *page; | 1663 | struct page *page; |
1669 | 1664 | ||
1670 | if (vmf->pgoff == 0) | 1665 | if (vmf->pgoff == 0) |
1671 | page = virt_to_page(vcpu->run); | 1666 | page = virt_to_page(vcpu->run); |
1672 | #ifdef CONFIG_X86 | 1667 | #ifdef CONFIG_X86 |
1673 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) | 1668 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) |
1674 | page = virt_to_page(vcpu->arch.pio_data); | 1669 | page = virt_to_page(vcpu->arch.pio_data); |
1675 | #endif | 1670 | #endif |
1676 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 1671 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
1677 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) | 1672 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) |
1678 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); | 1673 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); |
1679 | #endif | 1674 | #endif |
1680 | else | 1675 | else |
1681 | return VM_FAULT_SIGBUS; | 1676 | return VM_FAULT_SIGBUS; |
1682 | get_page(page); | 1677 | get_page(page); |
1683 | vmf->page = page; | 1678 | vmf->page = page; |
1684 | return 0; | 1679 | return 0; |
1685 | } | 1680 | } |
1686 | 1681 | ||
1687 | static struct vm_operations_struct kvm_vcpu_vm_ops = { | 1682 | static struct vm_operations_struct kvm_vcpu_vm_ops = { |
1688 | .fault = kvm_vcpu_fault, | 1683 | .fault = kvm_vcpu_fault, |
1689 | }; | 1684 | }; |
1690 | 1685 | ||
1691 | static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) | 1686 | static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) |
1692 | { | 1687 | { |
1693 | vma->vm_ops = &kvm_vcpu_vm_ops; | 1688 | vma->vm_ops = &kvm_vcpu_vm_ops; |
1694 | return 0; | 1689 | return 0; |
1695 | } | 1690 | } |
1696 | 1691 | ||
1697 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) | 1692 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) |
1698 | { | 1693 | { |
1699 | struct kvm_vcpu *vcpu = filp->private_data; | 1694 | struct kvm_vcpu *vcpu = filp->private_data; |
1700 | 1695 | ||
1701 | kvm_put_kvm(vcpu->kvm); | 1696 | kvm_put_kvm(vcpu->kvm); |
1702 | return 0; | 1697 | return 0; |
1703 | } | 1698 | } |
1704 | 1699 | ||
1705 | static struct file_operations kvm_vcpu_fops = { | 1700 | static struct file_operations kvm_vcpu_fops = { |
1706 | .release = kvm_vcpu_release, | 1701 | .release = kvm_vcpu_release, |
1707 | .unlocked_ioctl = kvm_vcpu_ioctl, | 1702 | .unlocked_ioctl = kvm_vcpu_ioctl, |
1708 | .compat_ioctl = kvm_vcpu_ioctl, | 1703 | .compat_ioctl = kvm_vcpu_ioctl, |
1709 | .mmap = kvm_vcpu_mmap, | 1704 | .mmap = kvm_vcpu_mmap, |
1710 | }; | 1705 | }; |
1711 | 1706 | ||
1712 | /* | 1707 | /* |
1713 | * Allocates an inode for the vcpu. | 1708 | * Allocates an inode for the vcpu. |
1714 | */ | 1709 | */ |
1715 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) | 1710 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) |
1716 | { | 1711 | { |
1717 | int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0); | 1712 | return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0); |
1718 | if (fd < 0) | ||
1719 | kvm_put_kvm(vcpu->kvm); | ||
1720 | return fd; | ||
1721 | } | 1713 | } |
1722 | 1714 | ||
1723 | /* | 1715 | /* |
1724 | * Creates some virtual cpus. Good luck creating more than one. | 1716 | * Creates some virtual cpus. Good luck creating more than one. |
1725 | */ | 1717 | */ |
1726 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n) | 1718 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) |
1727 | { | 1719 | { |
1728 | int r; | 1720 | int r; |
1729 | struct kvm_vcpu *vcpu; | 1721 | struct kvm_vcpu *vcpu; |
1730 | 1722 | ||
1731 | if (!valid_vcpu(n)) | 1723 | vcpu = kvm_arch_vcpu_create(kvm, id); |
1732 | return -EINVAL; | ||
1733 | |||
1734 | vcpu = kvm_arch_vcpu_create(kvm, n); | ||
1735 | if (IS_ERR(vcpu)) | 1724 | if (IS_ERR(vcpu)) |
1736 | return PTR_ERR(vcpu); | 1725 | return PTR_ERR(vcpu); |
1737 | 1726 | ||
1738 | preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); | 1727 | preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); |
1739 | 1728 | ||
1740 | r = kvm_arch_vcpu_setup(vcpu); | 1729 | r = kvm_arch_vcpu_setup(vcpu); |
1741 | if (r) | 1730 | if (r) |
1742 | return r; | 1731 | return r; |
1743 | 1732 | ||
1744 | mutex_lock(&kvm->lock); | 1733 | mutex_lock(&kvm->lock); |
1745 | if (kvm->vcpus[n]) { | 1734 | if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { |
1746 | r = -EEXIST; | 1735 | r = -EINVAL; |
1747 | goto vcpu_destroy; | 1736 | goto vcpu_destroy; |
1748 | } | 1737 | } |
1749 | kvm->vcpus[n] = vcpu; | ||
1750 | if (n == 0) | ||
1751 | kvm->bsp_vcpu = vcpu; | ||
1752 | mutex_unlock(&kvm->lock); | ||
1753 | 1738 | ||
1739 | for (r = 0; r < atomic_read(&kvm->online_vcpus); r++) | ||
1740 | if (kvm->vcpus[r]->vcpu_id == id) { | ||
1741 | r = -EEXIST; | ||
1742 | goto vcpu_destroy; | ||
1743 | } | ||
1744 | |||
1745 | BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); | ||
1746 | |||
1754 | /* Now it's all set up, let userspace reach it */ | 1747 | /* Now it's all set up, let userspace reach it */ |
1755 | kvm_get_kvm(kvm); | 1748 | kvm_get_kvm(kvm); |
1756 | r = create_vcpu_fd(vcpu); | 1749 | r = create_vcpu_fd(vcpu); |
1757 | if (r < 0) | 1750 | if (r < 0) { |
1758 | goto unlink; | 1751 | kvm_put_kvm(kvm); |
1752 | goto vcpu_destroy; | ||
1753 | } | ||
1754 | |||
1755 | kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; | ||
1756 | smp_wmb(); | ||
1757 | atomic_inc(&kvm->online_vcpus); | ||
1758 | |||
1759 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | ||
1760 | if (kvm->bsp_vcpu_id == id) | ||
1761 | kvm->bsp_vcpu = vcpu; | ||
1762 | #endif | ||
1763 | mutex_unlock(&kvm->lock); | ||
1759 | return r; | 1764 | return r; |
1760 | 1765 | ||
1761 | unlink: | ||
1762 | mutex_lock(&kvm->lock); | ||
1763 | kvm->vcpus[n] = NULL; | ||
1764 | vcpu_destroy: | 1766 | vcpu_destroy: |
1765 | mutex_unlock(&kvm->lock); | 1767 | mutex_unlock(&kvm->lock); |
1766 | kvm_arch_vcpu_destroy(vcpu); | 1768 | kvm_arch_vcpu_destroy(vcpu); |
1767 | return r; | 1769 | return r; |
1768 | } | 1770 | } |
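/*
 * A minimal userspace sketch of the matching KVM_CREATE_VCPU call.  With
 * this change the argument is an arbitrary vcpu id handed to
 * kvm_arch_vcpu_create(), not an index into kvm->vcpus[]; vm_fd and the
 * example id are assumptions for illustration.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vcpu(int vm_fd, unsigned int id)
{
    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);    /* id, not index */

    if (vcpu_fd < 0)
        perror("KVM_CREATE_VCPU");
    return vcpu_fd;    /* mmap this fd to reach struct kvm_run */
}

/* e.g. create_vcpu(vm_fd, 2) is now valid even for the first vcpu created. */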
1769 | 1771 | ||
1770 | static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) | 1772 | static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) |
1771 | { | 1773 | { |
1772 | if (sigset) { | 1774 | if (sigset) { |
1773 | sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | 1775 | sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
1774 | vcpu->sigset_active = 1; | 1776 | vcpu->sigset_active = 1; |
1775 | vcpu->sigset = *sigset; | 1777 | vcpu->sigset = *sigset; |
1776 | } else | 1778 | } else |
1777 | vcpu->sigset_active = 0; | 1779 | vcpu->sigset_active = 0; |
1778 | return 0; | 1780 | return 0; |
1779 | } | 1781 | } |
1780 | 1782 | ||
1781 | #ifdef __KVM_HAVE_MSIX | 1783 | #ifdef __KVM_HAVE_MSIX |
1782 | static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, | 1784 | static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm, |
1783 | struct kvm_assigned_msix_nr *entry_nr) | 1785 | struct kvm_assigned_msix_nr *entry_nr) |
1784 | { | 1786 | { |
1785 | int r = 0; | 1787 | int r = 0; |
1786 | struct kvm_assigned_dev_kernel *adev; | 1788 | struct kvm_assigned_dev_kernel *adev; |
1787 | 1789 | ||
1788 | mutex_lock(&kvm->lock); | 1790 | mutex_lock(&kvm->lock); |
1789 | 1791 | ||
1790 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 1792 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
1791 | entry_nr->assigned_dev_id); | 1793 | entry_nr->assigned_dev_id); |
1792 | if (!adev) { | 1794 | if (!adev) { |
1793 | r = -EINVAL; | 1795 | r = -EINVAL; |
1794 | goto msix_nr_out; | 1796 | goto msix_nr_out; |
1795 | } | 1797 | } |
1796 | 1798 | ||
1797 | if (adev->entries_nr == 0) { | 1799 | if (adev->entries_nr == 0) { |
1798 | adev->entries_nr = entry_nr->entry_nr; | 1800 | adev->entries_nr = entry_nr->entry_nr; |
1799 | if (adev->entries_nr == 0 || | 1801 | if (adev->entries_nr == 0 || |
1800 | adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) { | 1802 | adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) { |
1801 | r = -EINVAL; | 1803 | r = -EINVAL; |
1802 | goto msix_nr_out; | 1804 | goto msix_nr_out; |
1803 | } | 1805 | } |
1804 | 1806 | ||
1805 | adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) * | 1807 | adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) * |
1806 | entry_nr->entry_nr, | 1808 | entry_nr->entry_nr, |
1807 | GFP_KERNEL); | 1809 | GFP_KERNEL); |
1808 | if (!adev->host_msix_entries) { | 1810 | if (!adev->host_msix_entries) { |
1809 | r = -ENOMEM; | 1811 | r = -ENOMEM; |
1810 | goto msix_nr_out; | 1812 | goto msix_nr_out; |
1811 | } | 1813 | } |
1812 | adev->guest_msix_entries = kzalloc( | 1814 | adev->guest_msix_entries = kzalloc( |
1813 | sizeof(struct kvm_guest_msix_entry) * | 1815 | sizeof(struct kvm_guest_msix_entry) * |
1814 | entry_nr->entry_nr, GFP_KERNEL); | 1816 | entry_nr->entry_nr, GFP_KERNEL); |
1815 | if (!adev->guest_msix_entries) { | 1817 | if (!adev->guest_msix_entries) { |
1816 | kfree(adev->host_msix_entries); | 1818 | kfree(adev->host_msix_entries); |
1817 | r = -ENOMEM; | 1819 | r = -ENOMEM; |
1818 | goto msix_nr_out; | 1820 | goto msix_nr_out; |
1819 | } | 1821 | } |
1820 | } else /* Not allowed to set MSI-X number twice */ | 1822 | } else /* Not allowed to set MSI-X number twice */ |
1821 | r = -EINVAL; | 1823 | r = -EINVAL; |
1822 | msix_nr_out: | 1824 | msix_nr_out: |
1823 | mutex_unlock(&kvm->lock); | 1825 | mutex_unlock(&kvm->lock); |
1824 | return r; | 1826 | return r; |
1825 | } | 1827 | } |
1826 | 1828 | ||
1827 | static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm, | 1829 | static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm, |
1828 | struct kvm_assigned_msix_entry *entry) | 1830 | struct kvm_assigned_msix_entry *entry) |
1829 | { | 1831 | { |
1830 | int r = 0, i; | 1832 | int r = 0, i; |
1831 | struct kvm_assigned_dev_kernel *adev; | 1833 | struct kvm_assigned_dev_kernel *adev; |
1832 | 1834 | ||
1833 | mutex_lock(&kvm->lock); | 1835 | mutex_lock(&kvm->lock); |
1834 | 1836 | ||
1835 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, | 1837 | adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, |
1836 | entry->assigned_dev_id); | 1838 | entry->assigned_dev_id); |
1837 | 1839 | ||
1838 | if (!adev) { | 1840 | if (!adev) { |
1839 | r = -EINVAL; | 1841 | r = -EINVAL; |
1840 | goto msix_entry_out; | 1842 | goto msix_entry_out; |
1841 | } | 1843 | } |
1842 | 1844 | ||
1843 | for (i = 0; i < adev->entries_nr; i++) | 1845 | for (i = 0; i < adev->entries_nr; i++) |
1844 | if (adev->guest_msix_entries[i].vector == 0 || | 1846 | if (adev->guest_msix_entries[i].vector == 0 || |
1845 | adev->guest_msix_entries[i].entry == entry->entry) { | 1847 | adev->guest_msix_entries[i].entry == entry->entry) { |
1846 | adev->guest_msix_entries[i].entry = entry->entry; | 1848 | adev->guest_msix_entries[i].entry = entry->entry; |
1847 | adev->guest_msix_entries[i].vector = entry->gsi; | 1849 | adev->guest_msix_entries[i].vector = entry->gsi; |
1848 | adev->host_msix_entries[i].entry = entry->entry; | 1850 | adev->host_msix_entries[i].entry = entry->entry; |
1849 | break; | 1851 | break; |
1850 | } | 1852 | } |
1851 | if (i == adev->entries_nr) { | 1853 | if (i == adev->entries_nr) { |
1852 | r = -ENOSPC; | 1854 | r = -ENOSPC; |
1853 | goto msix_entry_out; | 1855 | goto msix_entry_out; |
1854 | } | 1856 | } |
1855 | 1857 | ||
1856 | msix_entry_out: | 1858 | msix_entry_out: |
1857 | mutex_unlock(&kvm->lock); | 1859 | mutex_unlock(&kvm->lock); |
1858 | 1860 | ||
1859 | return r; | 1861 | return r; |
1860 | } | 1862 | } |
1861 | #endif | 1863 | #endif |
1862 | 1864 | ||
1863 | static long kvm_vcpu_ioctl(struct file *filp, | 1865 | static long kvm_vcpu_ioctl(struct file *filp, |
1864 | unsigned int ioctl, unsigned long arg) | 1866 | unsigned int ioctl, unsigned long arg) |
1865 | { | 1867 | { |
1866 | struct kvm_vcpu *vcpu = filp->private_data; | 1868 | struct kvm_vcpu *vcpu = filp->private_data; |
1867 | void __user *argp = (void __user *)arg; | 1869 | void __user *argp = (void __user *)arg; |
1868 | int r; | 1870 | int r; |
1869 | struct kvm_fpu *fpu = NULL; | 1871 | struct kvm_fpu *fpu = NULL; |
1870 | struct kvm_sregs *kvm_sregs = NULL; | 1872 | struct kvm_sregs *kvm_sregs = NULL; |
1871 | 1873 | ||
1872 | if (vcpu->kvm->mm != current->mm) | 1874 | if (vcpu->kvm->mm != current->mm) |
1873 | return -EIO; | 1875 | return -EIO; |
1874 | switch (ioctl) { | 1876 | switch (ioctl) { |
1875 | case KVM_RUN: | 1877 | case KVM_RUN: |
1876 | r = -EINVAL; | 1878 | r = -EINVAL; |
1877 | if (arg) | 1879 | if (arg) |
1878 | goto out; | 1880 | goto out; |
1879 | r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); | 1881 | r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); |
1880 | break; | 1882 | break; |
1881 | case KVM_GET_REGS: { | 1883 | case KVM_GET_REGS: { |
1882 | struct kvm_regs *kvm_regs; | 1884 | struct kvm_regs *kvm_regs; |
1883 | 1885 | ||
1884 | r = -ENOMEM; | 1886 | r = -ENOMEM; |
1885 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); | 1887 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); |
1886 | if (!kvm_regs) | 1888 | if (!kvm_regs) |
1887 | goto out; | 1889 | goto out; |
1888 | r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); | 1890 | r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); |
1889 | if (r) | 1891 | if (r) |
1890 | goto out_free1; | 1892 | goto out_free1; |
1891 | r = -EFAULT; | 1893 | r = -EFAULT; |
1892 | if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) | 1894 | if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) |
1893 | goto out_free1; | 1895 | goto out_free1; |
1894 | r = 0; | 1896 | r = 0; |
1895 | out_free1: | 1897 | out_free1: |
1896 | kfree(kvm_regs); | 1898 | kfree(kvm_regs); |
1897 | break; | 1899 | break; |
1898 | } | 1900 | } |
1899 | case KVM_SET_REGS: { | 1901 | case KVM_SET_REGS: { |
1900 | struct kvm_regs *kvm_regs; | 1902 | struct kvm_regs *kvm_regs; |
1901 | 1903 | ||
1902 | r = -ENOMEM; | 1904 | r = -ENOMEM; |
1903 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); | 1905 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); |
1904 | if (!kvm_regs) | 1906 | if (!kvm_regs) |
1905 | goto out; | 1907 | goto out; |
1906 | r = -EFAULT; | 1908 | r = -EFAULT; |
1907 | if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs))) | 1909 | if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs))) |
1908 | goto out_free2; | 1910 | goto out_free2; |
1909 | r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); | 1911 | r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); |
1910 | if (r) | 1912 | if (r) |
1911 | goto out_free2; | 1913 | goto out_free2; |
1912 | r = 0; | 1914 | r = 0; |
1913 | out_free2: | 1915 | out_free2: |
1914 | kfree(kvm_regs); | 1916 | kfree(kvm_regs); |
1915 | break; | 1917 | break; |
1916 | } | 1918 | } |
1917 | case KVM_GET_SREGS: { | 1919 | case KVM_GET_SREGS: { |
1918 | kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); | 1920 | kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); |
1919 | r = -ENOMEM; | 1921 | r = -ENOMEM; |
1920 | if (!kvm_sregs) | 1922 | if (!kvm_sregs) |
1921 | goto out; | 1923 | goto out; |
1922 | r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); | 1924 | r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); |
1923 | if (r) | 1925 | if (r) |
1924 | goto out; | 1926 | goto out; |
1925 | r = -EFAULT; | 1927 | r = -EFAULT; |
1926 | if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) | 1928 | if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) |
1927 | goto out; | 1929 | goto out; |
1928 | r = 0; | 1930 | r = 0; |
1929 | break; | 1931 | break; |
1930 | } | 1932 | } |
1931 | case KVM_SET_SREGS: { | 1933 | case KVM_SET_SREGS: { |
1932 | kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); | 1934 | kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); |
1933 | r = -ENOMEM; | 1935 | r = -ENOMEM; |
1934 | if (!kvm_sregs) | 1936 | if (!kvm_sregs) |
1935 | goto out; | 1937 | goto out; |
1936 | r = -EFAULT; | 1938 | r = -EFAULT; |
1937 | if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs))) | 1939 | if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs))) |
1938 | goto out; | 1940 | goto out; |
1939 | r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); | 1941 | r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); |
1940 | if (r) | 1942 | if (r) |
1941 | goto out; | 1943 | goto out; |
1942 | r = 0; | 1944 | r = 0; |
1943 | break; | 1945 | break; |
1944 | } | 1946 | } |
1945 | case KVM_GET_MP_STATE: { | 1947 | case KVM_GET_MP_STATE: { |
1946 | struct kvm_mp_state mp_state; | 1948 | struct kvm_mp_state mp_state; |
1947 | 1949 | ||
1948 | r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); | 1950 | r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); |
1949 | if (r) | 1951 | if (r) |
1950 | goto out; | 1952 | goto out; |
1951 | r = -EFAULT; | 1953 | r = -EFAULT; |
1952 | if (copy_to_user(argp, &mp_state, sizeof mp_state)) | 1954 | if (copy_to_user(argp, &mp_state, sizeof mp_state)) |
1953 | goto out; | 1955 | goto out; |
1954 | r = 0; | 1956 | r = 0; |
1955 | break; | 1957 | break; |
1956 | } | 1958 | } |
1957 | case KVM_SET_MP_STATE: { | 1959 | case KVM_SET_MP_STATE: { |
1958 | struct kvm_mp_state mp_state; | 1960 | struct kvm_mp_state mp_state; |
1959 | 1961 | ||
1960 | r = -EFAULT; | 1962 | r = -EFAULT; |
1961 | if (copy_from_user(&mp_state, argp, sizeof mp_state)) | 1963 | if (copy_from_user(&mp_state, argp, sizeof mp_state)) |
1962 | goto out; | 1964 | goto out; |
1963 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); | 1965 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); |
1964 | if (r) | 1966 | if (r) |
1965 | goto out; | 1967 | goto out; |
1966 | r = 0; | 1968 | r = 0; |
1967 | break; | 1969 | break; |
1968 | } | 1970 | } |
1969 | case KVM_TRANSLATE: { | 1971 | case KVM_TRANSLATE: { |
1970 | struct kvm_translation tr; | 1972 | struct kvm_translation tr; |
1971 | 1973 | ||
1972 | r = -EFAULT; | 1974 | r = -EFAULT; |
1973 | if (copy_from_user(&tr, argp, sizeof tr)) | 1975 | if (copy_from_user(&tr, argp, sizeof tr)) |
1974 | goto out; | 1976 | goto out; |
1975 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); | 1977 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); |
1976 | if (r) | 1978 | if (r) |
1977 | goto out; | 1979 | goto out; |
1978 | r = -EFAULT; | 1980 | r = -EFAULT; |
1979 | if (copy_to_user(argp, &tr, sizeof tr)) | 1981 | if (copy_to_user(argp, &tr, sizeof tr)) |
1980 | goto out; | 1982 | goto out; |
1981 | r = 0; | 1983 | r = 0; |
1982 | break; | 1984 | break; |
1983 | } | 1985 | } |
1984 | case KVM_SET_GUEST_DEBUG: { | 1986 | case KVM_SET_GUEST_DEBUG: { |
1985 | struct kvm_guest_debug dbg; | 1987 | struct kvm_guest_debug dbg; |
1986 | 1988 | ||
1987 | r = -EFAULT; | 1989 | r = -EFAULT; |
1988 | if (copy_from_user(&dbg, argp, sizeof dbg)) | 1990 | if (copy_from_user(&dbg, argp, sizeof dbg)) |
1989 | goto out; | 1991 | goto out; |
1990 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); | 1992 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); |
1991 | if (r) | 1993 | if (r) |
1992 | goto out; | 1994 | goto out; |
1993 | r = 0; | 1995 | r = 0; |
1994 | break; | 1996 | break; |
1995 | } | 1997 | } |
1996 | case KVM_SET_SIGNAL_MASK: { | 1998 | case KVM_SET_SIGNAL_MASK: { |
1997 | struct kvm_signal_mask __user *sigmask_arg = argp; | 1999 | struct kvm_signal_mask __user *sigmask_arg = argp; |
1998 | struct kvm_signal_mask kvm_sigmask; | 2000 | struct kvm_signal_mask kvm_sigmask; |
1999 | sigset_t sigset, *p; | 2001 | sigset_t sigset, *p; |
2000 | 2002 | ||
2001 | p = NULL; | 2003 | p = NULL; |
2002 | if (argp) { | 2004 | if (argp) { |
2003 | r = -EFAULT; | 2005 | r = -EFAULT; |
2004 | if (copy_from_user(&kvm_sigmask, argp, | 2006 | if (copy_from_user(&kvm_sigmask, argp, |
2005 | sizeof kvm_sigmask)) | 2007 | sizeof kvm_sigmask)) |
2006 | goto out; | 2008 | goto out; |
2007 | r = -EINVAL; | 2009 | r = -EINVAL; |
2008 | if (kvm_sigmask.len != sizeof sigset) | 2010 | if (kvm_sigmask.len != sizeof sigset) |
2009 | goto out; | 2011 | goto out; |
2010 | r = -EFAULT; | 2012 | r = -EFAULT; |
2011 | if (copy_from_user(&sigset, sigmask_arg->sigset, | 2013 | if (copy_from_user(&sigset, sigmask_arg->sigset, |
2012 | sizeof sigset)) | 2014 | sizeof sigset)) |
2013 | goto out; | 2015 | goto out; |
2014 | p = &sigset; | 2016 | p = &sigset; |
2015 | } | 2017 | } |
2016 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); | 2018 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
2017 | break; | 2019 | break; |
2018 | } | 2020 | } |
2019 | case KVM_GET_FPU: { | 2021 | case KVM_GET_FPU: { |
2020 | fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); | 2022 | fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); |
2021 | r = -ENOMEM; | 2023 | r = -ENOMEM; |
2022 | if (!fpu) | 2024 | if (!fpu) |
2023 | goto out; | 2025 | goto out; |
2024 | r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); | 2026 | r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); |
2025 | if (r) | 2027 | if (r) |
2026 | goto out; | 2028 | goto out; |
2027 | r = -EFAULT; | 2029 | r = -EFAULT; |
2028 | if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) | 2030 | if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) |
2029 | goto out; | 2031 | goto out; |
2030 | r = 0; | 2032 | r = 0; |
2031 | break; | 2033 | break; |
2032 | } | 2034 | } |
2033 | case KVM_SET_FPU: { | 2035 | case KVM_SET_FPU: { |
2034 | fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); | 2036 | fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); |
2035 | r = -ENOMEM; | 2037 | r = -ENOMEM; |
2036 | if (!fpu) | 2038 | if (!fpu) |
2037 | goto out; | 2039 | goto out; |
2038 | r = -EFAULT; | 2040 | r = -EFAULT; |
2039 | if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu))) | 2041 | if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu))) |
2040 | goto out; | 2042 | goto out; |
2041 | r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); | 2043 | r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); |
2042 | if (r) | 2044 | if (r) |
2043 | goto out; | 2045 | goto out; |
2044 | r = 0; | 2046 | r = 0; |
2045 | break; | 2047 | break; |
2046 | } | 2048 | } |
2047 | default: | 2049 | default: |
2048 | r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); | 2050 | r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); |
2049 | } | 2051 | } |
2050 | out: | 2052 | out: |
2051 | kfree(fpu); | 2053 | kfree(fpu); |
2052 | kfree(kvm_sregs); | 2054 | kfree(kvm_sregs); |
2053 | return r; | 2055 | return r; |
2054 | } | 2056 | } |
2055 | 2057 | ||
2056 | static long kvm_vm_ioctl(struct file *filp, | 2058 | static long kvm_vm_ioctl(struct file *filp, |
2057 | unsigned int ioctl, unsigned long arg) | 2059 | unsigned int ioctl, unsigned long arg) |
2058 | { | 2060 | { |
2059 | struct kvm *kvm = filp->private_data; | 2061 | struct kvm *kvm = filp->private_data; |
2060 | void __user *argp = (void __user *)arg; | 2062 | void __user *argp = (void __user *)arg; |
2061 | int r; | 2063 | int r; |
2062 | 2064 | ||
2063 | if (kvm->mm != current->mm) | 2065 | if (kvm->mm != current->mm) |
2064 | return -EIO; | 2066 | return -EIO; |
2065 | switch (ioctl) { | 2067 | switch (ioctl) { |
2066 | case KVM_CREATE_VCPU: | 2068 | case KVM_CREATE_VCPU: |
2067 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); | 2069 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); |
2068 | if (r < 0) | 2070 | if (r < 0) |
2069 | goto out; | 2071 | goto out; |
2070 | break; | 2072 | break; |
2071 | case KVM_SET_USER_MEMORY_REGION: { | 2073 | case KVM_SET_USER_MEMORY_REGION: { |
2072 | struct kvm_userspace_memory_region kvm_userspace_mem; | 2074 | struct kvm_userspace_memory_region kvm_userspace_mem; |
2073 | 2075 | ||
2074 | r = -EFAULT; | 2076 | r = -EFAULT; |
2075 | if (copy_from_user(&kvm_userspace_mem, argp, | 2077 | if (copy_from_user(&kvm_userspace_mem, argp, |
2076 | sizeof kvm_userspace_mem)) | 2078 | sizeof kvm_userspace_mem)) |
2077 | goto out; | 2079 | goto out; |
2078 | 2080 | ||
2079 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); | 2081 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); |
2080 | if (r) | 2082 | if (r) |
2081 | goto out; | 2083 | goto out; |
2082 | break; | 2084 | break; |
2083 | } | 2085 | } |
2084 | case KVM_GET_DIRTY_LOG: { | 2086 | case KVM_GET_DIRTY_LOG: { |
2085 | struct kvm_dirty_log log; | 2087 | struct kvm_dirty_log log; |
2086 | 2088 | ||
2087 | r = -EFAULT; | 2089 | r = -EFAULT; |
2088 | if (copy_from_user(&log, argp, sizeof log)) | 2090 | if (copy_from_user(&log, argp, sizeof log)) |
2089 | goto out; | 2091 | goto out; |
2090 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); | 2092 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
2091 | if (r) | 2093 | if (r) |
2092 | goto out; | 2094 | goto out; |
2093 | break; | 2095 | break; |
2094 | } | 2096 | } |
2095 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 2097 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
2096 | case KVM_REGISTER_COALESCED_MMIO: { | 2098 | case KVM_REGISTER_COALESCED_MMIO: { |
2097 | struct kvm_coalesced_mmio_zone zone; | 2099 | struct kvm_coalesced_mmio_zone zone; |
2098 | r = -EFAULT; | 2100 | r = -EFAULT; |
2099 | if (copy_from_user(&zone, argp, sizeof zone)) | 2101 | if (copy_from_user(&zone, argp, sizeof zone)) |
2100 | goto out; | 2102 | goto out; |
2101 | r = -ENXIO; | 2103 | r = -ENXIO; |
2102 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); | 2104 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); |
2103 | if (r) | 2105 | if (r) |
2104 | goto out; | 2106 | goto out; |
2105 | r = 0; | 2107 | r = 0; |
2106 | break; | 2108 | break; |
2107 | } | 2109 | } |
2108 | case KVM_UNREGISTER_COALESCED_MMIO: { | 2110 | case KVM_UNREGISTER_COALESCED_MMIO: { |
2109 | struct kvm_coalesced_mmio_zone zone; | 2111 | struct kvm_coalesced_mmio_zone zone; |
2110 | r = -EFAULT; | 2112 | r = -EFAULT; |
2111 | if (copy_from_user(&zone, argp, sizeof zone)) | 2113 | if (copy_from_user(&zone, argp, sizeof zone)) |
2112 | goto out; | 2114 | goto out; |
2113 | r = -ENXIO; | 2115 | r = -ENXIO; |
2114 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); | 2116 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); |
2115 | if (r) | 2117 | if (r) |
2116 | goto out; | 2118 | goto out; |
2117 | r = 0; | 2119 | r = 0; |
2118 | break; | 2120 | break; |
2119 | } | 2121 | } |
2120 | #endif | 2122 | #endif |
2121 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | 2123 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT |
2122 | case KVM_ASSIGN_PCI_DEVICE: { | 2124 | case KVM_ASSIGN_PCI_DEVICE: { |
2123 | struct kvm_assigned_pci_dev assigned_dev; | 2125 | struct kvm_assigned_pci_dev assigned_dev; |
2124 | 2126 | ||
2125 | r = -EFAULT; | 2127 | r = -EFAULT; |
2126 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | 2128 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) |
2127 | goto out; | 2129 | goto out; |
2128 | r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); | 2130 | r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); |
2129 | if (r) | 2131 | if (r) |
2130 | goto out; | 2132 | goto out; |
2131 | break; | 2133 | break; |
2132 | } | 2134 | } |
2133 | case KVM_ASSIGN_IRQ: { | 2135 | case KVM_ASSIGN_IRQ: { |
2134 | r = -EOPNOTSUPP; | 2136 | r = -EOPNOTSUPP; |
2135 | break; | 2137 | break; |
2136 | } | 2138 | } |
2137 | #ifdef KVM_CAP_ASSIGN_DEV_IRQ | 2139 | #ifdef KVM_CAP_ASSIGN_DEV_IRQ |
2138 | case KVM_ASSIGN_DEV_IRQ: { | 2140 | case KVM_ASSIGN_DEV_IRQ: { |
2139 | struct kvm_assigned_irq assigned_irq; | 2141 | struct kvm_assigned_irq assigned_irq; |
2140 | 2142 | ||
2141 | r = -EFAULT; | 2143 | r = -EFAULT; |
2142 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | 2144 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) |
2143 | goto out; | 2145 | goto out; |
2144 | r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); | 2146 | r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); |
2145 | if (r) | 2147 | if (r) |
2146 | goto out; | 2148 | goto out; |
2147 | break; | 2149 | break; |
2148 | } | 2150 | } |
2149 | case KVM_DEASSIGN_DEV_IRQ: { | 2151 | case KVM_DEASSIGN_DEV_IRQ: { |
2150 | struct kvm_assigned_irq assigned_irq; | 2152 | struct kvm_assigned_irq assigned_irq; |
2151 | 2153 | ||
2152 | r = -EFAULT; | 2154 | r = -EFAULT; |
2153 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) | 2155 | if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) |
2154 | goto out; | 2156 | goto out; |
2155 | r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq); | 2157 | r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq); |
2156 | if (r) | 2158 | if (r) |
2157 | goto out; | 2159 | goto out; |
2158 | break; | 2160 | break; |
2159 | } | 2161 | } |
2160 | #endif | 2162 | #endif |
2161 | #endif | 2163 | #endif |
2162 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT | 2164 | #ifdef KVM_CAP_DEVICE_DEASSIGNMENT |
2163 | case KVM_DEASSIGN_PCI_DEVICE: { | 2165 | case KVM_DEASSIGN_PCI_DEVICE: { |
2164 | struct kvm_assigned_pci_dev assigned_dev; | 2166 | struct kvm_assigned_pci_dev assigned_dev; |
2165 | 2167 | ||
2166 | r = -EFAULT; | 2168 | r = -EFAULT; |
2167 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) | 2169 | if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) |
2168 | goto out; | 2170 | goto out; |
2169 | r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); | 2171 | r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); |
2170 | if (r) | 2172 | if (r) |
2171 | goto out; | 2173 | goto out; |
2172 | break; | 2174 | break; |
2173 | } | 2175 | } |
2174 | #endif | 2176 | #endif |
2175 | #ifdef KVM_CAP_IRQ_ROUTING | 2177 | #ifdef KVM_CAP_IRQ_ROUTING |
2176 | case KVM_SET_GSI_ROUTING: { | 2178 | case KVM_SET_GSI_ROUTING: { |
2177 | struct kvm_irq_routing routing; | 2179 | struct kvm_irq_routing routing; |
2178 | struct kvm_irq_routing __user *urouting; | 2180 | struct kvm_irq_routing __user *urouting; |
2179 | struct kvm_irq_routing_entry *entries; | 2181 | struct kvm_irq_routing_entry *entries; |
2180 | 2182 | ||
2181 | r = -EFAULT; | 2183 | r = -EFAULT; |
2182 | if (copy_from_user(&routing, argp, sizeof(routing))) | 2184 | if (copy_from_user(&routing, argp, sizeof(routing))) |
2183 | goto out; | 2185 | goto out; |
2184 | r = -EINVAL; | 2186 | r = -EINVAL; |
2185 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | 2187 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) |
2186 | goto out; | 2188 | goto out; |
2187 | if (routing.flags) | 2189 | if (routing.flags) |
2188 | goto out; | 2190 | goto out; |
2189 | r = -ENOMEM; | 2191 | r = -ENOMEM; |
2190 | entries = vmalloc(routing.nr * sizeof(*entries)); | 2192 | entries = vmalloc(routing.nr * sizeof(*entries)); |
2191 | if (!entries) | 2193 | if (!entries) |
2192 | goto out; | 2194 | goto out; |
2193 | r = -EFAULT; | 2195 | r = -EFAULT; |
2194 | urouting = argp; | 2196 | urouting = argp; |
2195 | if (copy_from_user(entries, urouting->entries, | 2197 | if (copy_from_user(entries, urouting->entries, |
2196 | routing.nr * sizeof(*entries))) | 2198 | routing.nr * sizeof(*entries))) |
2197 | goto out_free_irq_routing; | 2199 | goto out_free_irq_routing; |
2198 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | 2200 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
2199 | routing.flags); | 2201 | routing.flags); |
2200 | out_free_irq_routing: | 2202 | out_free_irq_routing: |
2201 | vfree(entries); | 2203 | vfree(entries); |
2202 | break; | 2204 | break; |
2203 | } | 2205 | } |
2204 | #ifdef __KVM_HAVE_MSIX | 2206 | #ifdef __KVM_HAVE_MSIX |
2205 | case KVM_ASSIGN_SET_MSIX_NR: { | 2207 | case KVM_ASSIGN_SET_MSIX_NR: { |
2206 | struct kvm_assigned_msix_nr entry_nr; | 2208 | struct kvm_assigned_msix_nr entry_nr; |
2207 | r = -EFAULT; | 2209 | r = -EFAULT; |
2208 | if (copy_from_user(&entry_nr, argp, sizeof entry_nr)) | 2210 | if (copy_from_user(&entry_nr, argp, sizeof entry_nr)) |
2209 | goto out; | 2211 | goto out; |
2210 | r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr); | 2212 | r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr); |
2211 | if (r) | 2213 | if (r) |
2212 | goto out; | 2214 | goto out; |
2213 | break; | 2215 | break; |
2214 | } | 2216 | } |
2215 | case KVM_ASSIGN_SET_MSIX_ENTRY: { | 2217 | case KVM_ASSIGN_SET_MSIX_ENTRY: { |
2216 | struct kvm_assigned_msix_entry entry; | 2218 | struct kvm_assigned_msix_entry entry; |
2217 | r = -EFAULT; | 2219 | r = -EFAULT; |
2218 | if (copy_from_user(&entry, argp, sizeof entry)) | 2220 | if (copy_from_user(&entry, argp, sizeof entry)) |
2219 | goto out; | 2221 | goto out; |
2220 | r = kvm_vm_ioctl_set_msix_entry(kvm, &entry); | 2222 | r = kvm_vm_ioctl_set_msix_entry(kvm, &entry); |
2221 | if (r) | 2223 | if (r) |
2222 | goto out; | 2224 | goto out; |
2223 | break; | 2225 | break; |
2224 | } | 2226 | } |
2225 | #endif | 2227 | #endif |
2226 | #endif /* KVM_CAP_IRQ_ROUTING */ | 2228 | #endif /* KVM_CAP_IRQ_ROUTING */ |
2227 | case KVM_IRQFD: { | 2229 | case KVM_IRQFD: { |
2228 | struct kvm_irqfd data; | 2230 | struct kvm_irqfd data; |
2229 | 2231 | ||
2230 | r = -EFAULT; | 2232 | r = -EFAULT; |
2231 | if (copy_from_user(&data, argp, sizeof data)) | 2233 | if (copy_from_user(&data, argp, sizeof data)) |
2232 | goto out; | 2234 | goto out; |
2233 | r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags); | 2235 | r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags); |
2234 | break; | 2236 | break; |
2235 | } | 2237 | } |
2238 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | ||
2239 | case KVM_SET_BOOT_CPU_ID: | ||
2240 | r = 0; | ||
2241 | if (atomic_read(&kvm->online_vcpus) != 0) | ||
2242 | r = -EBUSY; | ||
2243 | else | ||
2244 | kvm->bsp_vcpu_id = arg; | ||
2245 | break; | ||
2246 | #endif | ||
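Editor's note: a minimal userspace sketch of how the new VM ioctl handled in the hunk above might be driven. It assumes a vm_fd already obtained via KVM_CREATE_VM and that the uapi header exports KVM_SET_BOOT_CPU_ID as added by this series; set_boot_vcpu() is a hypothetical helper, not kernel code. Because the handler stores arg into kvm->bsp_vcpu_id and returns -EBUSY once online_vcpus is non-zero, the call has to happen before the first KVM_CREATE_VCPU.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch only: choose the boot vcpu id before any vcpu is created.
	 * The id is passed directly in the ioctl argument, matching the
	 * "kvm->bsp_vcpu_id = arg" assignment in the handler above. */
	static int set_boot_vcpu(int vm_fd, unsigned long boot_vcpu_id)
	{
		return ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, boot_vcpu_id);
	}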
2236 | default: | 2247 | default: |
2237 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); | 2248 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
2238 | } | 2249 | } |
2239 | out: | 2250 | out: |
2240 | return r; | 2251 | return r; |
2241 | } | 2252 | } |
2242 | 2253 | ||
2243 | static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2254 | static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
2244 | { | 2255 | { |
2245 | struct page *page[1]; | 2256 | struct page *page[1]; |
2246 | unsigned long addr; | 2257 | unsigned long addr; |
2247 | int npages; | 2258 | int npages; |
2248 | gfn_t gfn = vmf->pgoff; | 2259 | gfn_t gfn = vmf->pgoff; |
2249 | struct kvm *kvm = vma->vm_file->private_data; | 2260 | struct kvm *kvm = vma->vm_file->private_data; |
2250 | 2261 | ||
2251 | addr = gfn_to_hva(kvm, gfn); | 2262 | addr = gfn_to_hva(kvm, gfn); |
2252 | if (kvm_is_error_hva(addr)) | 2263 | if (kvm_is_error_hva(addr)) |
2253 | return VM_FAULT_SIGBUS; | 2264 | return VM_FAULT_SIGBUS; |
2254 | 2265 | ||
2255 | npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, | 2266 | npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, |
2256 | NULL); | 2267 | NULL); |
2257 | if (unlikely(npages != 1)) | 2268 | if (unlikely(npages != 1)) |
2258 | return VM_FAULT_SIGBUS; | 2269 | return VM_FAULT_SIGBUS; |
2259 | 2270 | ||
2260 | vmf->page = page[0]; | 2271 | vmf->page = page[0]; |
2261 | return 0; | 2272 | return 0; |
2262 | } | 2273 | } |
2263 | 2274 | ||
2264 | static struct vm_operations_struct kvm_vm_vm_ops = { | 2275 | static struct vm_operations_struct kvm_vm_vm_ops = { |
2265 | .fault = kvm_vm_fault, | 2276 | .fault = kvm_vm_fault, |
2266 | }; | 2277 | }; |
2267 | 2278 | ||
2268 | static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) | 2279 | static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) |
2269 | { | 2280 | { |
2270 | vma->vm_ops = &kvm_vm_vm_ops; | 2281 | vma->vm_ops = &kvm_vm_vm_ops; |
2271 | return 0; | 2282 | return 0; |
2272 | } | 2283 | } |
2273 | 2284 | ||
2274 | static struct file_operations kvm_vm_fops = { | 2285 | static struct file_operations kvm_vm_fops = { |
2275 | .release = kvm_vm_release, | 2286 | .release = kvm_vm_release, |
2276 | .unlocked_ioctl = kvm_vm_ioctl, | 2287 | .unlocked_ioctl = kvm_vm_ioctl, |
2277 | .compat_ioctl = kvm_vm_ioctl, | 2288 | .compat_ioctl = kvm_vm_ioctl, |
2278 | .mmap = kvm_vm_mmap, | 2289 | .mmap = kvm_vm_mmap, |
2279 | }; | 2290 | }; |
2280 | 2291 | ||
2281 | static int kvm_dev_ioctl_create_vm(void) | 2292 | static int kvm_dev_ioctl_create_vm(void) |
2282 | { | 2293 | { |
2283 | int fd; | 2294 | int fd; |
2284 | struct kvm *kvm; | 2295 | struct kvm *kvm; |
2285 | 2296 | ||
2286 | kvm = kvm_create_vm(); | 2297 | kvm = kvm_create_vm(); |
2287 | if (IS_ERR(kvm)) | 2298 | if (IS_ERR(kvm)) |
2288 | return PTR_ERR(kvm); | 2299 | return PTR_ERR(kvm); |
2289 | fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0); | 2300 | fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0); |
2290 | if (fd < 0) | 2301 | if (fd < 0) |
2291 | kvm_put_kvm(kvm); | 2302 | kvm_put_kvm(kvm); |
2292 | 2303 | ||
2293 | return fd; | 2304 | return fd; |
2294 | } | 2305 | } |
2295 | 2306 | ||
2296 | static long kvm_dev_ioctl_check_extension_generic(long arg) | 2307 | static long kvm_dev_ioctl_check_extension_generic(long arg) |
2297 | { | 2308 | { |
2298 | switch (arg) { | 2309 | switch (arg) { |
2299 | case KVM_CAP_USER_MEMORY: | 2310 | case KVM_CAP_USER_MEMORY: |
2300 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | 2311 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
2301 | case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: | 2312 | case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: |
2313 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | ||
2314 | case KVM_CAP_SET_BOOT_CPU_ID: | ||
2315 | #endif | ||
2302 | return 1; | 2316 | return 1; |
2303 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 2317 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
2304 | case KVM_CAP_IRQ_ROUTING: | 2318 | case KVM_CAP_IRQ_ROUTING: |
2305 | return KVM_MAX_IRQ_ROUTES; | 2319 | return KVM_MAX_IRQ_ROUTES; |
2306 | #endif | 2320 | #endif |
2307 | default: | 2321 | default: |
2308 | break; | 2322 | break; |
2309 | } | 2323 | } |
2310 | return kvm_dev_ioctl_check_extension(arg); | 2324 | return kvm_dev_ioctl_check_extension(arg); |
2311 | } | 2325 | } |
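Editor's note: a hedged sketch of probing the capability that kvm_dev_ioctl_check_extension_generic() starts advertising in the hunk above. It uses the usual KVM_CHECK_EXTENSION convention on the /dev/kvm fd (a positive return value means supported); kvm_fd is assumed to come from open("/dev/kvm", O_RDWR), and have_set_boot_cpu_id() is a hypothetical helper.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: non-zero if KVM_SET_BOOT_CPU_ID is available on this kernel. */
	static int have_set_boot_cpu_id(int kvm_fd)
	{
		return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
			     KVM_CAP_SET_BOOT_CPU_ID) > 0;
	}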
2312 | 2326 | ||
2313 | static long kvm_dev_ioctl(struct file *filp, | 2327 | static long kvm_dev_ioctl(struct file *filp, |
2314 | unsigned int ioctl, unsigned long arg) | 2328 | unsigned int ioctl, unsigned long arg) |
2315 | { | 2329 | { |
2316 | long r = -EINVAL; | 2330 | long r = -EINVAL; |
2317 | 2331 | ||
2318 | switch (ioctl) { | 2332 | switch (ioctl) { |
2319 | case KVM_GET_API_VERSION: | 2333 | case KVM_GET_API_VERSION: |
2320 | r = -EINVAL; | 2334 | r = -EINVAL; |
2321 | if (arg) | 2335 | if (arg) |
2322 | goto out; | 2336 | goto out; |
2323 | r = KVM_API_VERSION; | 2337 | r = KVM_API_VERSION; |
2324 | break; | 2338 | break; |
2325 | case KVM_CREATE_VM: | 2339 | case KVM_CREATE_VM: |
2326 | r = -EINVAL; | 2340 | r = -EINVAL; |
2327 | if (arg) | 2341 | if (arg) |
2328 | goto out; | 2342 | goto out; |
2329 | r = kvm_dev_ioctl_create_vm(); | 2343 | r = kvm_dev_ioctl_create_vm(); |
2330 | break; | 2344 | break; |
2331 | case KVM_CHECK_EXTENSION: | 2345 | case KVM_CHECK_EXTENSION: |
2332 | r = kvm_dev_ioctl_check_extension_generic(arg); | 2346 | r = kvm_dev_ioctl_check_extension_generic(arg); |
2333 | break; | 2347 | break; |
2334 | case KVM_GET_VCPU_MMAP_SIZE: | 2348 | case KVM_GET_VCPU_MMAP_SIZE: |
2335 | r = -EINVAL; | 2349 | r = -EINVAL; |
2336 | if (arg) | 2350 | if (arg) |
2337 | goto out; | 2351 | goto out; |
2338 | r = PAGE_SIZE; /* struct kvm_run */ | 2352 | r = PAGE_SIZE; /* struct kvm_run */ |
2339 | #ifdef CONFIG_X86 | 2353 | #ifdef CONFIG_X86 |
2340 | r += PAGE_SIZE; /* pio data page */ | 2354 | r += PAGE_SIZE; /* pio data page */ |
2341 | #endif | 2355 | #endif |
2342 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 2356 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
2343 | r += PAGE_SIZE; /* coalesced mmio ring page */ | 2357 | r += PAGE_SIZE; /* coalesced mmio ring page */ |
2344 | #endif | 2358 | #endif |
2345 | break; | 2359 | break; |
2346 | case KVM_TRACE_ENABLE: | 2360 | case KVM_TRACE_ENABLE: |
2347 | case KVM_TRACE_PAUSE: | 2361 | case KVM_TRACE_PAUSE: |
2348 | case KVM_TRACE_DISABLE: | 2362 | case KVM_TRACE_DISABLE: |
2349 | r = kvm_trace_ioctl(ioctl, arg); | 2363 | r = kvm_trace_ioctl(ioctl, arg); |
2350 | break; | 2364 | break; |
2351 | default: | 2365 | default: |
2352 | return kvm_arch_dev_ioctl(filp, ioctl, arg); | 2366 | return kvm_arch_dev_ioctl(filp, ioctl, arg); |
2353 | } | 2367 | } |
2354 | out: | 2368 | out: |
2355 | return r; | 2369 | return r; |
2356 | } | 2370 | } |
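Editor's note: for completeness, a minimal sketch of the userspace side of kvm_dev_ioctl() above: open the character device, verify KVM_GET_API_VERSION, then obtain a VM fd with KVM_CREATE_VM (error handling and cleanup trimmed). open_vm() is a hypothetical helper; the fd it returns is the vm_fd assumed by the earlier kvm_vm_ioctl() sketch.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: returns a VM fd on success, -1 on failure (no cleanup). */
	static int open_vm(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);

		if (kvm_fd < 0 ||
		    ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
			return -1;
		return ioctl(kvm_fd, KVM_CREATE_VM, 0);
	}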
2357 | 2371 | ||
2358 | static struct file_operations kvm_chardev_ops = { | 2372 | static struct file_operations kvm_chardev_ops = { |
2359 | .unlocked_ioctl = kvm_dev_ioctl, | 2373 | .unlocked_ioctl = kvm_dev_ioctl, |
2360 | .compat_ioctl = kvm_dev_ioctl, | 2374 | .compat_ioctl = kvm_dev_ioctl, |
2361 | }; | 2375 | }; |
2362 | 2376 | ||
2363 | static struct miscdevice kvm_dev = { | 2377 | static struct miscdevice kvm_dev = { |
2364 | KVM_MINOR, | 2378 | KVM_MINOR, |
2365 | "kvm", | 2379 | "kvm", |
2366 | &kvm_chardev_ops, | 2380 | &kvm_chardev_ops, |
2367 | }; | 2381 | }; |
2368 | 2382 | ||
2369 | static void hardware_enable(void *junk) | 2383 | static void hardware_enable(void *junk) |
2370 | { | 2384 | { |
2371 | int cpu = raw_smp_processor_id(); | 2385 | int cpu = raw_smp_processor_id(); |
2372 | 2386 | ||
2373 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) | 2387 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
2374 | return; | 2388 | return; |
2375 | cpumask_set_cpu(cpu, cpus_hardware_enabled); | 2389 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
2376 | kvm_arch_hardware_enable(NULL); | 2390 | kvm_arch_hardware_enable(NULL); |
2377 | } | 2391 | } |
2378 | 2392 | ||
2379 | static void hardware_disable(void *junk) | 2393 | static void hardware_disable(void *junk) |
2380 | { | 2394 | { |
2381 | int cpu = raw_smp_processor_id(); | 2395 | int cpu = raw_smp_processor_id(); |
2382 | 2396 | ||
2383 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) | 2397 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
2384 | return; | 2398 | return; |
2385 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); | 2399 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
2386 | kvm_arch_hardware_disable(NULL); | 2400 | kvm_arch_hardware_disable(NULL); |
2387 | } | 2401 | } |
2388 | 2402 | ||
2389 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | 2403 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, |
2390 | void *v) | 2404 | void *v) |
2391 | { | 2405 | { |
2392 | int cpu = (long)v; | 2406 | int cpu = (long)v; |
2393 | 2407 | ||
2394 | val &= ~CPU_TASKS_FROZEN; | 2408 | val &= ~CPU_TASKS_FROZEN; |
2395 | switch (val) { | 2409 | switch (val) { |
2396 | case CPU_DYING: | 2410 | case CPU_DYING: |
2397 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", | 2411 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", |
2398 | cpu); | 2412 | cpu); |
2399 | hardware_disable(NULL); | 2413 | hardware_disable(NULL); |
2400 | break; | 2414 | break; |
2401 | case CPU_UP_CANCELED: | 2415 | case CPU_UP_CANCELED: |
2402 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", | 2416 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", |
2403 | cpu); | 2417 | cpu); |
2404 | smp_call_function_single(cpu, hardware_disable, NULL, 1); | 2418 | smp_call_function_single(cpu, hardware_disable, NULL, 1); |
2405 | break; | 2419 | break; |
2406 | case CPU_ONLINE: | 2420 | case CPU_ONLINE: |
2407 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | 2421 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", |
2408 | cpu); | 2422 | cpu); |
2409 | smp_call_function_single(cpu, hardware_enable, NULL, 1); | 2423 | smp_call_function_single(cpu, hardware_enable, NULL, 1); |
2410 | break; | 2424 | break; |
2411 | } | 2425 | } |
2412 | return NOTIFY_OK; | 2426 | return NOTIFY_OK; |
2413 | } | 2427 | } |
2414 | 2428 | ||
2415 | 2429 | ||
2416 | asmlinkage void kvm_handle_fault_on_reboot(void) | 2430 | asmlinkage void kvm_handle_fault_on_reboot(void) |
2417 | { | 2431 | { |
2418 | if (kvm_rebooting) | 2432 | if (kvm_rebooting) |
2419 | /* spin while reset goes on */ | 2433 | /* spin while reset goes on */ |
2420 | while (true) | 2434 | while (true) |
2421 | ; | 2435 | ; |
2422 | /* Fault while not rebooting. We want the trace. */ | 2436 | /* Fault while not rebooting. We want the trace. */ |
2423 | BUG(); | 2437 | BUG(); |
2424 | } | 2438 | } |
2425 | EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot); | 2439 | EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot); |
2426 | 2440 | ||
2427 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | 2441 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
2428 | void *v) | 2442 | void *v) |
2429 | { | 2443 | { |
2430 | /* | 2444 | /* |
2431 | * Some (well, at least mine) BIOSes hang on reboot if | 2445 | * Some (well, at least mine) BIOSes hang on reboot if |
2432 | * in vmx root mode. | 2446 | * in vmx root mode. |
2433 | * | 2447 | * |
2434 | * And Intel TXT required VMX off for all cpu when system shutdown. | 2448 | * And Intel TXT required VMX off for all cpu when system shutdown. |
2435 | */ | 2449 | */ |
2436 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); | 2450 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); |
2437 | kvm_rebooting = true; | 2451 | kvm_rebooting = true; |
2438 | on_each_cpu(hardware_disable, NULL, 1); | 2452 | on_each_cpu(hardware_disable, NULL, 1); |
2439 | return NOTIFY_OK; | 2453 | return NOTIFY_OK; |
2440 | } | 2454 | } |
2441 | 2455 | ||
2442 | static struct notifier_block kvm_reboot_notifier = { | 2456 | static struct notifier_block kvm_reboot_notifier = { |
2443 | .notifier_call = kvm_reboot, | 2457 | .notifier_call = kvm_reboot, |
2444 | .priority = 0, | 2458 | .priority = 0, |
2445 | }; | 2459 | }; |
2446 | 2460 | ||
2447 | void kvm_io_bus_init(struct kvm_io_bus *bus) | 2461 | void kvm_io_bus_init(struct kvm_io_bus *bus) |
2448 | { | 2462 | { |
2449 | memset(bus, 0, sizeof(*bus)); | 2463 | memset(bus, 0, sizeof(*bus)); |
2450 | } | 2464 | } |
2451 | 2465 | ||
2452 | void kvm_io_bus_destroy(struct kvm_io_bus *bus) | 2466 | void kvm_io_bus_destroy(struct kvm_io_bus *bus) |
2453 | { | 2467 | { |
2454 | int i; | 2468 | int i; |
2455 | 2469 | ||
2456 | for (i = 0; i < bus->dev_count; i++) { | 2470 | for (i = 0; i < bus->dev_count; i++) { |
2457 | struct kvm_io_device *pos = bus->devs[i]; | 2471 | struct kvm_io_device *pos = bus->devs[i]; |
2458 | 2472 | ||
2459 | kvm_iodevice_destructor(pos); | 2473 | kvm_iodevice_destructor(pos); |
2460 | } | 2474 | } |
2461 | } | 2475 | } |
2462 | 2476 | ||
2463 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, | 2477 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, |
2464 | gpa_t addr, int len, int is_write) | 2478 | gpa_t addr, int len, int is_write) |
2465 | { | 2479 | { |
2466 | int i; | 2480 | int i; |
2467 | 2481 | ||
2468 | for (i = 0; i < bus->dev_count; i++) { | 2482 | for (i = 0; i < bus->dev_count; i++) { |
2469 | struct kvm_io_device *pos = bus->devs[i]; | 2483 | struct kvm_io_device *pos = bus->devs[i]; |
2470 | 2484 | ||
2471 | if (kvm_iodevice_in_range(pos, addr, len, is_write)) | 2485 | if (kvm_iodevice_in_range(pos, addr, len, is_write)) |
2472 | return pos; | 2486 | return pos; |
2473 | } | 2487 | } |
2474 | 2488 | ||
2475 | return NULL; | 2489 | return NULL; |
2476 | } | 2490 | } |
2477 | 2491 | ||
2478 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) | 2492 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) |
2479 | { | 2493 | { |
2480 | BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1)); | 2494 | BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1)); |
2481 | 2495 | ||
2482 | bus->devs[bus->dev_count++] = dev; | 2496 | bus->devs[bus->dev_count++] = dev; |
2483 | } | 2497 | } |
2484 | 2498 | ||
2485 | static struct notifier_block kvm_cpu_notifier = { | 2499 | static struct notifier_block kvm_cpu_notifier = { |
2486 | .notifier_call = kvm_cpu_hotplug, | 2500 | .notifier_call = kvm_cpu_hotplug, |
2487 | .priority = 20, /* must be > scheduler priority */ | 2501 | .priority = 20, /* must be > scheduler priority */ |
2488 | }; | 2502 | }; |
2489 | 2503 | ||
2490 | static int vm_stat_get(void *_offset, u64 *val) | 2504 | static int vm_stat_get(void *_offset, u64 *val) |
2491 | { | 2505 | { |
2492 | unsigned offset = (long)_offset; | 2506 | unsigned offset = (long)_offset; |
2493 | struct kvm *kvm; | 2507 | struct kvm *kvm; |
2494 | 2508 | ||
2495 | *val = 0; | 2509 | *val = 0; |
2496 | spin_lock(&kvm_lock); | 2510 | spin_lock(&kvm_lock); |
2497 | list_for_each_entry(kvm, &vm_list, vm_list) | 2511 | list_for_each_entry(kvm, &vm_list, vm_list) |
2498 | *val += *(u32 *)((void *)kvm + offset); | 2512 | *val += *(u32 *)((void *)kvm + offset); |
2499 | spin_unlock(&kvm_lock); | 2513 | spin_unlock(&kvm_lock); |
2500 | return 0; | 2514 | return 0; |
2501 | } | 2515 | } |
2502 | 2516 | ||
2503 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); | 2517 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); |
2504 | 2518 | ||
2505 | static int vcpu_stat_get(void *_offset, u64 *val) | 2519 | static int vcpu_stat_get(void *_offset, u64 *val) |
2506 | { | 2520 | { |
2507 | unsigned offset = (long)_offset; | 2521 | unsigned offset = (long)_offset; |
2508 | struct kvm *kvm; | 2522 | struct kvm *kvm; |
2509 | struct kvm_vcpu *vcpu; | 2523 | struct kvm_vcpu *vcpu; |
2510 | int i; | 2524 | int i; |
2511 | 2525 | ||
2512 | *val = 0; | 2526 | *val = 0; |
2513 | spin_lock(&kvm_lock); | 2527 | spin_lock(&kvm_lock); |
2514 | list_for_each_entry(kvm, &vm_list, vm_list) | 2528 | list_for_each_entry(kvm, &vm_list, vm_list) |
2515 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 2529 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
2516 | vcpu = kvm->vcpus[i]; | 2530 | vcpu = kvm->vcpus[i]; |
2517 | if (vcpu) | 2531 | if (vcpu) |
2518 | *val += *(u32 *)((void *)vcpu + offset); | 2532 | *val += *(u32 *)((void *)vcpu + offset); |
2519 | } | 2533 | } |
2520 | spin_unlock(&kvm_lock); | 2534 | spin_unlock(&kvm_lock); |
2521 | return 0; | 2535 | return 0; |
2522 | } | 2536 | } |
2523 | 2537 | ||
2524 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); | 2538 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); |
2525 | 2539 | ||
2526 | static struct file_operations *stat_fops[] = { | 2540 | static struct file_operations *stat_fops[] = { |
2527 | [KVM_STAT_VCPU] = &vcpu_stat_fops, | 2541 | [KVM_STAT_VCPU] = &vcpu_stat_fops, |
2528 | [KVM_STAT_VM] = &vm_stat_fops, | 2542 | [KVM_STAT_VM] = &vm_stat_fops, |
2529 | }; | 2543 | }; |
2530 | 2544 | ||
2531 | static void kvm_init_debug(void) | 2545 | static void kvm_init_debug(void) |
2532 | { | 2546 | { |
2533 | struct kvm_stats_debugfs_item *p; | 2547 | struct kvm_stats_debugfs_item *p; |
2534 | 2548 | ||
2535 | kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); | 2549 | kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); |
2536 | for (p = debugfs_entries; p->name; ++p) | 2550 | for (p = debugfs_entries; p->name; ++p) |
2537 | p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, | 2551 | p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, |
2538 | (void *)(long)p->offset, | 2552 | (void *)(long)p->offset, |
2539 | stat_fops[p->kind]); | 2553 | stat_fops[p->kind]); |
2540 | } | 2554 | } |
2541 | 2555 | ||
2542 | static void kvm_exit_debug(void) | 2556 | static void kvm_exit_debug(void) |
2543 | { | 2557 | { |
2544 | struct kvm_stats_debugfs_item *p; | 2558 | struct kvm_stats_debugfs_item *p; |
2545 | 2559 | ||
2546 | for (p = debugfs_entries; p->name; ++p) | 2560 | for (p = debugfs_entries; p->name; ++p) |
2547 | debugfs_remove(p->dentry); | 2561 | debugfs_remove(p->dentry); |
2548 | debugfs_remove(kvm_debugfs_dir); | 2562 | debugfs_remove(kvm_debugfs_dir); |
2549 | } | 2563 | } |
2550 | 2564 | ||
2551 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) | 2565 | static int kvm_suspend(struct sys_device *dev, pm_message_t state) |
2552 | { | 2566 | { |
2553 | hardware_disable(NULL); | 2567 | hardware_disable(NULL); |
2554 | return 0; | 2568 | return 0; |
2555 | } | 2569 | } |
2556 | 2570 | ||
2557 | static int kvm_resume(struct sys_device *dev) | 2571 | static int kvm_resume(struct sys_device *dev) |
2558 | { | 2572 | { |
2559 | hardware_enable(NULL); | 2573 | hardware_enable(NULL); |
2560 | return 0; | 2574 | return 0; |
2561 | } | 2575 | } |
2562 | 2576 | ||
2563 | static struct sysdev_class kvm_sysdev_class = { | 2577 | static struct sysdev_class kvm_sysdev_class = { |
2564 | .name = "kvm", | 2578 | .name = "kvm", |
2565 | .suspend = kvm_suspend, | 2579 | .suspend = kvm_suspend, |
2566 | .resume = kvm_resume, | 2580 | .resume = kvm_resume, |
2567 | }; | 2581 | }; |
2568 | 2582 | ||
2569 | static struct sys_device kvm_sysdev = { | 2583 | static struct sys_device kvm_sysdev = { |
2570 | .id = 0, | 2584 | .id = 0, |
2571 | .cls = &kvm_sysdev_class, | 2585 | .cls = &kvm_sysdev_class, |
2572 | }; | 2586 | }; |
2573 | 2587 | ||
2574 | struct page *bad_page; | 2588 | struct page *bad_page; |
2575 | pfn_t bad_pfn; | 2589 | pfn_t bad_pfn; |
2576 | 2590 | ||
2577 | static inline | 2591 | static inline |
2578 | struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) | 2592 | struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) |
2579 | { | 2593 | { |
2580 | return container_of(pn, struct kvm_vcpu, preempt_notifier); | 2594 | return container_of(pn, struct kvm_vcpu, preempt_notifier); |
2581 | } | 2595 | } |
2582 | 2596 | ||
2583 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) | 2597 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
2584 | { | 2598 | { |
2585 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 2599 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
2586 | 2600 | ||
2587 | kvm_arch_vcpu_load(vcpu, cpu); | 2601 | kvm_arch_vcpu_load(vcpu, cpu); |
2588 | } | 2602 | } |
2589 | 2603 | ||
2590 | static void kvm_sched_out(struct preempt_notifier *pn, | 2604 | static void kvm_sched_out(struct preempt_notifier *pn, |
2591 | struct task_struct *next) | 2605 | struct task_struct *next) |
2592 | { | 2606 | { |
2593 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 2607 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
2594 | 2608 | ||
2595 | kvm_arch_vcpu_put(vcpu); | 2609 | kvm_arch_vcpu_put(vcpu); |
2596 | } | 2610 | } |
2597 | 2611 | ||
2598 | int kvm_init(void *opaque, unsigned int vcpu_size, | 2612 | int kvm_init(void *opaque, unsigned int vcpu_size, |
2599 | struct module *module) | 2613 | struct module *module) |
2600 | { | 2614 | { |
2601 | int r; | 2615 | int r; |
2602 | int cpu; | 2616 | int cpu; |
2603 | 2617 | ||
2604 | kvm_init_debug(); | 2618 | kvm_init_debug(); |
2605 | 2619 | ||
2606 | r = kvm_arch_init(opaque); | 2620 | r = kvm_arch_init(opaque); |
2607 | if (r) | 2621 | if (r) |
2608 | goto out_fail; | 2622 | goto out_fail; |
2609 | 2623 | ||
2610 | bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 2624 | bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
2611 | 2625 | ||
2612 | if (bad_page == NULL) { | 2626 | if (bad_page == NULL) { |
2613 | r = -ENOMEM; | 2627 | r = -ENOMEM; |
2614 | goto out; | 2628 | goto out; |
2615 | } | 2629 | } |
2616 | 2630 | ||
2617 | bad_pfn = page_to_pfn(bad_page); | 2631 | bad_pfn = page_to_pfn(bad_page); |
2618 | 2632 | ||
2619 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { | 2633 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { |
2620 | r = -ENOMEM; | 2634 | r = -ENOMEM; |
2621 | goto out_free_0; | 2635 | goto out_free_0; |
2622 | } | 2636 | } |
2623 | 2637 | ||
2624 | r = kvm_arch_hardware_setup(); | 2638 | r = kvm_arch_hardware_setup(); |
2625 | if (r < 0) | 2639 | if (r < 0) |
2626 | goto out_free_0a; | 2640 | goto out_free_0a; |
2627 | 2641 | ||
2628 | for_each_online_cpu(cpu) { | 2642 | for_each_online_cpu(cpu) { |
2629 | smp_call_function_single(cpu, | 2643 | smp_call_function_single(cpu, |
2630 | kvm_arch_check_processor_compat, | 2644 | kvm_arch_check_processor_compat, |
2631 | &r, 1); | 2645 | &r, 1); |
2632 | if (r < 0) | 2646 | if (r < 0) |
2633 | goto out_free_1; | 2647 | goto out_free_1; |
2634 | } | 2648 | } |
2635 | 2649 | ||
2636 | on_each_cpu(hardware_enable, NULL, 1); | 2650 | on_each_cpu(hardware_enable, NULL, 1); |
2637 | r = register_cpu_notifier(&kvm_cpu_notifier); | 2651 | r = register_cpu_notifier(&kvm_cpu_notifier); |
2638 | if (r) | 2652 | if (r) |
2639 | goto out_free_2; | 2653 | goto out_free_2; |
2640 | register_reboot_notifier(&kvm_reboot_notifier); | 2654 | register_reboot_notifier(&kvm_reboot_notifier); |
2641 | 2655 | ||
2642 | r = sysdev_class_register(&kvm_sysdev_class); | 2656 | r = sysdev_class_register(&kvm_sysdev_class); |
2643 | if (r) | 2657 | if (r) |
2644 | goto out_free_3; | 2658 | goto out_free_3; |
2645 | 2659 | ||
2646 | r = sysdev_register(&kvm_sysdev); | 2660 | r = sysdev_register(&kvm_sysdev); |
2647 | if (r) | 2661 | if (r) |
2648 | goto out_free_4; | 2662 | goto out_free_4; |
2649 | 2663 | ||
2650 | /* A kmem cache lets us meet the alignment requirements of fx_save. */ | 2664 | /* A kmem cache lets us meet the alignment requirements of fx_save. */ |
2651 | kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, | 2665 | kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, |
2652 | __alignof__(struct kvm_vcpu), | 2666 | __alignof__(struct kvm_vcpu), |
2653 | 0, NULL); | 2667 | 0, NULL); |
2654 | if (!kvm_vcpu_cache) { | 2668 | if (!kvm_vcpu_cache) { |
2655 | r = -ENOMEM; | 2669 | r = -ENOMEM; |
2656 | goto out_free_5; | 2670 | goto out_free_5; |
2657 | } | 2671 | } |
2658 | 2672 | ||
2659 | kvm_chardev_ops.owner = module; | 2673 | kvm_chardev_ops.owner = module; |
2660 | kvm_vm_fops.owner = module; | 2674 | kvm_vm_fops.owner = module; |
2661 | kvm_vcpu_fops.owner = module; | 2675 | kvm_vcpu_fops.owner = module; |
2662 | 2676 | ||
2663 | r = misc_register(&kvm_dev); | 2677 | r = misc_register(&kvm_dev); |
2664 | if (r) { | 2678 | if (r) { |
2665 | printk(KERN_ERR "kvm: misc device register failed\n"); | 2679 | printk(KERN_ERR "kvm: misc device register failed\n"); |
2666 | goto out_free; | 2680 | goto out_free; |
2667 | } | 2681 | } |
2668 | 2682 | ||
2669 | kvm_preempt_ops.sched_in = kvm_sched_in; | 2683 | kvm_preempt_ops.sched_in = kvm_sched_in; |
2670 | kvm_preempt_ops.sched_out = kvm_sched_out; | 2684 | kvm_preempt_ops.sched_out = kvm_sched_out; |
2671 | 2685 | ||
2672 | return 0; | 2686 | return 0; |
2673 | 2687 | ||
2674 | out_free: | 2688 | out_free: |
2675 | kmem_cache_destroy(kvm_vcpu_cache); | 2689 | kmem_cache_destroy(kvm_vcpu_cache); |
2676 | out_free_5: | 2690 | out_free_5: |
2677 | sysdev_unregister(&kvm_sysdev); | 2691 | sysdev_unregister(&kvm_sysdev); |
2678 | out_free_4: | 2692 | out_free_4: |
2679 | sysdev_class_unregister(&kvm_sysdev_class); | 2693 | sysdev_class_unregister(&kvm_sysdev_class); |
2680 | out_free_3: | 2694 | out_free_3: |
2681 | unregister_reboot_notifier(&kvm_reboot_notifier); | 2695 | unregister_reboot_notifier(&kvm_reboot_notifier); |
2682 | unregister_cpu_notifier(&kvm_cpu_notifier); | 2696 | unregister_cpu_notifier(&kvm_cpu_notifier); |
2683 | out_free_2: | 2697 | out_free_2: |
2684 | on_each_cpu(hardware_disable, NULL, 1); | 2698 | on_each_cpu(hardware_disable, NULL, 1); |
2685 | out_free_1: | 2699 | out_free_1: |
2686 | kvm_arch_hardware_unsetup(); | 2700 | kvm_arch_hardware_unsetup(); |
2687 | out_free_0a: | 2701 | out_free_0a: |
2688 | free_cpumask_var(cpus_hardware_enabled); | 2702 | free_cpumask_var(cpus_hardware_enabled); |
2689 | out_free_0: | 2703 | out_free_0: |
2690 | __free_page(bad_page); | 2704 | __free_page(bad_page); |
2691 | out: | 2705 | out: |
2692 | kvm_arch_exit(); | 2706 | kvm_arch_exit(); |
2693 | kvm_exit_debug(); | 2707 | kvm_exit_debug(); |
2694 | out_fail: | 2708 | out_fail: |
2695 | return r; | 2709 | return r; |
2696 | } | 2710 | } |
2697 | EXPORT_SYMBOL_GPL(kvm_init); | 2711 | EXPORT_SYMBOL_GPL(kvm_init); |
2698 | 2712 |