Blame view
include/linux/kvm_host.h
35.3 KB
edf884172 KVM: Move arch de... |
1 2 |
#ifndef __KVM_HOST_H #define __KVM_HOST_H |
6aa8b732c [PATCH] kvm: user... |
3 4 5 6 7 8 9 |
/* * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/types.h> |
e56a7a28e KVM: Use virtual ... |
10 |
#include <linux/hardirq.h> |
6aa8b732c [PATCH] kvm: user... |
11 12 13 |
#include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> |
06ff0d372 KVM: Fix includes |
14 15 |
#include <linux/signal.h> #include <linux/sched.h> |
187f1882b BUG: headers with... |
16 |
#include <linux/bug.h> |
6aa8b732c [PATCH] kvm: user... |
17 |
#include <linux/mm.h> |
b297e672e KVM: Fix include ... |
18 |
#include <linux/mmu_notifier.h> |
15ad71460 KVM: Use the sche... |
19 |
#include <linux/preempt.h> |
0937c48d0 KVM: Add fields f... |
20 |
#include <linux/msi.h> |
d89f5eff7 KVM: Clean up vm ... |
21 |
#include <linux/slab.h> |
bd2b53b20 KVM: fast-path ms... |
22 |
#include <linux/rcupdate.h> |
bd80158af KVM: Clean up and... |
23 |
#include <linux/ratelimit.h> |
83f09228d KVM: inline is_*_... |
24 |
#include <linux/err.h> |
c11f11fcb kvm: Prepare to a... |
25 |
#include <linux/irqflags.h> |
521921bad kvm: Move guest e... |
26 |
#include <linux/context_tracking.h> |
1a02b2703 KVM: introduce kv... |
27 |
#include <linux/irqbypass.h> |
8577370fb KVM: Use simple w... |
28 |
#include <linux/swait.h> |
e3736c3eb kvm: convert kvm.... |
29 |
#include <linux/refcount.h> |
e8edc6e03 Detach sched.h fr... |
30 |
#include <asm/signal.h> |
6aa8b732c [PATCH] kvm: user... |
31 |
|
6aa8b732c [PATCH] kvm: user... |
32 |
#include <linux/kvm.h> |
102d8325a KVM: add MSR base... |
33 |
#include <linux/kvm_para.h> |
6aa8b732c [PATCH] kvm: user... |
34 |
|
edf884172 KVM: Move arch de... |
35 |
#include <linux/kvm_types.h> |
d77a39d98 KVM: Portability:... |
36 |
|
edf884172 KVM: Move arch de... |
37 |
#include <asm/kvm_host.h> |
d657a98e3 KVM: Portability:... |
38 |
|
0b1b1dfd5 kvm: introduce KV... |
39 40 41 |
#ifndef KVM_MAX_VCPU_ID #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS #endif |
6aa8b732c [PATCH] kvm: user... |
42 |
/* |
67b29204c KVM: hide KVM_MEM... |
43 44 45 46 47 |
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in * include/linux/kvm.h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) |
87da7e66a KVM: x86: fix vcp... |
48 49 |
/* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 |
f78146b0f KVM: Fix page-cro... |
50 |
|
f481b069e KVM: implement mu... |
51 52 53 |
#ifndef KVM_ADDRESS_SPACE_NUM #define KVM_ADDRESS_SPACE_NUM 1 #endif |
f78146b0f KVM: Fix page-cro... |
54 |
/* |
9c5b11728 KVM: let the erro... |
55 |
* For the normal pfn, the highest 12 bits should be zero, |
81c52c56e KVM: do not treat... |
56 57 |
* so we can mask bit 62 ~ bit 52 to indicate the error pfn, * mask bit 63 to indicate the noslot pfn. |
9c5b11728 KVM: let the erro... |
58 |
*/ |
81c52c56e KVM: do not treat... |
59 60 61 |
#define KVM_PFN_ERR_MASK (0x7ffULL << 52) #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) #define KVM_PFN_NOSLOT (0x1ULL << 63) |
9c5b11728 KVM: let the erro... |
62 63 64 |
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) |
81c52c56e KVM: do not treat... |
65 |
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) |
6c8ee57be KVM: introduce KV... |
66 |
|
81c52c56e KVM: do not treat... |
67 68 69 70 |
/* * error pfns indicate that the gfn is in slot but failed to * translate it to pfn on host. */ |
ba049e93a kvm: rename pfn_t... |
71 |
static inline bool is_error_pfn(kvm_pfn_t pfn) |
83f09228d KVM: inline is_*_... |
72 |
{ |
9c5b11728 KVM: let the erro... |
73 |
return !!(pfn & KVM_PFN_ERR_MASK); |
83f09228d KVM: inline is_*_... |
74 |
} |
81c52c56e KVM: do not treat... |
75 76 77 78 79 |
/* * error_noslot pfns indicate that the gfn can not be * translated to pfn - it is not in slot or failed to * translate it to pfn. */ |
ba049e93a kvm: rename pfn_t... |
80 |
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn) |
83f09228d KVM: inline is_*_... |
81 |
{ |
81c52c56e KVM: do not treat... |
82 |
return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); |
83f09228d KVM: inline is_*_... |
83 |
} |
81c52c56e KVM: do not treat... |
84 |
/* noslot pfn indicates that the gfn is not in slot. */ |
ba049e93a kvm: rename pfn_t... |
85 |
static inline bool is_noslot_pfn(kvm_pfn_t pfn) |
83f09228d KVM: inline is_*_... |
86 |
{ |
81c52c56e KVM: do not treat... |
87 |
return pfn == KVM_PFN_NOSLOT; |
83f09228d KVM: inline is_*_... |
88 |
} |
bf640876e KVM: s390: Make K... |
89 90 91 92 93 |
/* * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390) * provide own defines and kvm_is_error_hva */ #ifndef KVM_HVA_ERR_BAD |
7068d0971 KVM: introduce KV... |
94 95 |
#define KVM_HVA_ERR_BAD (PAGE_OFFSET) #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) |
ca3a490c7 KVM: introduce KV... |
96 97 98 |
static inline bool kvm_is_error_hva(unsigned long addr) { |
7068d0971 KVM: introduce KV... |
99 |
return addr >= PAGE_OFFSET; |
ca3a490c7 KVM: introduce KV... |
100 |
} |
bf640876e KVM: s390: Make K... |
101 |
#endif |
6cede2e67 KVM: introduce KV... |
102 |
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) |
9c5b11728 KVM: let the erro... |
103 |
static inline bool is_error_page(struct page *page) |
6cede2e67 KVM: introduce KV... |
104 105 106 |
{ return IS_ERR(page); } |
930f7fd6d KVM: mark request... |
107 108 |
#define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) |
7a97cec26 KVM: mark request... |
109 |
#define KVM_REQUEST_WAIT BIT(9) |
f78146b0f KVM: Fix page-cro... |
110 |
/* |
2860c4b16 KVM: move archite... |
111 112 |
* Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. |
d9e368d61 KVM: Flush remote... |
113 |
*/ |
7a97cec26 KVM: mark request... |
114 115 116 117 |
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_PENDING_TIMER 2 #define KVM_REQ_UNHALT 3 |
2387149ea KVM: improve arch... |
118 119 120 121 122 123 124 |
#define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ }) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) |
0cd310437 KVM: document whi... |
125 |
|
7a84428af KVM: Add resampli... |
126 127 |
#define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
5550af4df KVM: Fix guest sh... |
128 |
|
c16f862d0 KVM: Use kmem cac... |
129 |
extern struct kmem_cache *kvm_vcpu_cache; |
6aa8b732c [PATCH] kvm: user... |
130 |
|
2f303b74a KVM: Convert kvm_... |
131 |
extern spinlock_t kvm_lock; |
fc1b74925 KVM: Move vm_list... |
132 |
extern struct list_head vm_list; |
743eeb0b0 KVM: Intelligent ... |
133 134 135 136 137 |
struct kvm_io_range { gpa_t addr; int len; struct kvm_io_device *dev; }; |
786a9f888 KVM: set upper bo... |
138 |
#define NR_IOBUS_DEVS 1000 |
a13007160 KVM: resize kvm_i... |
139 |
|
2eeb2e94e KVM: Adds support... |
140 |
struct kvm_io_bus { |
6ea34c9b7 kvm: exclude ioev... |
141 142 |
int dev_count; int ioeventfd_count; |
a13007160 KVM: resize kvm_i... |
143 |
struct kvm_io_range range[]; |
2eeb2e94e KVM: Adds support... |
144 |
}; |
e93f8a0f8 KVM: convert io_b... |
145 146 147 |
enum kvm_bus { KVM_MMIO_BUS, KVM_PIO_BUS, |
060f0ce6f KVM: Introduce KV... |
148 |
KVM_VIRTIO_CCW_NOTIFY_BUS, |
68c3b4d16 KVM: VMX: speed u... |
149 |
KVM_FAST_MMIO_BUS, |
e93f8a0f8 KVM: convert io_b... |
150 151 |
KVM_NR_BUSES }; |
e32edf4fd KVM: Redesign kvm... |
152 |
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
e93f8a0f8 KVM: convert io_b... |
153 |
int len, const void *val); |
e32edf4fd KVM: Redesign kvm... |
154 155 156 157 |
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val, long cookie); int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); |
743eeb0b0 KVM: Intelligent ... |
158 159 |
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); |
90db10434 KVM: kvm_io_bus_u... |
160 161 |
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); |
8a39d0067 KVM: kvm_io_bus: ... |
162 163 |
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); |
2eeb2e94e KVM: Adds support... |
164 |
|
af585b921 KVM: Halt vcpu if... |
165 166 167 168 169 170 171 172 173 174 |
#ifdef CONFIG_KVM_ASYNC_PF struct kvm_async_pf { struct work_struct work; struct list_head link; struct list_head queue; struct kvm_vcpu *vcpu; struct mm_struct *mm; gva_t gva; unsigned long addr; struct kvm_arch_async_pf arch; |
f2e106692 KVM: Drop FOLL_GE... |
175 |
bool wakeup_all; |
af585b921 KVM: Halt vcpu if... |
176 177 178 179 |
}; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); |
e0ead41a6 KVM: async_pf: Pr... |
180 |
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, |
af585b921 KVM: Halt vcpu if... |
181 |
struct kvm_arch_async_pf *arch); |
344d9588a KVM: Add PV MSR t... |
182 |
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); |
af585b921 KVM: Halt vcpu if... |
183 |
#endif |
6b7e2d099 KVM: Add "exiting... |
184 185 186 |
enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, |
c142786c6 KVM: MMU: Don't u... |
187 188 |
EXITING_GUEST_MODE, READING_SHADOW_PAGE_TABLES, |
6b7e2d099 KVM: Add "exiting... |
189 |
}; |
f78146b0f KVM: Fix page-cro... |
190 191 192 193 194 195 196 197 198 |
/* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. */ struct kvm_mmio_fragment { gpa_t gpa; void *data; unsigned len; }; |
d17fbbf73 KVM: Portability:... |
199 200 |
struct kvm_vcpu { struct kvm *kvm; |
31bb117eb KVM: Use CONFIG_P... |
201 |
#ifdef CONFIG_PREEMPT_NOTIFIERS |
d17fbbf73 KVM: Portability:... |
202 |
struct preempt_notifier preempt_notifier; |
31bb117eb KVM: Use CONFIG_P... |
203 |
#endif |
6b7e2d099 KVM: Add "exiting... |
204 |
int cpu; |
d17fbbf73 KVM: Portability:... |
205 |
int vcpu_id; |
6b7e2d099 KVM: Add "exiting... |
206 207 |
int srcu_idx; int mode; |
d17fbbf73 KVM: Portability:... |
208 |
unsigned long requests; |
d0bfb940e KVM: New guest de... |
209 |
unsigned long guest_debug; |
6b7e2d099 KVM: Add "exiting... |
210 |
|
bf9f6ac8d KVM: Update Poste... |
211 212 |
int pre_pcpu; struct list_head blocked_vcpu_list; |
6b7e2d099 KVM: Add "exiting... |
213 214 |
struct mutex mutex; struct kvm_run *run; |
f656ce018 KVM: switch vcpu ... |
215 |
|
4124a4cff x86,kvm: move qem... |
216 |
int guest_xcr0_loaded; |
8577370fb KVM: Use simple w... |
217 |
struct swait_queue_head wq; |
0e4524a5d KVM: mark vcpu->p... |
218 |
struct pid __rcu *pid; |
d17fbbf73 KVM: Portability:... |
219 220 221 |
int sigset_active; sigset_t sigset; struct kvm_vcpu_stat stat; |
19020f8ab KVM: make halt_po... |
222 |
unsigned int halt_poll_ns; |
3491caf27 KVM: halt_polling... |
223 |
bool valid_wakeup; |
d17fbbf73 KVM: Portability:... |
224 |
|
34c16eecf KVM: Portability:... |
225 |
#ifdef CONFIG_HAS_IOMEM |
d17fbbf73 KVM: Portability:... |
226 227 228 |
int mmio_needed; int mmio_read_completed; int mmio_is_write; |
f78146b0f KVM: Fix page-cro... |
229 230 231 |
int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; |
34c16eecf KVM: Portability:... |
232 |
#endif |
1165f5fec KVM: Per-vcpu sta... |
233 |
|
af585b921 KVM: Halt vcpu if... |
234 235 236 237 238 239 240 241 |
#ifdef CONFIG_KVM_ASYNC_PF struct { u32 queued; struct list_head queue; struct list_head done; spinlock_t lock; } async_pf; #endif |
4c088493c KVM: Note down wh... |
242 243 244 245 246 247 248 249 250 251 252 253 |
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT /* * Cpu relax intercept or pause loop exit optimization * in_spin_loop: set when a vcpu does a pause loop exit * or cpu relax intercepted. * dy_eligible: indicates whether vcpu is eligible for directed yield. */ struct { bool in_spin_loop; bool dy_eligible; } spin_loop; #endif |
3a08a8f9f kvm: Record the p... |
254 |
bool preempted; |
d657a98e3 KVM: Portability:... |
255 |
struct kvm_vcpu_arch arch; |
45b5939e5 kvm: create per-v... |
256 |
struct dentry *debugfs_dentry; |
d657a98e3 KVM: Portability:... |
257 |
}; |
6b7e2d099 KVM: Add "exiting... |
258 259 |
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { |
cde9af6e7 KVM: add explicit... |
260 261 262 263 264 265 |
/* * The memory barrier ensures a previous write to vcpu->requests cannot * be reordered with the read of vcpu->mode. It pairs with the general * memory barrier following the write of vcpu->mode in VCPU RUN. */ smp_mb__before_atomic(); |
6b7e2d099 KVM: Add "exiting... |
266 267 |
return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); } |
660c22c42 KVM: limit the nu... |
268 269 270 271 272 |
/* * Some of the bitops functions do not support too long bitmaps. * This number must be determined not to exceed such limits. */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) |
6aa8b732c [PATCH] kvm: user... |
273 274 275 |
struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; |
6aa8b732c [PATCH] kvm: user... |
276 |
unsigned long *dirty_bitmap; |
db3fe4eb4 KVM: Introduce kv... |
277 |
struct kvm_arch_memory_slot arch; |
8a7ae055f KVM: MMU: Partial... |
278 |
unsigned long userspace_addr; |
6104f472a KVM: struct kvm_m... |
279 |
u32 flags; |
1e702d9af KVM: struct kvm_m... |
280 |
short id; |
6aa8b732c [PATCH] kvm: user... |
281 |
}; |
87bf6e7de KVM: fix the hand... |
282 283 284 285 |
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) { return ALIGN(memslot->npages, BITS_PER_LONG) / 8; } |
842235987 KVM: s390: irq ro... |
286 287 288 289 290 291 292 |
struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; u64 ind_offset; u32 summary_offset; u32 adapter_id; }; |
5c919412f kvm/x86: Hyper-V ... |
293 294 295 296 |
struct kvm_hv_sint { u32 vcpu; u32 sint; }; |
399ec807d KVM: Userspace co... |
297 298 |
struct kvm_kernel_irq_routing_entry { u32 gsi; |
5116d8f6b KVM: fix ack not ... |
299 |
u32 type; |
4925663a0 KVM: Report IRQ i... |
300 |
int (*set)(struct kvm_kernel_irq_routing_entry *e, |
aa2fbe6d4 KVM: Let ioapic k... |
301 302 |
struct kvm *kvm, int irq_source_id, int level, bool line_status); |
399ec807d KVM: Userspace co... |
303 304 305 306 307 |
union { struct { unsigned irqchip; unsigned pin; } irqchip; |
0455e72c9 KVM: Add devid in... |
308 309 310 311 312 313 314 |
struct { u32 address_lo; u32 address_hi; u32 data; u32 flags; u32 devid; } msi; |
842235987 KVM: s390: irq ro... |
315 |
struct kvm_s390_adapter_int adapter; |
5c919412f kvm/x86: Hyper-V ... |
316 |
struct kvm_hv_sint hv_sint; |
399ec807d KVM: Userspace co... |
317 |
}; |
46e624b95 KVM: Change irq r... |
318 319 |
struct hlist_node link; }; |
b053b2aef KVM: x86: Add EOI... |
320 321 322 323 324 325 326 327 328 329 330 |
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING struct kvm_irq_routing_table { int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; u32 nr_rt_entries; /* * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ struct hlist_head map[0]; }; #endif |
0743247fb KVM: Make KVM_PRI... |
331 332 333 |
#ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif |
93a5cef07 KVM: introduce KV... |
334 |
#ifndef KVM_MEM_SLOTS_NUM |
bbacc0c11 KVM: Rename KVM_M... |
335 |
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
93a5cef07 KVM: introduce KV... |
336 |
#endif |
f481b069e KVM: implement mu... |
337 338 339 340 341 342 |
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) { return 0; } #endif |
bf3e05bc1 KVM: sort memslot... |
343 344 345 346 347 |
/* * Note: * memslots are not sorted by id anymore, please use id_to_memslot() * to get the memslot by its id. */ |
46a26bf55 KVM: modify memsl... |
348 |
struct kvm_memslots { |
49c7754ce KVM: Add memory s... |
349 |
u64 generation; |
93a5cef07 KVM: introduce KV... |
350 |
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; |
f85e2cb5d KVM: introduce a ... |
351 |
/* The mapping table from slot id to the index in memslots[]. */ |
1e702d9af KVM: struct kvm_m... |
352 |
short id_to_index[KVM_MEM_SLOTS_NUM]; |
d4ae84a02 kvm: search_memsl... |
353 |
atomic_t lru_slot; |
9c1a5d387 kvm: optimize GFN... |
354 |
int used_slots; |
46a26bf55 KVM: modify memsl... |
355 |
}; |
6aa8b732c [PATCH] kvm: user... |
356 |
struct kvm { |
aaee2c94f KVM: MMU: Switch ... |
357 |
spinlock_t mmu_lock; |
79fac95ec KVM: convert slot... |
358 |
struct mutex slots_lock; |
6d4e4c4fc KVM: Disallow for... |
359 |
struct mm_struct *mm; /* userspace tied to this vm */ |
a80cf7b5f KVM: mark memory ... |
360 |
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; |
fb3f0f51d KVM: Dynamically ... |
361 |
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; |
6c7caebc2 KVM: introduce kv... |
362 363 364 365 366 367 368 |
/* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only * incremented after storing the kvm_vcpu pointer in vcpus, * and is accessed atomically. */ |
73880c80a KVM: Break depend... |
369 |
atomic_t online_vcpus; |
6c7caebc2 KVM: introduce kv... |
370 |
int created_vcpus; |
217ece612 KVM: use yield_to... |
371 |
int last_boosted_vcpu; |
133de9021 [PATCH] KVM: Add ... |
372 |
struct list_head vm_list; |
60eead79a KVM: introduce ir... |
373 |
struct mutex lock; |
4a12f9517 KVM: mark kvm->bu... |
374 |
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; |
721eecbf4 KVM: irqfd |
375 376 377 378 |
#ifdef CONFIG_HAVE_KVM_EVENTFD struct { spinlock_t lock; struct list_head items; |
7a84428af KVM: Add resampli... |
379 380 |
struct list_head resampler_list; struct mutex resampler_lock; |
721eecbf4 KVM: irqfd |
381 |
} irqfds; |
d34e6b175 KVM: add ioeventf... |
382 |
struct list_head ioeventfds; |
721eecbf4 KVM: irqfd |
383 |
#endif |
ba1389b7a KVM: Extend stats... |
384 |
struct kvm_vm_stat stat; |
d69fb81f0 KVM: Portability:... |
385 |
struct kvm_arch arch; |
e3736c3eb kvm: convert kvm.... |
386 |
refcount_t users_count; |
4b4357e02 kvm: make KVM_COA... |
387 |
#ifdef CONFIG_KVM_MMIO |
5f94c1741 KVM: Add coalesce... |
388 |
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
2b3c246a6 KVM: Make coalesc... |
389 390 |
spinlock_t ring_lock; struct list_head coalesced_zones; |
5f94c1741 KVM: Add coalesce... |
391 |
#endif |
e930bffe9 KVM: Synchronize ... |
392 |
|
60eead79a KVM: introduce ir... |
393 |
struct mutex irq_lock; |
75858a84a KVM: Interrupt ma... |
394 |
#ifdef CONFIG_HAVE_KVM_IRQCHIP |
bd2b53b20 KVM: fast-path ms... |
395 |
/* |
9957c86d6 KVM: Move all acc... |
396 |
* Update side is protected by irq_lock. |
bd2b53b20 KVM: fast-path ms... |
397 |
*/ |
4b6a2872a kvm: add __rcu an... |
398 |
struct kvm_irq_routing_table __rcu *irq_routing; |
c77dcacb3 KVM: Move more co... |
399 400 |
#endif #ifdef CONFIG_HAVE_KVM_IRQFD |
136bdfeee KVM: Move irq ack... |
401 |
struct hlist_head irq_ack_notifier_list; |
75858a84a KVM: Interrupt ma... |
402 |
#endif |
36c1ed821 KVM: Guard mmu_no... |
403 |
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
e930bffe9 KVM: Synchronize ... |
404 405 406 407 |
struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; #endif |
a086f6a1e Revert "KVM: Simp... |
408 |
long tlbs_dirty; |
07f0a7bde kvm: destroy emul... |
409 |
struct list_head devices; |
536a6f88c KVM: Create debug... |
410 411 |
struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; |
6ade8694f kvm: Move srcu_st... |
412 413 |
struct srcu_struct srcu; struct srcu_struct irq_srcu; |
fdeaf7e3e KVM: make pid ava... |
414 |
pid_t userspace_pid; |
6aa8b732c [PATCH] kvm: user... |
415 |
}; |
a737f256b KVM: Cleanup the ... |
416 417 418 419 420 421 |
#define kvm_err(fmt, ...) \ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_info(fmt, ...) \ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
ae0f54995 kvm: x86: don't p... |
422 423 424 |
#define kvm_debug_ratelimited(fmt, ...) \ pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ ## __VA_ARGS__) |
a737f256b KVM: Cleanup the ... |
425 426 427 |
#define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) |
f02424785 KVM: Add and use ... |
428 |
|
a737f256b KVM: Cleanup the ... |
429 430 |
/* The guest did something we don't support. */ #define vcpu_unimpl(vcpu, fmt, ...) \ |
671d9ab38 kvm: Dump guest r... |
431 432 |
kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) |
6aa8b732c [PATCH] kvm: user... |
433 |
|
ee86dbc6e kvm: introduce vc... |
434 435 |
#define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
ae0f54995 kvm: x86: don't p... |
436 437 438 |
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ ## __VA_ARGS__) |
765eaa0f7 kvm/x86: Hyper-V ... |
439 440 |
#define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
ee86dbc6e kvm: introduce vc... |
441 |
|
4a12f9517 KVM: mark kvm->bu... |
442 443 444 |
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) { return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, |
3898da947 KVM: avoid using ... |
445 446 |
lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); |
4a12f9517 KVM: mark kvm->bu... |
447 |
} |
988a2cae6 KVM: Use macro to... |
448 449 |
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { |
dd489240a KVM: document mem... |
450 451 452 453 |
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case * the caller has read kvm->online_vcpus before (as is the case * for kvm_for_each_vcpu, for example). */ |
988a2cae6 KVM: Use macro to... |
454 455 456 457 458 |
smp_rmb(); return kvm->vcpus[i]; } #define kvm_for_each_vcpu(idx, vcpup, kvm) \ |
b42fc3cbc KVM: Fix off by o... |
459 460 461 462 |
for (idx = 0; \ idx < atomic_read(&kvm->online_vcpus) && \ (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ idx++) |
988a2cae6 KVM: Use macro to... |
463 |
|
db27a7a37 KVM: Provide func... |
464 465 |
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { |
9b9e3fc4d KVM: remove NULL ... |
466 |
struct kvm_vcpu *vcpu = NULL; |
db27a7a37 KVM: Provide func... |
467 |
int i; |
9b9e3fc4d KVM: remove NULL ... |
468 |
if (id < 0) |
c896939f7 KVM: use heuristi... |
469 |
return NULL; |
9b9e3fc4d KVM: remove NULL ... |
470 471 |
if (id < KVM_MAX_VCPUS) vcpu = kvm_get_vcpu(kvm, id); |
c896939f7 KVM: use heuristi... |
472 473 |
if (vcpu && vcpu->vcpu_id == id) return vcpu; |
db27a7a37 KVM: Provide func... |
474 475 476 477 478 |
kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->vcpu_id == id) return vcpu; return NULL; } |
497d72d80 KVM: Add kvm_vcpu... |
479 480 481 482 483 484 485 486 487 488 |
static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) { struct kvm_vcpu *tmp; int idx; kvm_for_each_vcpu(idx, tmp, vcpu->kvm) if (tmp == vcpu) return idx; BUG(); } |
be6ba0f09 KVM: introduce kv... |
489 490 |
#define kvm_for_each_memslot(memslot, slots) \ for (memslot = &slots->memslots[0]; \ |
bf3e05bc1 KVM: sort memslot... |
491 492 |
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ memslot++) |
be6ba0f09 KVM: introduce kv... |
493 |
|
fb3f0f51d KVM: Dynamically ... |
494 495 |
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); |
9fc77441e KVM: make process... |
496 |
int __must_check vcpu_load(struct kvm_vcpu *vcpu); |
313a3dc75 KVM: Portability:... |
497 |
void vcpu_put(struct kvm_vcpu *vcpu); |
6ef768fac kvm: x86: move io... |
498 |
#ifdef __KVM_HAVE_IOAPIC |
993225adf KVM: x86: rename ... |
499 |
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); |
abdb080f7 kvm/irqchip: kvm_... |
500 |
void kvm_arch_post_irq_routing_update(struct kvm *kvm); |
6ef768fac kvm: x86: move io... |
501 |
#else |
993225adf KVM: x86: rename ... |
502 |
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) |
6ef768fac kvm: x86: move io... |
503 504 |
{ } |
abdb080f7 kvm/irqchip: kvm_... |
505 |
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) |
b053b2aef KVM: x86: Add EOI... |
506 507 |
{ } |
6ef768fac kvm: x86: move io... |
508 |
#endif |
297e21053 KVM: Give IRQFD i... |
509 |
#ifdef CONFIG_HAVE_KVM_IRQFD |
a0f155e96 KVM: Initialize i... |
510 511 512 513 514 515 516 517 518 519 520 521 |
int kvm_irqfd_init(void); void kvm_irqfd_exit(void); #else static inline int kvm_irqfd_init(void) { return 0; } static inline void kvm_irqfd_exit(void) { } #endif |
0ee75bead KVM: Let vcpu str... |
522 |
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
c16f862d0 KVM: Use kmem cac... |
523 |
struct module *module); |
cb498ea2c KVM: Portability:... |
524 |
void kvm_exit(void); |
6aa8b732c [PATCH] kvm: user... |
525 |
|
d39f13b0d KVM: add vm refco... |
526 527 |
void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); |
f481b069e KVM: implement mu... |
528 |
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) |
90d83dc3d KVM: use the corr... |
529 |
{ |
7e988b103 KVM: use correct ... |
530 |
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, |
3898da947 KVM: avoid using ... |
531 532 |
lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); |
90d83dc3d KVM: use the corr... |
533 |
} |
f481b069e KVM: implement mu... |
534 535 536 537 |
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) { return __kvm_memslots(kvm, 0); } |
8e73485c7 KVM: add vcpu-spe... |
538 539 |
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) { |
f481b069e KVM: implement mu... |
540 541 542 |
int as_id = kvm_arch_vcpu_memslots_id(vcpu); return __kvm_memslots(vcpu->kvm, as_id); |
8e73485c7 KVM: add vcpu-spe... |
543 |
} |
28a37544f KVM: introduce id... |
544 545 546 |
static inline struct kvm_memory_slot * id_to_memslot(struct kvm_memslots *slots, int id) { |
f85e2cb5d KVM: introduce a ... |
547 548 |
int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; |
bf3e05bc1 KVM: sort memslot... |
549 |
|
f85e2cb5d KVM: introduce a ... |
550 |
slot = &slots->memslots[index]; |
bf3e05bc1 KVM: sort memslot... |
551 |
|
f85e2cb5d KVM: introduce a ... |
552 553 |
WARN_ON(slot->id != id); return slot; |
28a37544f KVM: introduce id... |
554 |
} |
74d0727cb KVM: set_memory_r... |
555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 |
/* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot * - delete an existing memory slot * - modify an existing memory slot * -- move it in the guest physical memory space * -- just change its flags * * Since flags can be changed by some of these operations, the following * differentiation is the best we can do for __kvm_set_memory_region(): */ enum kvm_mr_change { KVM_MR_CREATE, KVM_MR_DELETE, KVM_MR_MOVE, KVM_MR_FLAGS_ONLY, }; |
210c7c4d7 KVM: Export memor... |
572 |
int kvm_set_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
573 |
const struct kvm_userspace_memory_region *mem); |
f78e0e2ee KVM: VMX: Enable ... |
574 |
int __kvm_set_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
575 |
const struct kvm_userspace_memory_region *mem); |
5587027ce kvm: Add struct k... |
576 |
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
db3fe4eb4 KVM: Introduce kv... |
577 |
struct kvm_memory_slot *dont); |
5587027ce kvm: Add struct k... |
578 579 |
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); |
15f46015e KVM: add memslots... |
580 |
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); |
f7784b8ec KVM: split kvm_ar... |
581 582 |
int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, |
09170a494 KVM: const-ify us... |
583 |
const struct kvm_userspace_memory_region *mem, |
7b6195a91 KVM: set_memory_r... |
584 |
enum kvm_mr_change change); |
f7784b8ec KVM: split kvm_ar... |
585 |
void kvm_arch_commit_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
586 |
const struct kvm_userspace_memory_region *mem, |
8482644ae KVM: set_memory_r... |
587 |
const struct kvm_memory_slot *old, |
f36f3f284 KVM: add "new" ar... |
588 |
const struct kvm_memory_slot *new, |
8482644ae KVM: set_memory_r... |
589 |
enum kvm_mr_change change); |
db3fe4eb4 KVM: Introduce kv... |
590 |
bool kvm_largepages_enabled(void); |
54dee9933 KVM: VMX: conditi... |
591 |
void kvm_disable_largepages(void); |
2df72e9bc KVM: split kvm_ar... |
592 593 594 595 596 |
/* flush all memory translations */ void kvm_arch_flush_shadow_all(struct kvm *kvm); /* flush memory translations pointing to 'slot' */ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); |
a983fb238 KVM: x86: switch ... |
597 |
|
d9ef13c2b KVM: pass kvm_mem... |
598 599 |
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages); |
48987781e KVM: MMU: introdu... |
600 |
|
954bbbc23 KVM: Simply gfn_t... |
601 |
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
05da45583 KVM: MMU: large p... |
602 |
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
ba6a35415 KVM: mmu: allow p... |
603 |
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); |
4d8b81abc KVM: introduce re... |
604 |
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); |
64d831269 KVM: Introduce gf... |
605 606 |
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); |
b4231d618 KVM: MMU: Selecti... |
607 608 |
void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); |
35149e212 KVM: MMU: Don't a... |
609 |
void kvm_set_page_accessed(struct page *page); |
ba049e93a kvm: rename pfn_t... |
610 611 612 |
kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
612819c3c KVM: propagate fa... |
613 |
bool *writable); |
ba049e93a kvm: rename pfn_t... |
614 615 616 617 618 |
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable); |
037d92dc5 KVM: introduce gf... |
619 |
|
ba049e93a kvm: rename pfn_t... |
620 621 622 623 |
void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); void kvm_get_pfn(kvm_pfn_t pfn); |
35149e212 KVM: MMU: Don't a... |
624 |
|
195aefde9 KVM: Add general ... |
625 626 |
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); |
7ec545882 KVM: Add kvm_read... |
627 628 |
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
195aefde9 KVM: Add general ... |
629 |
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
4e335d9e7 Revert "KVM: Supp... |
630 631 |
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); |
195aefde9 KVM: Add general ... |
632 633 634 635 |
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); |
4e335d9e7 Revert "KVM: Supp... |
636 637 638 639 640 641 |
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, int offset, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); |
195aefde9 KVM: Add general ... |
642 643 |
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
6aa8b732c [PATCH] kvm: user... |
644 |
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
33e941547 KVM: kvm_is_visib... |
645 |
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
8f0b1ab6f KVM: Introduce kv... |
646 |
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); |
6aa8b732c [PATCH] kvm: user... |
647 |
void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
8e73485c7 KVM: add vcpu-spe... |
648 649 |
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); |
ba049e93a kvm: rename pfn_t... |
650 651 |
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
8e73485c7 KVM: add vcpu-spe... |
652 653 654 655 656 657 658 659 660 661 662 663 664 665 |
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len); int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len); int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
40ba283e2 KVM: Let KVM_SET_... |
666 667 |
void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); |
8776e5194 KVM: Portability:... |
668 |
void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
3217f7c25 KVM: Add kvm_arch... |
669 670 |
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); |
178f02ffa KVM: return if kv... |
671 |
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); |
b6d33834b KVM: Factor out k... |
672 |
void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
fa93384f4 sched: Fix signed... |
673 |
int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
199b5763d KVM: add spinlock... |
674 |
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); |
7702fd1f6 KVM: Prevent gues... |
675 676 |
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); |
a4ee1ca4a KVM: MMU: delay f... |
677 |
|
d9e368d61 KVM: Flush remote... |
678 |
void kvm_flush_remote_tlbs(struct kvm *kvm); |
2e53d63ac KVM: MMU: ignore ... |
679 |
void kvm_reload_remote_mmus(struct kvm *kvm); |
445b82369 kvm: Rename make_... |
680 |
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); |
6aa8b732c [PATCH] kvm: user... |
681 |
|
043405e10 KVM: Move x86 msr... |
682 683 |
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
313a3dc75 KVM: Portability:... |
684 685 |
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
5b1c1493a KVM: s390: ucontr... |
686 |
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); |
018d00d2f KVM: Portability:... |
687 |
|
784aa3d7f KVM: Rename and a... |
688 |
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
018d00d2f KVM: Portability:... |
689 |
|
5bb064dcd KVM: Portability:... |
690 691 |
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty); |
ba0513b5b KVM: Add generic ... |
692 693 694 |
int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log, bool *is_dirty); |
3b0f1d01e KVM: Rename kvm_a... |
695 |
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
ba0513b5b KVM: Add generic ... |
696 697 698 |
struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); |
5bb064dcd KVM: Portability:... |
699 700 |
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); |
aa2fbe6d4 KVM: Let ioapic k... |
701 702 |
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); |
1fe779f8e KVM: Portability:... |
703 704 |
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
313a3dc75 KVM: Portability:... |
705 |
|
d07520607 KVM: Portability:... |
706 707 |
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
8b0067913 KVM: Portability:... |
708 709 |
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr); |
b6c7a5dcc KVM: Portability:... |
710 711 712 713 714 715 |
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
62d9f0dbc KVM: add ioctls t... |
716 717 718 719 |
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); |
d0bfb940e KVM: New guest de... |
720 721 |
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); |
b6c7a5dcc KVM: Portability:... |
722 |
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
f8c16bbaa KVM: Portability:... |
723 724 |
int kvm_arch_init(void *opaque); void kvm_arch_exit(void); |
043405e10 KVM: Move x86 msr... |
725 |
|
e9b11c175 KVM: Portability:... |
726 727 |
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); |
e790d9ef6 KVM: add kvm_arch... |
728 |
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); |
e9b11c175 KVM: Portability:... |
729 730 731 732 |
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); |
26e5215fd KVM: Split vcpu c... |
733 |
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); |
31928aa58 KVM: remove unnee... |
734 |
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); |
d40ccc624 KVM: Correct cons... |
735 |
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
e9b11c175 KVM: Portability:... |
736 |
|
235539b48 kvm: add stubs fo... |
737 738 |
bool kvm_arch_has_vcpu_debugfs(void); int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); |
13a34e067 KVM: remove garba... |
739 740 |
int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); |
e9b11c175 KVM: Portability:... |
741 742 743 |
int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); void kvm_arch_check_processor_compat(void *rtn); |
1d737c8a6 KVM: Portability:... |
744 |
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
199b5763d KVM: add spinlock... |
745 |
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); |
b6d33834b KVM: Factor out k... |
746 |
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
e9b11c175 KVM: Portability:... |
747 |
|
d89f5eff7 KVM: Clean up vm ... |
748 749 750 751 752 753 754 755 756 757 758 |
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * Default allocation and free of the struct kvm instance.  An architecture
 * can override both by defining __KVM_HAVE_ARCH_VM_ALLOC and providing its
 * own kvm_arch_alloc_vm()/kvm_arch_free_vm().
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	/* Zeroed so every field starts in a known state. */
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif
e0f0bbc52 kvm: Create non-c... |
759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 |
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
/*
 * No-op stubs for architectures that do not track non-coherent DMA
 * devices attached to a VM; such VMs always report "no non-coherent DMA".
 */
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
5544eb9b8 KVM: count number... |
777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 |
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
/*
 * No-op stubs for architectures that do not count assigned (pass-through)
 * devices; such VMs always report "no assigned device".
 */
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif
e0f0bbc52 kvm: Create non-c... |
795 |
|
8577370fb KVM: Use simple w... |
796 |
/*
 * Return the wait queue this vcpu blocks on.  Architectures that define
 * __KVM_HAVE_ARCH_WQP supply their own queue pointer via vcpu->arch.wqp;
 * everyone else uses the per-vcpu queue embedded in struct kvm_vcpu.
 */
static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
b6d33834b KVM: Factor out k... |
804 |
|
01c94e64f KVM: introduce kv... |
805 806 807 808 809 810 811 812 813 814 815 816 817 |
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
/* Architectures without a dynamic irqchip are always "initialized". */
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif
e08b96371 KVM: s390: add pa... |
818 |
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
d19a9cd27 KVM: Portability:... |
819 |
void kvm_arch_destroy_vm(struct kvm *kvm); |
ad8ba2cd4 KVM: Add kvm_arch... |
820 |
void kvm_arch_sync_events(struct kvm *kvm); |
e9b11c175 KVM: Portability:... |
821 |
|
3d80840d9 KVM: hlt emulatio... |
822 |
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
5736199af KVM: Move kvm_vcp... |
823 |
void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
682c59a3f KVM: Portability:... |
824 |
|
ba049e93a kvm: rename pfn_t... |
825 |
bool kvm_is_reserved_pfn(kvm_pfn_t pfn); |
c77fb9dc7 KVM: Change is_mm... |
826 |
|
62c476c7c KVM: Device Assig... |
827 828 829 830 831 |
struct kvm_irq_ack_notifier { struct hlist_node link; unsigned gsi; void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; |
9957c86d6 KVM: Move all acc... |
832 833 834 |
int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi); int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); |
8ba918d48 KVM: irqchip: Pro... |
835 |
|
aa2fbe6d4 KVM: Let ioapic k... |
836 837 |
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); |
bd2b53b20 KVM: fast-path ms... |
838 |
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
aa2fbe6d4 KVM: Let ioapic k... |
839 |
int irq_source_id, int level, bool line_status); |
b97e6de9c KVM: x86: merge k... |
840 841 842 |
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status); |
c7c9c56ca x86, apicv: add v... |
843 |
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
ba1aefcd6 kvm/eventfd: fact... |
844 |
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); |
44882eed2 KVM: make irq ack... |
845 |
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
3de42dc09 KVM: Separate irq... |
846 847 |
void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); |
fa40a8214 KVM: switch irq i... |
848 849 |
void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); |
5550af4df KVM: Fix guest sh... |
850 851 |
int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
62c476c7c KVM: Device Assig... |
852 |
|
9d4cba7f9 KVM: Move gfn_to_... |
853 854 855 856 857 858 859 860 861 |
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	/*
	 * Binary search over slots->memslots.  The search moves towards
	 * lower indices when gfn >= base_gfn, i.e. the array is ordered
	 * by descending base_gfn.  slots->lru_slot caches the index of
	 * the most recently matched slot so repeated lookups of nearby
	 * gfns can skip the search entirely.
	 */
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	/* Fast path: the cached slot still covers this gfn. */
	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	/* Verify the candidate actually contains gfn before caching it. */
	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	/* gfn falls in a hole between slots (or past all of them). */
	return NULL;
}

/* Look up the memslot covering @gfn; NULL if no slot contains it. */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
66a03505a KVM: PPC: book3s:... |
893 894 895 896 897 |
/*
 * Translate @gfn to the userspace virtual address backing it, assuming
 * the caller already knows @slot contains @gfn.
 */
static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* Byte offset of @gfn from the first page of the slot. */
	unsigned long offset = (gfn - slot->base_gfn) * PAGE_SIZE;

	return slot->userspace_addr + offset;
}
0ee8dcb87 KVM: cleanup mems... |
898 899 900 901 |
/*
 * Return the id of the memslot containing @gfn.  Note: no NULL check;
 * callers must only pass gfns known to be covered by a memslot.
 */
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return slot->id;
}
d19a748b1 KVM: Introduce hv... |
902 903 |
static inline gfn_t hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) |
887c08ac1 KVM: MMU: introdu... |
904 |
{ |
d19a748b1 KVM: Introduce hv... |
905 906 907 |
gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; return slot->base_gfn + gfn_offset; |
887c08ac1 KVM: MMU: introdu... |
908 |
} |
1755fbcc6 KVM: MMU: Introdu... |
909 910 911 912 |
/* Convert a guest frame number to the guest physical address of its first byte. */
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
6aa8b732c [PATCH] kvm: user... |
913 |
|
c30a358d3 KVM: MMU: Add inf... |
914 915 916 917 |
/* Convert a guest physical address to the guest frame number containing it. */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}
ba049e93a kvm: rename pfn_t... |
918 |
/* Convert a host page frame number to the host physical address of its first byte. */
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
5e2f30b75 KVM: nVMX: get ri... |
922 923 924 925 926 |
/* Fetch the struct page backing guest physical address @gpa for @vcpu. */
static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	gfn_t gfn = gpa_to_gfn(gpa);

	return kvm_vcpu_gfn_to_page(vcpu, gfn);
}
dfeec843f KVM: add kvm_is_e... |
927 928 929 930 931 932 |
/*
 * True when @gpa does not resolve to a valid host virtual address,
 * i.e. it is not backed by any memslot of @kvm.
 */
static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	return kvm_is_error_hva(gfn_to_hva(kvm, gpa_to_gfn(gpa)));
}
ba1389b7a KVM: Extend stats... |
933 934 935 936 |
enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, }; |
536a6f88c KVM: Create debug... |
937 938 939 940 |
struct kvm_stat_data { int offset; struct kvm *kvm; }; |
417bc3041 KVM: Portability:... |
941 942 943 |
struct kvm_stats_debugfs_item { const char *name; int offset; |
ba1389b7a KVM: Extend stats... |
944 |
enum kvm_stat_kind kind; |
417bc3041 KVM: Portability:... |
945 946 |
}; extern struct kvm_stats_debugfs_item debugfs_entries[]; |
76f7c8790 KVM: Rename debug... |
947 |
extern struct dentry *kvm_debugfs_dir; |
d4c9ff2d1 KVM: Add kvm trac... |
948 |
|
36c1ed821 KVM: Guard mmu_no... |
949 |
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
/*
 * Decide whether a page fault must be retried because an MMU notifier
 * invalidation is in flight (mmu_notifier_count non-zero) or has completed
 * since the caller sampled @mmu_seq (mmu_notifier_seq changed).  Returns 1
 * to retry, 0 if the sampled state is still valid.
 */
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
a725d56a0 KVM: Introduce CO... |
970 |
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
399ec807d KVM: Userspace co... |
971 |
|
ce44a4d5d KVM: Extend MAX_I... |
972 |
#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ |
399ec807d KVM: Userspace co... |
973 |
|
5c0aea0e8 KVM: x86: don't h... |
974 |
bool kvm_arch_can_set_irq_routing(struct kvm *kvm); |
399ec807d KVM: Userspace co... |
975 976 977 978 |
int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, unsigned flags); |
c63cf538e KVM: pass struct ... |
979 980 |
int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, |
e8cde0939 KVM: Move irq rou... |
981 |
const struct kvm_irq_routing_entry *ue); |
399ec807d KVM: Userspace co... |
982 983 984 985 986 987 988 |
void kvm_free_irq_routing(struct kvm *kvm); #else static inline void kvm_free_irq_routing(struct kvm *kvm) {} #endif |
297e21053 KVM: Give IRQFD i... |
989 |
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
721eecbf4 KVM: irqfd |
990 |
#ifdef CONFIG_HAVE_KVM_EVENTFD |
d34e6b175 KVM: add ioeventf... |
991 |
void kvm_eventfd_init(struct kvm *kvm); |
914daba86 KVM: Distangle ev... |
992 |
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); |
297e21053 KVM: Give IRQFD i... |
993 |
#ifdef CONFIG_HAVE_KVM_IRQFD |
d4db2935e KVM: Pass kvm_irq... |
994 |
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); |
721eecbf4 KVM: irqfd |
995 |
void kvm_irqfd_release(struct kvm *kvm); |
9957c86d6 KVM: Move all acc... |
996 |
void kvm_irq_routing_update(struct kvm *); |
914daba86 KVM: Distangle ev... |
997 998 999 1000 1001 1002 1003 1004 |
#else static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} #endif |
721eecbf4 KVM: irqfd |
1005 1006 |
#else |
d34e6b175 KVM: add ioeventf... |
1007 |
static inline void kvm_eventfd_init(struct kvm *kvm) {} |
bd2b53b20 KVM: fast-path ms... |
1008 |
|
d4db2935e KVM: Pass kvm_irq... |
1009 |
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) |
721eecbf4 KVM: irqfd |
1010 1011 1012 1013 1014 |
{ return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} |
bd2b53b20 KVM: fast-path ms... |
1015 |
|
27923eb19 KVM: PPC: Fix com... |
1016 |
#ifdef CONFIG_HAVE_KVM_IRQCHIP |
9957c86d6 KVM: Move all acc... |
1017 |
static inline void kvm_irq_routing_update(struct kvm *kvm) |
bd2b53b20 KVM: fast-path ms... |
1018 |
{ |
bd2b53b20 KVM: fast-path ms... |
1019 |
} |
27923eb19 KVM: PPC: Fix com... |
1020 |
#endif |
bd2b53b20 KVM: fast-path ms... |
1021 |
|
d34e6b175 KVM: add ioeventf... |
1022 1023 1024 1025 |
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { return -ENOSYS; } |
721eecbf4 KVM: irqfd |
1026 1027 |
#endif /* CONFIG_HAVE_KVM_EVENTFD */ |
5f95541a0 kvm: fix warning ... |
1028 |
void kvm_arch_irq_routing_update(struct kvm *kvm); |
a8eeb04a4 KVM: Add mini-API... |
1029 1030 |
/*
 * Post request @req to @vcpu.  Only the low bits (KVM_REQUEST_MASK)
 * select the request bit; any payload the requester wrote beforehand is
 * published by the barrier below.
 */
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
2fa6e1e12 KVM: add kvm_requ... |
1038 1039 1040 1041 |
/* Lockless snapshot: true if any request bit is currently set for @vcpu. */
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}
72875d8a4 KVM: add kvm_{tes... |
1042 1043 |
/* Test whether request @req is pending for @vcpu without consuming it. */
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

/* Clear request @req for @vcpu without any barrier; see kvm_check_request. */
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
a8eeb04a4 KVM: Add mini-API... |
1051 1052 |
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { |
72875d8a4 KVM: add kvm_{tes... |
1053 1054 |
if (kvm_test_request(req, vcpu)) { kvm_clear_request(req, vcpu); |
2e4682ba2 KVM: add missing ... |
1055 1056 1057 1058 1059 1060 |
/* * Ensure the rest of the request is visible to kvm_check_request's * caller. Paired with the smp_wmb in kvm_make_request. */ smp_mb__after_atomic(); |
0719837c0 KVM: Reduce atomi... |
1061 1062 1063 1064 |
return true; } else { return false; } |
a8eeb04a4 KVM: Add mini-API... |
1065 |
} |
8b415dcd7 KVM: Move kvm_reb... |
1066 |
extern bool kvm_rebooting; |
ec76d819d KVM: Export kvm m... |
1067 1068 1069 |
extern unsigned int halt_poll_ns; extern unsigned int halt_poll_ns_grow; extern unsigned int halt_poll_ns_shrink; |
852b6d57d kvm: add device c... |
1070 1071 1072 |
struct kvm_device { struct kvm_device_ops *ops; struct kvm *kvm; |
852b6d57d kvm: add device c... |
1073 |
void *private; |
07f0a7bde kvm: destroy emul... |
1074 |
struct list_head vm_node; |
852b6d57d kvm: add device c... |
1075 1076 1077 1078 1079 |
}; /* create, destroy, and name are mandatory */ struct kvm_device_ops { const char *name; |
a28ebea2a KVM: Protect devi... |
1080 1081 1082 1083 1084 1085 |
/* * create is called holding kvm->lock and any operations not suitable * to do while holding the lock should be deferred to init (see * below). */ |
852b6d57d kvm: add device c... |
1086 1087 1088 |
int (*create)(struct kvm_device *dev, u32 type); /* |
023e9fddc KVM: PPC: Move xi... |
1089 1090 1091 1092 1093 1094 |
* init is called after create if create is successful and is called * outside of holding kvm->lock. */ void (*init)(struct kvm_device *dev); /* |
852b6d57d kvm: add device c... |
1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 |
* Destroy is responsible for freeing dev. * * Destroy may be called before or after destructors are called * on emulated I/O regions, depending on whether a reference is * held by a vcpu or other kvm component that gets destroyed * after the emulated I/O. */ void (*destroy)(struct kvm_device *dev); int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, unsigned long arg); }; void kvm_device_get(struct kvm_device *dev); void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); |
d60eacb07 KVM: device: add ... |
1114 |
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); |
571ee1b68 kvm: vfio: fix un... |
1115 |
void kvm_unregister_device_ops(u32 type); |
852b6d57d kvm: add device c... |
1116 |
|
5df554ad5 kvm/ppc/mpic: in-... |
1117 |
extern struct kvm_device_ops kvm_mpic_ops; |
ea2f83a7d arm/arm64: KVM: m... |
1118 |
extern struct kvm_device_ops kvm_arm_vgic_v2_ops; |
a0675c25d arm/arm64: KVM: a... |
1119 |
extern struct kvm_device_ops kvm_arm_vgic_v3_ops; |
5df554ad5 kvm/ppc/mpic: in-... |
1120 |
|
4c088493c KVM: Note down wh... |
1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 |
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.in_spin_loop = val; } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.dy_eligible = val; } #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } |
4c088493c KVM: Note down wh... |
1141 |
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
1a02b2703 KVM: introduce kv... |
1142 1143 |
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
14717e203 kvm: Conditionall... |
1144 |
bool kvm_arch_has_irq_bypass(void); |
1a02b2703 KVM: introduce kv... |
1145 1146 1147 1148 1149 1150 |
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); |
f70c20aaf KVM: Add an arch ... |
1151 1152 |
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); |
1a02b2703 KVM: introduce kv... |
1153 |
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ |
35181e86d KVM: x86: Add a c... |
1154 |
|
3491caf27 KVM: halt_polling... |
1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 |
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
/* Arch does not flag invalid wakeups: every wakeup counts as valid. */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
bfd99ff5d KVM: Move assigne... |
1168 |
#endif |