Blame view
include/linux/kvm_host.h
34 KB
edf884172 KVM: Move arch de... |
1 2 |
#ifndef __KVM_HOST_H #define __KVM_HOST_H |
6aa8b732c [PATCH] kvm: user... |
3 4 5 6 7 8 9 |
/* * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/types.h> |
e56a7a28e KVM: Use virtual ... |
10 |
#include <linux/hardirq.h> |
6aa8b732c [PATCH] kvm: user... |
11 12 13 |
#include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> |
06ff0d372 KVM: Fix includes |
14 15 |
#include <linux/signal.h> #include <linux/sched.h> |
187f1882b BUG: headers with... |
16 |
#include <linux/bug.h> |
6aa8b732c [PATCH] kvm: user... |
17 |
#include <linux/mm.h> |
b297e672e KVM: Fix include ... |
18 |
#include <linux/mmu_notifier.h> |
15ad71460 KVM: Use the sche... |
19 |
#include <linux/preempt.h> |
0937c48d0 KVM: Add fields f... |
20 |
#include <linux/msi.h> |
d89f5eff7 KVM: Clean up vm ... |
21 |
#include <linux/slab.h> |
bd2b53b20 KVM: fast-path ms... |
22 |
#include <linux/rcupdate.h> |
bd80158af KVM: Clean up and... |
23 |
#include <linux/ratelimit.h> |
83f09228d KVM: inline is_*_... |
24 |
#include <linux/err.h> |
c11f11fcb kvm: Prepare to a... |
25 |
#include <linux/irqflags.h> |
521921bad kvm: Move guest e... |
26 |
#include <linux/context_tracking.h> |
1a02b2703 KVM: introduce kv... |
27 |
#include <linux/irqbypass.h> |
8577370fb KVM: Use simple w... |
28 |
#include <linux/swait.h> |
e8edc6e03 Detach sched.h fr... |
29 |
#include <asm/signal.h> |
6aa8b732c [PATCH] kvm: user... |
30 |
|
6aa8b732c [PATCH] kvm: user... |
31 |
#include <linux/kvm.h> |
102d8325a KVM: add MSR base... |
32 |
#include <linux/kvm_para.h> |
6aa8b732c [PATCH] kvm: user... |
33 |
|
edf884172 KVM: Move arch de... |
34 |
#include <linux/kvm_types.h> |
d77a39d98 KVM: Portability:... |
35 |
|
edf884172 KVM: Move arch de... |
36 |
#include <asm/kvm_host.h> |
d657a98e3 KVM: Portability:... |
37 |
|
0b1b1dfd5 kvm: introduce KV... |
38 39 40 |
#ifndef KVM_MAX_VCPU_ID #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS #endif |
6aa8b732c [PATCH] kvm: user... |
41 |
/* |
67b29204c KVM: hide KVM_MEM... |
42 43 44 45 46 |
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in * include/linux/kvm.h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) |
1050dcda3 kvm: add a memslo... |
47 |
#define KVM_MEMSLOT_INCOHERENT (1UL << 17) |
67b29204c KVM: hide KVM_MEM... |
48 |
|
87da7e66a KVM: x86: fix vcp... |
49 50 |
/* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 |
f78146b0f KVM: Fix page-cro... |
51 |
|
f481b069e KVM: implement mu... |
52 53 54 |
#ifndef KVM_ADDRESS_SPACE_NUM #define KVM_ADDRESS_SPACE_NUM 1 #endif |
f78146b0f KVM: Fix page-cro... |
55 |
/* |
9c5b11728 KVM: let the erro... |
56 |
* For the normal pfn, the highest 12 bits should be zero, |
81c52c56e KVM: do not treat... |
57 58 |
* so we can mask bit 62 ~ bit 52 to indicate the error pfn, * mask bit 63 to indicate the noslot pfn. |
9c5b11728 KVM: let the erro... |
59 |
*/ |
81c52c56e KVM: do not treat... |
60 61 62 |
#define KVM_PFN_ERR_MASK (0x7ffULL << 52) #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) #define KVM_PFN_NOSLOT (0x1ULL << 63) |
9c5b11728 KVM: let the erro... |
63 64 65 |
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) |
81c52c56e KVM: do not treat... |
66 |
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) |
6c8ee57be KVM: introduce KV... |
67 |
|
81c52c56e KVM: do not treat... |
68 69 70 71 |
/* * error pfns indicate that the gfn is in slot but failed to * translate it to pfn on host. */ |
ba049e93a kvm: rename pfn_t... |
72 |
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	/* Any bit set in the error mask marks this pfn as an error value. */
	return (pfn & KVM_PFN_ERR_MASK) != 0;
}
81c52c56e KVM: do not treat... |
76 77 78 79 80 |
/* * error_noslot pfns indicate that the gfn can not be * translated to pfn - it is not in slot or failed to * translate it to pfn. */ |
ba049e93a kvm: rename pfn_t... |
81 |
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	/*
	 * Covers both the error mask and the noslot bit: any pfn that
	 * could not be produced for the gfn tests true here.
	 */
	return (pfn & KVM_PFN_ERR_NOSLOT_MASK) != 0;
}
81c52c56e KVM: do not treat... |
85 |
/* noslot pfn indicates that the gfn is not in slot. */ |
ba049e93a kvm: rename pfn_t... |
86 |
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	/* KVM_PFN_NOSLOT (bit 63) is the unique "gfn has no memslot" cookie. */
	return pfn == KVM_PFN_NOSLOT;
}
bf640876e KVM: s390: Make K... |
90 91 92 93 94 |
/* * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390) * provide own defines and kvm_is_error_hva */ #ifndef KVM_HVA_ERR_BAD |
7068d0971 KVM: introduce KV... |
95 96 |
#define KVM_HVA_ERR_BAD (PAGE_OFFSET) #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) |
ca3a490c7 KVM: introduce KV... |
97 98 99 |
static inline bool kvm_is_error_hva(unsigned long addr)
{
	/*
	 * The error cookies KVM_HVA_ERR_BAD (PAGE_OFFSET) and
	 * KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) both sit at or
	 * above PAGE_OFFSET, so one comparison detects either of them.
	 */
	return addr >= PAGE_OFFSET;
}
bf640876e KVM: s390: Make K... |
102 |
#endif |
6cede2e67 KVM: introduce KV... |
103 |
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) |
9c5b11728 KVM: let the erro... |
104 |
static inline bool is_error_page(struct page *page)
{
	/* Error "pages" are ERR_PTR-encoded, e.g. KVM_ERR_PTR_BAD_PAGE. */
	return IS_ERR(page);
}
f78146b0f KVM: Fix page-cro... |
108 |
/* |
2860c4b16 KVM: move archite... |
109 110 |
* Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. |
d9e368d61 KVM: Flush remote... |
111 |
*/ |
3176bc3e5 KVM: Rename KVM_T... |
112 |
#define KVM_REQ_TLB_FLUSH 0 |
6662ba347 KVM: renumber vcp... |
113 114 115 |
#define KVM_REQ_MMU_RELOAD 1 #define KVM_REQ_PENDING_TIMER 2 #define KVM_REQ_UNHALT 3 |
0cd310437 KVM: document whi... |
116 |
|
7a84428af KVM: Add resampli... |
117 118 |
#define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
5550af4df KVM: Fix guest sh... |
119 |
|
c16f862d0 KVM: Use kmem cac... |
120 |
extern struct kmem_cache *kvm_vcpu_cache; |
6aa8b732c [PATCH] kvm: user... |
121 |
|
2f303b74a KVM: Convert kvm_... |
122 |
extern spinlock_t kvm_lock; |
fc1b74925 KVM: Move vm_list... |
123 |
extern struct list_head vm_list; |
743eeb0b0 KVM: Intelligent ... |
124 125 126 127 128 |
struct kvm_io_range { gpa_t addr; int len; struct kvm_io_device *dev; }; |
786a9f888 KVM: set upper bo... |
129 |
#define NR_IOBUS_DEVS 1000 |
a13007160 KVM: resize kvm_i... |
130 |
|
2eeb2e94e KVM: Adds support... |
131 |
struct kvm_io_bus { |
6ea34c9b7 kvm: exclude ioev... |
132 133 |
int dev_count; int ioeventfd_count; |
a13007160 KVM: resize kvm_i... |
134 |
struct kvm_io_range range[]; |
2eeb2e94e KVM: Adds support... |
135 |
}; |
e93f8a0f8 KVM: convert io_b... |
136 137 138 |
enum kvm_bus { KVM_MMIO_BUS, KVM_PIO_BUS, |
060f0ce6f KVM: Introduce KV... |
139 |
KVM_VIRTIO_CCW_NOTIFY_BUS, |
68c3b4d16 KVM: VMX: speed u... |
140 |
KVM_FAST_MMIO_BUS, |
e93f8a0f8 KVM: convert io_b... |
141 142 |
KVM_NR_BUSES }; |
e32edf4fd KVM: Redesign kvm... |
143 |
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
e93f8a0f8 KVM: convert io_b... |
144 |
int len, const void *val); |
e32edf4fd KVM: Redesign kvm... |
145 146 147 148 |
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val, long cookie); int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); |
743eeb0b0 KVM: Intelligent ... |
149 150 |
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); |
e93f8a0f8 KVM: convert io_b... |
151 152 |
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); |
8a39d0067 KVM: kvm_io_bus: ... |
153 154 |
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); |
2eeb2e94e KVM: Adds support... |
155 |
|
af585b921 KVM: Halt vcpu if... |
156 157 158 159 160 161 162 163 164 165 |
#ifdef CONFIG_KVM_ASYNC_PF struct kvm_async_pf { struct work_struct work; struct list_head link; struct list_head queue; struct kvm_vcpu *vcpu; struct mm_struct *mm; gva_t gva; unsigned long addr; struct kvm_arch_async_pf arch; |
f2e106692 KVM: Drop FOLL_GE... |
166 |
bool wakeup_all; |
af585b921 KVM: Halt vcpu if... |
167 168 169 170 |
}; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); |
e0ead41a6 KVM: async_pf: Pr... |
171 |
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, |
af585b921 KVM: Halt vcpu if... |
172 |
struct kvm_arch_async_pf *arch); |
344d9588a KVM: Add PV MSR t... |
173 |
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); |
af585b921 KVM: Halt vcpu if... |
174 |
#endif |
6b7e2d099 KVM: Add "exiting... |
175 176 177 |
enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, |
c142786c6 KVM: MMU: Don't u... |
178 179 |
EXITING_GUEST_MODE, READING_SHADOW_PAGE_TABLES, |
6b7e2d099 KVM: Add "exiting... |
180 |
}; |
f78146b0f KVM: Fix page-cro... |
181 182 183 184 185 186 187 188 189 |
/* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. */ struct kvm_mmio_fragment { gpa_t gpa; void *data; unsigned len; }; |
d17fbbf73 KVM: Portability:... |
190 191 |
struct kvm_vcpu { struct kvm *kvm; |
31bb117eb KVM: Use CONFIG_P... |
192 |
#ifdef CONFIG_PREEMPT_NOTIFIERS |
d17fbbf73 KVM: Portability:... |
193 |
struct preempt_notifier preempt_notifier; |
31bb117eb KVM: Use CONFIG_P... |
194 |
#endif |
6b7e2d099 KVM: Add "exiting... |
195 |
int cpu; |
d17fbbf73 KVM: Portability:... |
196 |
int vcpu_id; |
6b7e2d099 KVM: Add "exiting... |
197 198 |
int srcu_idx; int mode; |
d17fbbf73 KVM: Portability:... |
199 |
unsigned long requests; |
d0bfb940e KVM: New guest de... |
200 |
unsigned long guest_debug; |
6b7e2d099 KVM: Add "exiting... |
201 |
|
bf9f6ac8d KVM: Update Poste... |
202 203 |
int pre_pcpu; struct list_head blocked_vcpu_list; |
6b7e2d099 KVM: Add "exiting... |
204 205 |
struct mutex mutex; struct kvm_run *run; |
f656ce018 KVM: switch vcpu ... |
206 |
|
d17fbbf73 KVM: Portability:... |
207 |
int fpu_active; |
2acf923e3 KVM: VMX: Enable ... |
208 |
int guest_fpu_loaded, guest_xcr0_loaded; |
8577370fb KVM: Use simple w... |
209 |
struct swait_queue_head wq; |
34bb10b79 KVM: keep track o... |
210 |
struct pid *pid; |
d17fbbf73 KVM: Portability:... |
211 212 213 |
int sigset_active; sigset_t sigset; struct kvm_vcpu_stat stat; |
19020f8ab KVM: make halt_po... |
214 |
unsigned int halt_poll_ns; |
3491caf27 KVM: halt_polling... |
215 |
bool valid_wakeup; |
d17fbbf73 KVM: Portability:... |
216 |
|
34c16eecf KVM: Portability:... |
217 |
#ifdef CONFIG_HAS_IOMEM |
d17fbbf73 KVM: Portability:... |
218 219 220 |
int mmio_needed; int mmio_read_completed; int mmio_is_write; |
f78146b0f KVM: Fix page-cro... |
221 222 223 |
int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; |
34c16eecf KVM: Portability:... |
224 |
#endif |
1165f5fec KVM: Per-vcpu sta... |
225 |
|
af585b921 KVM: Halt vcpu if... |
226 227 228 229 230 231 232 233 |
#ifdef CONFIG_KVM_ASYNC_PF struct { u32 queued; struct list_head queue; struct list_head done; spinlock_t lock; } async_pf; #endif |
4c088493c KVM: Note down wh... |
234 235 236 237 238 239 240 241 242 243 244 245 |
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT /* * Cpu relax intercept or pause loop exit optimization * in_spin_loop: set when a vcpu does a pause loop exit * or cpu relax intercepted. * dy_eligible: indicates whether vcpu is eligible for directed yield. */ struct { bool in_spin_loop; bool dy_eligible; } spin_loop; #endif |
3a08a8f9f kvm: Record the p... |
246 |
bool preempted; |
d657a98e3 KVM: Portability:... |
247 |
struct kvm_vcpu_arch arch; |
45b5939e5 kvm: create per-v... |
248 |
struct dentry *debugfs_dentry; |
d657a98e3 KVM: Portability:... |
249 |
}; |
6b7e2d099 KVM: Add "exiting... |
250 251 252 253 |
/*
 * Atomically try to move vcpu->mode from IN_GUEST_MODE to
 * EXITING_GUEST_MODE.  cmpxchg returns the mode observed before the
 * attempt, so the caller can tell whether the vcpu really was in
 * guest mode at that moment.
 */
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
660c22c42 KVM: limit the nu... |
254 255 256 257 258 |
/* * Some of the bitops functions do not support too long bitmaps. * This number must be determined not to exceed such limits. */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) |
6aa8b732c [PATCH] kvm: user... |
259 260 261 |
struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; |
6aa8b732c [PATCH] kvm: user... |
262 |
unsigned long *dirty_bitmap; |
db3fe4eb4 KVM: Introduce kv... |
263 |
struct kvm_arch_memory_slot arch; |
8a7ae055f KVM: MMU: Partial... |
264 |
unsigned long userspace_addr; |
6104f472a KVM: struct kvm_m... |
265 |
u32 flags; |
1e702d9af KVM: struct kvm_m... |
266 |
short id; |
6aa8b732c [PATCH] kvm: user... |
267 |
}; |
87bf6e7de KVM: fix the hand... |
268 269 270 271 |
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	/* One bit per page, with the bitmap padded out to whole longs. */
	unsigned long nbits = ALIGN(memslot->npages, BITS_PER_LONG);

	return nbits / 8;
}
842235987 KVM: s390: irq ro... |
272 273 274 275 276 277 278 |
struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; u64 ind_offset; u32 summary_offset; u32 adapter_id; }; |
5c919412f kvm/x86: Hyper-V ... |
279 280 281 282 |
struct kvm_hv_sint { u32 vcpu; u32 sint; }; |
399ec807d KVM: Userspace co... |
283 284 |
struct kvm_kernel_irq_routing_entry { u32 gsi; |
5116d8f6b KVM: fix ack not ... |
285 |
u32 type; |
4925663a0 KVM: Report IRQ i... |
286 |
int (*set)(struct kvm_kernel_irq_routing_entry *e, |
aa2fbe6d4 KVM: Let ioapic k... |
287 288 |
struct kvm *kvm, int irq_source_id, int level, bool line_status); |
399ec807d KVM: Userspace co... |
289 290 291 292 293 |
union { struct { unsigned irqchip; unsigned pin; } irqchip; |
0455e72c9 KVM: Add devid in... |
294 295 296 297 298 299 300 |
struct { u32 address_lo; u32 address_hi; u32 data; u32 flags; u32 devid; } msi; |
842235987 KVM: s390: irq ro... |
301 |
struct kvm_s390_adapter_int adapter; |
5c919412f kvm/x86: Hyper-V ... |
302 |
struct kvm_hv_sint hv_sint; |
399ec807d KVM: Userspace co... |
303 |
}; |
46e624b95 KVM: Change irq r... |
304 305 |
struct hlist_node link; }; |
b053b2aef KVM: x86: Add EOI... |
306 307 308 309 310 311 312 313 314 315 316 |
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING struct kvm_irq_routing_table { int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; u32 nr_rt_entries; /* * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ struct hlist_head map[0]; }; #endif |
0743247fb KVM: Make KVM_PRI... |
317 318 319 |
#ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif |
93a5cef07 KVM: introduce KV... |
320 |
#ifndef KVM_MEM_SLOTS_NUM |
bbacc0c11 KVM: Rename KVM_M... |
321 |
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
93a5cef07 KVM: introduce KV... |
322 |
#endif |
f481b069e KVM: implement mu... |
323 324 325 326 327 328 |
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
/*
 * Fallback for architectures with a single guest address space:
 * every vcpu uses memslot set 0 (KVM_ADDRESS_SPACE_NUM defaults to 1).
 */
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif
bf3e05bc1 KVM: sort memslot... |
329 330 331 332 333 |
/* * Note: * memslots are not sorted by id anymore, please use id_to_memslot() * to get the memslot by its id. */ |
46a26bf55 KVM: modify memsl... |
334 |
struct kvm_memslots {
	/*
	 * NOTE(review): presumably bumped whenever the slot array changes
	 * so stale cached lookups can be detected (see
	 * kvm_arch_memslots_updated) — confirm against the update path.
	 */
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	/*
	 * NOTE(review): looks like a most-recently-used index hint for
	 * gfn->slot lookups — confirm with search_memslots().
	 */
	atomic_t lru_slot;
	int used_slots;
};
6aa8b732c [PATCH] kvm: user... |
342 |
struct kvm { |
aaee2c94f KVM: MMU: Switch ... |
343 |
spinlock_t mmu_lock; |
79fac95ec KVM: convert slot... |
344 |
struct mutex slots_lock; |
6d4e4c4fc KVM: Disallow for... |
345 |
struct mm_struct *mm; /* userspace tied to this vm */ |
f481b069e KVM: implement mu... |
346 |
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; |
bc6678a33 KVM: introduce kv... |
347 |
struct srcu_struct srcu; |
719d93cd5 kvm/irqchip: Spee... |
348 |
struct srcu_struct irq_srcu; |
fb3f0f51d KVM: Dynamically ... |
349 |
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; |
6c7caebc2 KVM: introduce kv... |
350 351 352 353 354 355 356 |
/* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only * incremented after storing the kvm_vcpu pointer in vcpus, * and is accessed atomically. */ |
73880c80a KVM: Break depend... |
357 |
atomic_t online_vcpus; |
6c7caebc2 KVM: introduce kv... |
358 |
int created_vcpus; |
217ece612 KVM: use yield_to... |
359 |
int last_boosted_vcpu; |
133de9021 [PATCH] KVM: Add ... |
360 |
struct list_head vm_list; |
60eead79a KVM: introduce ir... |
361 |
struct mutex lock; |
e93f8a0f8 KVM: convert io_b... |
362 |
struct kvm_io_bus *buses[KVM_NR_BUSES]; |
721eecbf4 KVM: irqfd |
363 364 365 366 |
#ifdef CONFIG_HAVE_KVM_EVENTFD struct { spinlock_t lock; struct list_head items; |
7a84428af KVM: Add resampli... |
367 368 |
struct list_head resampler_list; struct mutex resampler_lock; |
721eecbf4 KVM: irqfd |
369 |
} irqfds; |
d34e6b175 KVM: add ioeventf... |
370 |
struct list_head ioeventfds; |
721eecbf4 KVM: irqfd |
371 |
#endif |
ba1389b7a KVM: Extend stats... |
372 |
struct kvm_vm_stat stat; |
d69fb81f0 KVM: Portability:... |
373 |
struct kvm_arch arch; |
d39f13b0d KVM: add vm refco... |
374 |
atomic_t users_count; |
5f94c1741 KVM: Add coalesce... |
375 |
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
5f94c1741 KVM: Add coalesce... |
376 |
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
2b3c246a6 KVM: Make coalesc... |
377 378 |
spinlock_t ring_lock; struct list_head coalesced_zones; |
5f94c1741 KVM: Add coalesce... |
379 |
#endif |
e930bffe9 KVM: Synchronize ... |
380 |
|
60eead79a KVM: introduce ir... |
381 |
struct mutex irq_lock; |
75858a84a KVM: Interrupt ma... |
382 |
#ifdef CONFIG_HAVE_KVM_IRQCHIP |
bd2b53b20 KVM: fast-path ms... |
383 |
/* |
9957c86d6 KVM: Move all acc... |
384 |
* Update side is protected by irq_lock. |
bd2b53b20 KVM: fast-path ms... |
385 |
*/ |
4b6a2872a kvm: add __rcu an... |
386 |
struct kvm_irq_routing_table __rcu *irq_routing; |
c77dcacb3 KVM: Move more co... |
387 388 |
#endif #ifdef CONFIG_HAVE_KVM_IRQFD |
136bdfeee KVM: Move irq ack... |
389 |
struct hlist_head irq_ack_notifier_list; |
75858a84a KVM: Interrupt ma... |
390 |
#endif |
36c1ed821 KVM: Guard mmu_no... |
391 |
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
e930bffe9 KVM: Synchronize ... |
392 393 394 395 |
struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; #endif |
a086f6a1e Revert "KVM: Simp... |
396 |
long tlbs_dirty; |
07f0a7bde kvm: destroy emul... |
397 |
struct list_head devices; |
536a6f88c KVM: Create debug... |
398 399 |
struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; |
6aa8b732c [PATCH] kvm: user... |
400 |
}; |
a737f256b KVM: Cleanup the ... |
401 402 403 404 405 406 |
#define kvm_err(fmt, ...) \ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_info(fmt, ...) \ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
ae0f54995 kvm: x86: don't p... |
407 408 409 |
#define kvm_debug_ratelimited(fmt, ...) \ pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ ## __VA_ARGS__) |
a737f256b KVM: Cleanup the ... |
410 411 412 |
#define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) |
f02424785 KVM: Add and use ... |
413 |
|
a737f256b KVM: Cleanup the ... |
414 415 |
/* The guest did something we don't support. */ #define vcpu_unimpl(vcpu, fmt, ...) \ |
671d9ab38 kvm: Dump guest r... |
416 417 |
kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) |
6aa8b732c [PATCH] kvm: user... |
418 |
|
ee86dbc6e kvm: introduce vc... |
419 420 |
#define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
ae0f54995 kvm: x86: don't p... |
421 422 423 |
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ ## __VA_ARGS__) |
765eaa0f7 kvm/x86: Hyper-V ... |
424 425 |
#define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
ee86dbc6e kvm: introduce vc... |
426 |
|
988a2cae6 KVM: Use macro to... |
427 428 |
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

/*
 * Iterate over all vcpus brought online so far; stops early if a
 * vcpus[] entry is still NULL.
 */
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
988a2cae6 KVM: Use macro to... |
442 |
|
db27a7a37 KVM: Provide func... |
443 444 |
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { |
9b9e3fc4d KVM: remove NULL ... |
445 |
struct kvm_vcpu *vcpu = NULL; |
db27a7a37 KVM: Provide func... |
446 |
int i; |
9b9e3fc4d KVM: remove NULL ... |
447 |
if (id < 0) |
c896939f7 KVM: use heuristi... |
448 |
return NULL; |
9b9e3fc4d KVM: remove NULL ... |
449 450 |
if (id < KVM_MAX_VCPUS) vcpu = kvm_get_vcpu(kvm, id); |
c896939f7 KVM: use heuristi... |
451 452 |
if (vcpu && vcpu->vcpu_id == id) return vcpu; |
db27a7a37 KVM: Provide func... |
453 454 455 456 457 |
kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->vcpu_id == id) return vcpu; return NULL; } |
be6ba0f09 KVM: introduce kv... |
458 459 |
/*
 * Walk memslots[] in array order, stopping at the first empty slot
 * (npages == 0) or after KVM_MEM_SLOTS_NUM entries.
 */
#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)
be6ba0f09 KVM: introduce kv... |
462 |
|
fb3f0f51d KVM: Dynamically ... |
463 464 |
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); |
9fc77441e KVM: make process... |
465 |
int __must_check vcpu_load(struct kvm_vcpu *vcpu); |
313a3dc75 KVM: Portability:... |
466 |
void vcpu_put(struct kvm_vcpu *vcpu); |
6ef768fac kvm: x86: move io... |
467 468 |
#ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); |
abdb080f7 kvm/irqchip: kvm_... |
469 |
void kvm_arch_post_irq_routing_update(struct kvm *kvm); |
6ef768fac kvm: x86: move io... |
470 471 472 473 |
#else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } |
abdb080f7 kvm/irqchip: kvm_... |
474 |
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) |
b053b2aef KVM: x86: Add EOI... |
475 476 |
{ } |
6ef768fac kvm: x86: move io... |
477 |
#endif |
297e21053 KVM: Give IRQFD i... |
478 |
#ifdef CONFIG_HAVE_KVM_IRQFD |
a0f155e96 KVM: Initialize i... |
479 480 481 482 483 484 485 486 487 488 489 490 |
int kvm_irqfd_init(void); void kvm_irqfd_exit(void); #else static inline int kvm_irqfd_init(void) { return 0; } static inline void kvm_irqfd_exit(void) { } #endif |
0ee75bead KVM: Let vcpu str... |
491 |
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
c16f862d0 KVM: Use kmem cac... |
492 |
struct module *module); |
cb498ea2c KVM: Portability:... |
493 |
void kvm_exit(void); |
6aa8b732c [PATCH] kvm: user... |
494 |
|
d39f13b0d KVM: add vm refco... |
495 496 |
void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); |
f481b069e KVM: implement mu... |
497 |
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	/*
	 * Readers must either be inside an SRCU read-side critical
	 * section on kvm->srcu or hold kvm->slots_lock; the lockdep
	 * expression below documents and enforces that protocol.
	 */
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
f481b069e KVM: implement mu... |
503 504 505 506 |
/* Memslots for the default address space (as_id 0). */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}
8e73485c7 KVM: add vcpu-spe... |
507 508 |
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	/* Use whichever address space the architecture assigns this vcpu. */
	return __kvm_memslots(vcpu->kvm, kvm_arch_vcpu_memslots_id(vcpu));
}
28a37544f KVM: introduce id... |
513 514 515 |
static inline struct kvm_memory_slot * id_to_memslot(struct kvm_memslots *slots, int id) { |
f85e2cb5d KVM: introduce a ... |
516 517 |
int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; |
bf3e05bc1 KVM: sort memslot... |
518 |
|
f85e2cb5d KVM: introduce a ... |
519 |
slot = &slots->memslots[index]; |
bf3e05bc1 KVM: sort memslot... |
520 |
|
f85e2cb5d KVM: introduce a ... |
521 522 |
WARN_ON(slot->id != id); return slot; |
28a37544f KVM: introduce id... |
523 |
} |
74d0727cb KVM: set_memory_r... |
524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 |
/* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot * - delete an existing memory slot * - modify an existing memory slot * -- move it in the guest physical memory space * -- just change its flags * * Since flags can be changed by some of these operations, the following * differentiation is the best we can do for __kvm_set_memory_region(): */ enum kvm_mr_change { KVM_MR_CREATE, KVM_MR_DELETE, KVM_MR_MOVE, KVM_MR_FLAGS_ONLY, }; |
210c7c4d7 KVM: Export memor... |
541 |
int kvm_set_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
542 |
const struct kvm_userspace_memory_region *mem); |
f78e0e2ee KVM: VMX: Enable ... |
543 |
int __kvm_set_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
544 |
const struct kvm_userspace_memory_region *mem); |
5587027ce kvm: Add struct k... |
545 |
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
db3fe4eb4 KVM: Introduce kv... |
546 |
struct kvm_memory_slot *dont); |
5587027ce kvm: Add struct k... |
547 548 |
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); |
15f46015e KVM: add memslots... |
549 |
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); |
f7784b8ec KVM: split kvm_ar... |
550 551 |
int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, |
09170a494 KVM: const-ify us... |
552 |
const struct kvm_userspace_memory_region *mem, |
7b6195a91 KVM: set_memory_r... |
553 |
enum kvm_mr_change change); |
f7784b8ec KVM: split kvm_ar... |
554 |
void kvm_arch_commit_memory_region(struct kvm *kvm, |
09170a494 KVM: const-ify us... |
555 |
const struct kvm_userspace_memory_region *mem, |
8482644ae KVM: set_memory_r... |
556 |
const struct kvm_memory_slot *old, |
f36f3f284 KVM: add "new" ar... |
557 |
const struct kvm_memory_slot *new, |
8482644ae KVM: set_memory_r... |
558 |
enum kvm_mr_change change); |
db3fe4eb4 KVM: Introduce kv... |
559 |
bool kvm_largepages_enabled(void); |
54dee9933 KVM: VMX: conditi... |
560 |
void kvm_disable_largepages(void); |
2df72e9bc KVM: split kvm_ar... |
561 562 563 564 565 |
/* flush all memory translations */ void kvm_arch_flush_shadow_all(struct kvm *kvm); /* flush memory translations pointing to 'slot' */ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); |
a983fb238 KVM: x86: switch ... |
566 |
|
d9ef13c2b KVM: pass kvm_mem... |
567 568 |
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages); |
48987781e KVM: MMU: introdu... |
569 |
|
954bbbc23 KVM: Simply gfn_t... |
570 |
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
05da45583 KVM: MMU: large p... |
571 |
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
ba6a35415 KVM: mmu: allow p... |
572 |
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); |
4d8b81abc KVM: introduce re... |
573 |
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); |
64d831269 KVM: Introduce gf... |
574 575 |
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); |
b4231d618 KVM: MMU: Selecti... |
576 577 |
void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); |
35149e212 KVM: MMU: Don't a... |
578 |
void kvm_set_page_accessed(struct page *page); |
ba049e93a kvm: rename pfn_t... |
579 580 581 |
kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
612819c3c KVM: propagate fa... |
582 |
bool *writable); |
ba049e93a kvm: rename pfn_t... |
583 584 585 586 587 |
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable); |
037d92dc5 KVM: introduce gf... |
588 |
|
ba049e93a kvm: rename pfn_t... |
589 590 591 592 |
void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); void kvm_get_pfn(kvm_pfn_t pfn); |
35149e212 KVM: MMU: Don't a... |
593 |
|
195aefde9 KVM: Add general ... |
594 595 |
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); |
7ec545882 KVM: Add kvm_read... |
596 597 |
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
195aefde9 KVM: Add general ... |
598 |
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
e03b644fe KVM: introduce kv... |
599 600 |
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); |
195aefde9 KVM: Add general ... |
601 602 603 604 |
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); |
49c7754ce KVM: Add memory s... |
605 606 |
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); |
4ec6e8636 kvm: Introduce kv... |
607 608 |
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, int offset, unsigned long len); |
49c7754ce KVM: Add memory s... |
609 |
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
8f964525a KVM: Allow cross ... |
610 |
gpa_t gpa, unsigned long len); |
195aefde9 KVM: Add general ... |
611 612 |
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
6aa8b732c [PATCH] kvm: user... |
613 |
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
33e941547 KVM: kvm_is_visib... |
614 |
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
8f0b1ab6f KVM: Introduce kv... |
615 |
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); |
6aa8b732c [PATCH] kvm: user... |
616 |
void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
8e73485c7 KVM: add vcpu-spe... |
617 618 |
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); |
ba049e93a kvm: rename pfn_t... |
619 620 |
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
8e73485c7 KVM: add vcpu-spe... |
621 622 623 624 625 626 627 628 629 630 631 632 633 634 |
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len); int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len); int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
8776e5194 KVM: Portability:... |
635 |
void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
3217f7c25 KVM: Add kvm_arch... |
636 637 |
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); |
dd1a4cc1f KVM: split kvm_vc... |
638 |
void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); |
b6d33834b KVM: Factor out k... |
639 |
void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
fa93384f4 sched: Fix signed... |
640 |
int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
d255f4f2b KVM: introduce kv... |
641 |
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); |
7702fd1f6 KVM: Prevent gues... |
642 643 |
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); |
a4ee1ca4a KVM: MMU: delay f... |
644 |
|
d9e368d61 KVM: Flush remote... |
645 |
void kvm_flush_remote_tlbs(struct kvm *kvm); |
2e53d63ac KVM: MMU: ignore ... |
646 |
void kvm_reload_remote_mmus(struct kvm *kvm); |
445b82369 kvm: Rename make_... |
647 |
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); |
6aa8b732c [PATCH] kvm: user... |
648 |
|
043405e10 KVM: Move x86 msr... |
649 650 |
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
313a3dc75 KVM: Portability:... |
651 652 |
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
5b1c1493a KVM: s390: ucontr... |
653 |
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); |
018d00d2f KVM: Portability:... |
654 |
|
784aa3d7f KVM: Rename and a... |
655 |
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
018d00d2f KVM: Portability:... |
656 |
|
5bb064dcd KVM: Portability:... |
657 658 |
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty); |
ba0513b5b KVM: Add generic ... |
659 660 661 |
int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log, bool *is_dirty); |
3b0f1d01e KVM: Rename kvm_a... |
662 |
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
ba0513b5b KVM: Add generic ... |
663 664 665 |
struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); |
5bb064dcd KVM: Portability:... |
666 667 |
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); |
aa2fbe6d4 KVM: Let ioapic k... |
668 669 |
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); |
1fe779f8e KVM: Portability:... |
670 671 |
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
313a3dc75 KVM: Portability:... |
672 |
|
d07520607 KVM: Portability:... |
673 674 |
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
8b0067913 KVM: Portability:... |
675 676 |
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr); |
b6c7a5dcc KVM: Portability:... |
677 678 679 680 681 682 |
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
62d9f0dbc KVM: add ioctls t... |
683 684 685 686 |
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); |
d0bfb940e KVM: New guest de... |
687 688 |
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); |
b6c7a5dcc KVM: Portability:... |
689 |
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
f8c16bbaa KVM: Portability:... |
690 691 |
int kvm_arch_init(void *opaque); void kvm_arch_exit(void); |
043405e10 KVM: Move x86 msr... |
692 |
|
e9b11c175 KVM: Portability:... |
693 694 |
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); |
e790d9ef6 KVM: add kvm_arch... |
695 |
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); |
e9b11c175 KVM: Portability:... |
696 697 698 699 |
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); |
26e5215fd KVM: Split vcpu c... |
700 |
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); |
31928aa58 KVM: remove unnee... |
701 |
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); |
d40ccc624 KVM: Correct cons... |
702 |
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
e9b11c175 KVM: Portability:... |
703 |
|
235539b48 kvm: add stubs fo... |
704 705 |
bool kvm_arch_has_vcpu_debugfs(void); int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); |
13a34e067 KVM: remove garba... |
706 707 |
int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); |
e9b11c175 KVM: Portability:... |
708 709 710 |
int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); void kvm_arch_check_processor_compat(void *rtn); |
1d737c8a6 KVM: Portability:... |
711 |
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
b6d33834b KVM: Factor out k... |
712 |
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
e9b11c175 KVM: Portability:... |
713 |
|
c1a7b32a1 KVM: Avoid wastin... |
714 |
void *kvm_kvzalloc(unsigned long size); |
c1a7b32a1 KVM: Avoid wastin... |
715 |
|
d89f5eff7 KVM: Clean up vm ... |
716 717 718 719 720 721 722 723 724 725 726 |
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * Default VM allocation/free: a zeroed struct kvm from the slab.
 * An architecture that needs a different allocator defines
 * __KVM_HAVE_ARCH_VM_ALLOC and provides its own pair.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif
e0f0bbc52 kvm: Create non-c... |
727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 |
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
/*
 * No-op stubs for architectures that do not track non-coherent DMA;
 * kvm_arch_has_noncoherent_dma() then always reports false.
 */
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
5544eb9b8 KVM: count number... |
745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 |
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
/*
 * No-op stubs for architectures that do not count assigned (pass-through)
 * devices; kvm_arch_has_assigned_device() then always reports false.
 */
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif
e0f0bbc52 kvm: Create non-c... |
763 |
|
8577370fb KVM: Use simple w... |
764 |
/*
 * Wait queue this vcpu blocks on.  Architectures that define
 * __KVM_HAVE_ARCH_WQP supply their own queue pointer in vcpu->arch;
 * everyone else uses the generic vcpu->wq.
 */
static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
b6d33834b KVM: Factor out k... |
772 |
|
01c94e64f KVM: introduce kv... |
773 774 775 776 777 778 779 780 781 782 783 784 785 |
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
/* Architectures without a dynamic interrupt controller are always ready. */
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif
e08b96371 KVM: s390: add pa... |
786 |
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
d19a9cd27 KVM: Portability:... |
787 |
void kvm_arch_destroy_vm(struct kvm *kvm); |
ad8ba2cd4 KVM: Add kvm_arch... |
788 |
void kvm_arch_sync_events(struct kvm *kvm); |
e9b11c175 KVM: Portability:... |
789 |
|
3d80840d9 KVM: hlt emulatio... |
790 |
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
5736199af KVM: Move kvm_vcp... |
791 |
void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
682c59a3f KVM: Portability:... |
792 |
|
ba049e93a kvm: rename pfn_t... |
793 |
bool kvm_is_reserved_pfn(kvm_pfn_t pfn); |
c77fb9dc7 KVM: Change is_mm... |
794 |
|
62c476c7c KVM: Device Assig... |
795 796 797 798 799 |
struct kvm_irq_ack_notifier { struct hlist_node link; unsigned gsi; void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; |
9957c86d6 KVM: Move all acc... |
800 801 802 |
int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi); int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); |
8ba918d48 KVM: irqchip: Pro... |
803 |
|
aa2fbe6d4 KVM: Let ioapic k... |
804 805 |
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); |
bd2b53b20 KVM: fast-path ms... |
806 |
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
aa2fbe6d4 KVM: Let ioapic k... |
807 |
int irq_source_id, int level, bool line_status); |
b97e6de9c KVM: x86: merge k... |
808 809 810 |
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status); |
c7c9c56ca x86, apicv: add v... |
811 |
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
ba1aefcd6 kvm/eventfd: fact... |
812 |
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); |
44882eed2 KVM: make irq ack... |
813 |
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
3de42dc09 KVM: Separate irq... |
814 815 |
void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); |
fa40a8214 KVM: switch irq i... |
816 817 |
void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); |
5550af4df KVM: Fix guest sh... |
818 819 |
int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
62c476c7c KVM: Device Assig... |
820 |
|
2a5bab100 kvm: Allow build-... |
821 |
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
/*
 * Without device assignment there is nothing to map into the IOMMU;
 * the map stub reports success so callers need no special casing.
 */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif
62c476c7c KVM: Device Assig... |
835 |
|
9d4cba7f9 KVM: Move gfn_to_... |
836 837 838 839 840 841 842 843 844 |
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	/* Fast path: try the most-recently-hit slot first. */
	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	/*
	 * Binary search over the used slots.  The comparison direction
	 * (gfn >= base_gfn moves 'end' down) relies on the array being
	 * kept sorted by base_gfn in descending order.
	 */
	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		/* Remember the hit for the next lookup's fast path. */
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

/* Memslot containing @gfn, or NULL if the gfn is not backed by one. */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
66a03505a KVM: PPC: book3s:... |
876 877 878 879 880 |
/*
 * Host virtual address backing @gfn within @slot.  No validity check:
 * the caller must already know the gfn lies inside the slot.
 */
static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long offset = (gfn - slot->base_gfn) * PAGE_SIZE;

	return slot->userspace_addr + offset;
}
0ee8dcb87 KVM: cleanup mems... |
881 882 883 884 |
/*
 * ID of the memslot containing @gfn.
 *
 * NOTE(review): gfn_to_memslot() can return NULL for an unmapped gfn,
 * which would be dereferenced here; presumably callers only pass gfns
 * known to be backed by a memslot — verify at call sites.
 */
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}
d19a748b1 KVM: Introduce hv... |
885 886 |
static inline gfn_t hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) |
887c08ac1 KVM: MMU: introdu... |
887 |
{ |
d19a748b1 KVM: Introduce hv... |
888 889 890 |
gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; return slot->base_gfn + gfn_offset; |
887c08ac1 KVM: MMU: introdu... |
891 |
} |
1755fbcc6 KVM: MMU: Introdu... |
892 893 894 895 |
/* Guest physical address of the base of guest frame @gfn. */
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	gpa_t gpa = gfn;	/* widen before shifting */

	return gpa << PAGE_SHIFT;
}
6aa8b732c [PATCH] kvm: user... |
896 |
|
c30a358d3 KVM: MMU: Add inf... |
897 898 899 900 |
/* Guest frame number containing guest physical address @gpa. */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	gpa_t frame = gpa >> PAGE_SHIFT;

	return (gfn_t)frame;
}
ba049e93a kvm: rename pfn_t... |
901 |
/* Host physical address of the base of host frame @pfn. */
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	hpa_t hpa = pfn;	/* widen before shifting */

	return hpa << PAGE_SHIFT;
}
dfeec843f KVM: add kvm_is_e... |
905 906 907 908 909 910 |
/* True if @gpa does not resolve to a valid host virtual address. */
static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	return kvm_is_error_hva(gfn_to_hva(kvm, gpa_to_gfn(gpa)));
}
ba1389b7a KVM: Extend stats... |
911 912 913 914 |
/* Whether a statistics counter is accounted per VM or per vCPU. */
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};
536a6f88c KVM: Create debug... |
915 916 917 918 |
/*
 * Private data for one per-VM debugfs statistics file: the counter's
 * offset (paired with the entry in debugfs_entries) and the VM it
 * is read from.
 */
struct kvm_stat_data {
	int offset;	/* counter offset; interpretation depends on the stat kind — TODO confirm against kvm_main.c */
	struct kvm *kvm;
};
417bc3041 KVM: Portability:... |
919 920 921 |
/* One row of the statistics table exported through debugfs. */
struct kvm_stats_debugfs_item {
	const char *name;	/* debugfs file name */
	int offset;		/* counter offset; paired with 'kind' */
	enum kvm_stat_kind kind;	/* per-VM or per-vCPU counter */
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
76f7c8790 KVM: Rename debug... |
925 |
extern struct dentry *kvm_debugfs_dir; |
d4c9ff2d1 KVM: Add kvm trac... |
926 |
|
36c1ed821 KVM: Guard mmu_no... |
927 |
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
/*
 * Returns non-zero if an MMU-notifier invalidation may have raced with
 * the caller: either an invalidation is in flight (mmu_notifier_count
 * != 0) or one has completed since the caller sampled @mmu_seq.  The
 * caller must then retry its page-fault handling.
 */
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
a725d56a0 KVM: Introduce CO... |
948 |
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
399ec807d KVM: Userspace co... |
949 |
|
f3f710bc6 KVM: Bump KVM_MAX... |
950 951 |
#ifdef CONFIG_S390 #define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... |
995a0ee98 KVM: arm/arm64: E... |
952 953 |
#elif defined(CONFIG_ARM64) #define KVM_MAX_IRQ_ROUTES 4096 |
f3f710bc6 KVM: Bump KVM_MAX... |
954 |
#else |
399ec807d KVM: Userspace co... |
955 |
#define KVM_MAX_IRQ_ROUTES 1024 |
f3f710bc6 KVM: Bump KVM_MAX... |
956 |
#endif |
399ec807d KVM: Userspace co... |
957 |
|
399ec807d KVM: Userspace co... |
958 959 960 961 |
int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, unsigned flags); |
c63cf538e KVM: pass struct ... |
962 963 |
int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, |
e8cde0939 KVM: Move irq rou... |
964 |
const struct kvm_irq_routing_entry *ue); |
399ec807d KVM: Userspace co... |
965 966 967 968 969 970 971 |
void kvm_free_irq_routing(struct kvm *kvm); #else static inline void kvm_free_irq_routing(struct kvm *kvm) {} #endif |
297e21053 KVM: Give IRQFD i... |
972 |
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
721eecbf4 KVM: irqfd |
973 |
#ifdef CONFIG_HAVE_KVM_EVENTFD |
d34e6b175 KVM: add ioeventf... |
974 |
void kvm_eventfd_init(struct kvm *kvm); |
914daba86 KVM: Distangle ev... |
975 |
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); |
297e21053 KVM: Give IRQFD i... |
976 |
#ifdef CONFIG_HAVE_KVM_IRQFD |
d4db2935e KVM: Pass kvm_irq... |
977 |
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); |
721eecbf4 KVM: irqfd |
978 |
void kvm_irqfd_release(struct kvm *kvm); |
9957c86d6 KVM: Move all acc... |
979 |
void kvm_irq_routing_update(struct kvm *); |
914daba86 KVM: Distangle ev... |
980 981 982 983 984 985 986 987 |
#else static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} #endif |
721eecbf4 KVM: irqfd |
988 989 |
#else |
d34e6b175 KVM: add ioeventf... |
990 |
static inline void kvm_eventfd_init(struct kvm *kvm) {} |
bd2b53b20 KVM: fast-path ms... |
991 |
|
d4db2935e KVM: Pass kvm_irq... |
992 |
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) |
721eecbf4 KVM: irqfd |
993 994 995 996 997 |
{ return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} |
bd2b53b20 KVM: fast-path ms... |
998 |
|
27923eb19 KVM: PPC: Fix com... |
999 |
#ifdef CONFIG_HAVE_KVM_IRQCHIP |
9957c86d6 KVM: Move all acc... |
1000 |
static inline void kvm_irq_routing_update(struct kvm *kvm) |
bd2b53b20 KVM: fast-path ms... |
1001 |
{ |
bd2b53b20 KVM: fast-path ms... |
1002 |
} |
27923eb19 KVM: PPC: Fix com... |
1003 |
#endif |
abdb080f7 kvm/irqchip: kvm_... |
1004 |
void kvm_arch_irq_routing_update(struct kvm *kvm); |
bd2b53b20 KVM: fast-path ms... |
1005 |
|
d34e6b175 KVM: add ioeventf... |
1006 1007 1008 1009 |
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { return -ENOSYS; } |
721eecbf4 KVM: irqfd |
1010 1011 |
#endif /* CONFIG_HAVE_KVM_EVENTFD */ |
a8eeb04a4 KVM: Add mini-API... |
1012 1013 |
/*
 * Post request bit @req on @vcpu; the vcpu consumes it via
 * kvm_check_request().
 */
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req, &vcpu->requests);
}
a8eeb04a4 KVM: Add mini-API... |
1021 1022 |
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { |
0719837c0 KVM: Reduce atomi... |
1023 1024 |
if (test_bit(req, &vcpu->requests)) { clear_bit(req, &vcpu->requests); |
2e4682ba2 KVM: add missing ... |
1025 1026 1027 1028 1029 1030 |
/* * Ensure the rest of the request is visible to kvm_check_request's * caller. Paired with the smp_wmb in kvm_make_request. */ smp_mb__after_atomic(); |
0719837c0 KVM: Reduce atomi... |
1031 1032 1033 1034 |
return true; } else { return false; } |
a8eeb04a4 KVM: Add mini-API... |
1035 |
} |
8b415dcd7 KVM: Move kvm_reb... |
1036 |
extern bool kvm_rebooting; |
ec76d819d KVM: Export kvm m... |
1037 1038 1039 |
extern unsigned int halt_poll_ns; extern unsigned int halt_poll_ns_grow; extern unsigned int halt_poll_ns_shrink; |
852b6d57d kvm: add device c... |
1040 1041 1042 |
/* An in-kernel emulated device instance and its reference on the VM. */
struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;			/* device-type private state */
	struct list_head vm_node;	/* entry on the VM's device list */
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
d60eacb07 KVM: device: add ... |
1084 |
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); |
571ee1b68 kvm: vfio: fix un... |
1085 |
void kvm_unregister_device_ops(u32 type); |
852b6d57d kvm: add device c... |
1086 |
|
5df554ad5 kvm/ppc/mpic: in-... |
1087 |
extern struct kvm_device_ops kvm_mpic_ops; |
5975a2e09 KVM: PPC: Book3S:... |
1088 |
extern struct kvm_device_ops kvm_xics_ops; |
ea2f83a7d arm/arm64: KVM: m... |
1089 |
extern struct kvm_device_ops kvm_arm_vgic_v2_ops; |
a0675c25d arm/arm64: KVM: a... |
1090 |
extern struct kvm_device_ops kvm_arm_vgic_v3_ops; |
5df554ad5 kvm/ppc/mpic: in-... |
1091 |
|
4c088493c KVM: Note down wh... |
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 |
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.in_spin_loop = val; } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.dy_eligible = val; } #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } |
4c088493c KVM: Note down wh... |
1112 |
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
1a02b2703 KVM: introduce kv... |
1113 1114 |
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
14717e203 kvm: Conditionall... |
1115 |
bool kvm_arch_has_irq_bypass(void); |
1a02b2703 KVM: introduce kv... |
1116 1117 1118 1119 1120 1121 |
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); |
f70c20aaf KVM: Add an arch ... |
1122 1123 |
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); |
1a02b2703 KVM: introduce kv... |
1124 |
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ |
35181e86d KVM: x86: Add a c... |
1125 |
|
3491caf27 KVM: halt_polling... |
1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 |
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}
#else
/* Architectures without invalid-wakeup tracking treat every wakeup as valid. */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
bfd99ff5d KVM: Move assigne... |
1139 |
#endif |