Commit 0998baa8639e298a9272264260faadebe8fb8968
1 parent
b8c5b64243
Exists in
smarc_8mq-imx_v2020.04_5.4.24_2.1.0
and in
1 other branch
MLK-20373-1 Introduce xen header files
Introduce xen header files from Linux Kernel commit e2b623fbe6a3("Merge tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux") Signed-off-by: Peng Fan <peng.fan@nxp.com> Reviewed-by: Peng Fan <peng.fan@nxp.com> Reviewed-by: Flynn xu <flynn.xu@nxp.com> (cherry picked from commit ddb393c45158f6114bd53c83dcd8397a6c2acbdc) (cherry picked from commit 8591f37af6345695a98216c136e9209a880901f1) (cherry picked from commit b93308506902dd30b84640f59fa75ada21d8775a)
Showing 14 changed files with 3604 additions and 0 deletions Inline Diff
- arch/arm/include/asm/xen/hypercall.h
- arch/arm/include/asm/xen/interface.h
- include/xen/event_channel.h
- include/xen/events.h
- include/xen/hvm.h
- include/xen/interface/event_channel.h
- include/xen/interface/grant_table.h
- include/xen/interface/hvm/hvm_op.h
- include/xen/interface/hvm/params.h
- include/xen/interface/io/console.h
- include/xen/interface/io/ring.h
- include/xen/interface/platform.h
- include/xen/interface/sched.h
- include/xen/interface/xen.h
arch/arm/include/asm/xen/hypercall.h
File was created | 1 | /****************************************************************************** | |
2 | * hypercall.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _ASM_ARM_XEN_HYPERCALL_H | ||
34 | #define _ASM_ARM_XEN_HYPERCALL_H | ||
35 | |||
36 | #include <linux/bug.h> | ||
37 | |||
38 | #include <xen/interface/xen.h> | ||
39 | //#include <xen/interface/sched.h> | ||
40 | #include <xen/interface/platform.h> | ||
41 | |||
42 | struct xen_dm_op_buf; | ||
43 | |||
44 | long privcmd_call(unsigned call, unsigned long a1, | ||
45 | unsigned long a2, unsigned long a3, | ||
46 | unsigned long a4, unsigned long a5); | ||
47 | int HYPERVISOR_xen_version(int cmd, void *arg); | ||
48 | int HYPERVISOR_console_io(int cmd, int count, char *str); | ||
49 | int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); | ||
50 | int HYPERVISOR_sched_op(int cmd, void *arg); | ||
51 | int HYPERVISOR_event_channel_op(int cmd, void *arg); | ||
52 | unsigned long HYPERVISOR_hvm_op(int op, void *arg); | ||
53 | int HYPERVISOR_memory_op(unsigned int cmd, void *arg); | ||
54 | int HYPERVISOR_physdev_op(int cmd, void *arg); | ||
55 | int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); | ||
56 | int HYPERVISOR_tmem_op(void *arg); | ||
57 | int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); | ||
58 | int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs, | ||
59 | struct xen_dm_op_buf *bufs); | ||
60 | int HYPERVISOR_platform_op_raw(void *arg); | ||
61 | static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) | ||
62 | { | ||
63 | op->interface_version = XENPF_INTERFACE_VERSION; | ||
64 | return HYPERVISOR_platform_op_raw(op); | ||
65 | } | ||
66 | #endif /* _ASM_ARM_XEN_HYPERCALL_H */ | ||
67 |
arch/arm/include/asm/xen/interface.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /****************************************************************************** | ||
3 | * Guest OS interface to ARM Xen. | ||
4 | * | ||
5 | * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_ARM_XEN_INTERFACE_H | ||
9 | #define _ASM_ARM_XEN_INTERFACE_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | |||
13 | #define uint64_aligned_t uint64_t __attribute__((aligned(8))) | ||
14 | |||
15 | #define __DEFINE_GUEST_HANDLE(name, type) \ | ||
16 | typedef struct { union { type *p; uint64_aligned_t q; }; } \ | ||
17 | __guest_handle_ ## name | ||
18 | |||
19 | #define DEFINE_GUEST_HANDLE_STRUCT(name) \ | ||
20 | __DEFINE_GUEST_HANDLE(name, struct name) | ||
21 | #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) | ||
22 | #define GUEST_HANDLE(name) __guest_handle_ ## name | ||
23 | |||
24 | #define set_xen_guest_handle(hnd, val) \ | ||
25 | do { \ | ||
26 | if (sizeof(hnd) == 8) \ | ||
27 | *(uint64_t *)&(hnd) = 0; \ | ||
28 | (hnd).p = val; \ | ||
29 | } while (0) | ||
30 | |||
31 | #define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op | ||
32 | |||
33 | #ifndef __ASSEMBLY__ | ||
34 | /* Explicitly size integers that represent pfns in the interface with | ||
35 | * Xen so that we can have one ABI that works for 32 and 64 bit guests. | ||
36 | * Note that this means that the xen_pfn_t type may be capable of | ||
37 | * representing pfn's which the guest cannot represent in its own pfn | ||
38 | * type. However since pfn space is controlled by the guest this is | ||
39 | * fine since it simply wouldn't be able to create any sure pfns in | ||
40 | * the first place. | ||
41 | */ | ||
42 | typedef uint64_t xen_pfn_t; | ||
43 | #define PRI_xen_pfn "llx" | ||
44 | typedef uint64_t xen_ulong_t; | ||
45 | #define PRI_xen_ulong "llx" | ||
46 | typedef int64_t xen_long_t; | ||
47 | #define PRI_xen_long "llx" | ||
48 | /* Guest handles for primitive C types. */ | ||
49 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); | ||
50 | __DEFINE_GUEST_HANDLE(uint, unsigned int); | ||
51 | DEFINE_GUEST_HANDLE(char); | ||
52 | DEFINE_GUEST_HANDLE(int); | ||
53 | DEFINE_GUEST_HANDLE(void); | ||
54 | DEFINE_GUEST_HANDLE(uint64_t); | ||
55 | DEFINE_GUEST_HANDLE(uint32_t); | ||
56 | DEFINE_GUEST_HANDLE(xen_pfn_t); | ||
57 | DEFINE_GUEST_HANDLE(xen_ulong_t); | ||
58 | |||
59 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
60 | #define MAX_VIRT_CPUS 1 | ||
61 | |||
62 | struct arch_vcpu_info { }; | ||
63 | struct arch_shared_info { }; | ||
64 | |||
65 | /* TODO: Move pvclock definitions some place arch independent */ | ||
66 | struct pvclock_vcpu_time_info { | ||
67 | u32 version; | ||
68 | u32 pad0; | ||
69 | u64 tsc_timestamp; | ||
70 | u64 system_time; | ||
71 | u32 tsc_to_system_mul; | ||
72 | s8 tsc_shift; | ||
73 | u8 flags; | ||
74 | u8 pad[2]; | ||
75 | } __attribute__((__packed__)); /* 32 bytes */ | ||
76 | |||
77 | /* It is OK to have a 12 bytes struct with no padding because it is packed */ | ||
78 | struct pvclock_wall_clock { | ||
79 | u32 version; | ||
80 | u32 sec; | ||
81 | u32 nsec; | ||
82 | u32 sec_hi; | ||
83 | } __attribute__((__packed__)); | ||
84 | #endif | ||
85 | |||
86 | #endif /* _ASM_ARM_XEN_INTERFACE_H */ | ||
87 |
include/xen/event_channel.h
File was created | 1 | /****************************************************************************** | |
2 | * event_channel.h | ||
3 | * | ||
4 | * Event channels between domains. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright (c) 2003-2004, K A Fraser. | ||
25 | */ | ||
26 | |||
27 | #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
28 | #define __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
29 | |||
30 | #include "xen.h" | ||
31 | |||
32 | /* | ||
33 | * `incontents 150 evtchn Event Channels | ||
34 | * | ||
35 | * Event channels are the basic primitive provided by Xen for event | ||
36 | * notifications. An event is the Xen equivalent of a hardware | ||
37 | * interrupt. They essentially store one bit of information, the event | ||
38 | * of interest is signalled by transitioning this bit from 0 to 1. | ||
39 | * | ||
40 | * Notifications are received by a guest via an upcall from Xen, | ||
41 | * indicating when an event arrives (setting the bit). Further | ||
42 | * notifications are masked until the bit is cleared again (therefore, | ||
43 | * guests must check the value of the bit after re-enabling event | ||
44 | * delivery to ensure no missed notifications). | ||
45 | * | ||
46 | * Event notifications can be masked by setting a flag; this is | ||
47 | * equivalent to disabling interrupts and can be used to ensure | ||
48 | * atomicity of certain operations in the guest kernel. | ||
49 | * | ||
50 | * Event channels are represented by the evtchn_* fields in | ||
51 | * struct shared_info and struct vcpu_info. | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * ` enum neg_errnoval | ||
56 | * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args) | ||
57 | * ` | ||
58 | * @cmd == EVTCHNOP_* (event-channel operation). | ||
59 | * @args == struct evtchn_* Operation-specific extra arguments (NULL if none). | ||
60 | */ | ||
61 | |||
62 | /* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */ | ||
63 | #define EVTCHNOP_bind_interdomain 0 | ||
64 | #define EVTCHNOP_bind_virq 1 | ||
65 | #define EVTCHNOP_bind_pirq 2 | ||
66 | #define EVTCHNOP_close 3 | ||
67 | #define EVTCHNOP_send 4 | ||
68 | #define EVTCHNOP_status 5 | ||
69 | #define EVTCHNOP_alloc_unbound 6 | ||
70 | #define EVTCHNOP_bind_ipi 7 | ||
71 | #define EVTCHNOP_bind_vcpu 8 | ||
72 | #define EVTCHNOP_unmask 9 | ||
73 | #define EVTCHNOP_reset 10 | ||
74 | #define EVTCHNOP_init_control 11 | ||
75 | #define EVTCHNOP_expand_array 12 | ||
76 | #define EVTCHNOP_set_priority 13 | ||
77 | /* ` } */ | ||
78 | |||
79 | typedef uint32_t evtchn_port_t; | ||
80 | DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); | ||
81 | |||
82 | /* | ||
83 | * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as | ||
84 | * accepting interdomain bindings from domain <remote_dom>. A fresh port | ||
85 | * is allocated in <dom> and returned as <port>. | ||
86 | * NOTES: | ||
87 | * 1. If the caller is unprivileged then <dom> must be DOMID_SELF. | ||
88 | * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections. | ||
89 | */ | ||
90 | struct evtchn_alloc_unbound { | ||
91 | /* IN parameters */ | ||
92 | domid_t dom, remote_dom; | ||
93 | /* OUT parameters */ | ||
94 | evtchn_port_t port; | ||
95 | }; | ||
96 | typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; | ||
97 | |||
98 | /* | ||
99 | * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between | ||
100 | * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify | ||
101 | * a port that is unbound and marked as accepting bindings from the calling | ||
102 | * domain. A fresh port is allocated in the calling domain and returned as | ||
103 | * <local_port>. | ||
104 | * | ||
105 | * In case the peer domain has already tried to set our event channel | ||
106 | * pending, before it was bound, EVTCHNOP_bind_interdomain always sets | ||
107 | * the local event channel pending. | ||
108 | * | ||
109 | * The usual pattern of use, in the guest's upcall (or subsequent | ||
110 | * handler) is as follows: (Re-enable the event channel for subsequent | ||
111 | * signalling and then) check for the existence of whatever condition | ||
112 | * is being waited for by other means, and take whatever action is | ||
113 | * needed (if any). | ||
114 | * | ||
115 | * NOTES: | ||
116 | * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections. | ||
117 | */ | ||
118 | struct evtchn_bind_interdomain { | ||
119 | /* IN parameters. */ | ||
120 | domid_t remote_dom; | ||
121 | evtchn_port_t remote_port; | ||
122 | /* OUT parameters. */ | ||
123 | evtchn_port_t local_port; | ||
124 | }; | ||
125 | typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; | ||
126 | |||
127 | /* | ||
128 | * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified | ||
129 | * vcpu. | ||
130 | * NOTES: | ||
131 | * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list | ||
132 | * in xen.h for the classification of each VIRQ. | ||
133 | * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be | ||
134 | * re-bound via EVTCHNOP_bind_vcpu. | ||
135 | * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. | ||
136 | * The allocated event channel is bound to the specified vcpu and the | ||
137 | * binding cannot be changed. | ||
138 | */ | ||
139 | struct evtchn_bind_virq { | ||
140 | /* IN parameters. */ | ||
141 | uint32_t virq; /* enum virq */ | ||
142 | uint32_t vcpu; | ||
143 | /* OUT parameters. */ | ||
144 | evtchn_port_t port; | ||
145 | }; | ||
146 | typedef struct evtchn_bind_virq evtchn_bind_virq_t; | ||
147 | |||
148 | /* | ||
149 | * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ <irq>). | ||
150 | * NOTES: | ||
151 | * 1. A physical IRQ may be bound to at most one event channel per domain. | ||
152 | * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. | ||
153 | */ | ||
154 | struct evtchn_bind_pirq { | ||
155 | /* IN parameters. */ | ||
156 | uint32_t pirq; | ||
157 | #define BIND_PIRQ__WILL_SHARE 1 | ||
158 | uint32_t flags; /* BIND_PIRQ__* */ | ||
159 | /* OUT parameters. */ | ||
160 | evtchn_port_t port; | ||
161 | }; | ||
162 | typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; | ||
163 | |||
164 | /* | ||
165 | * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. | ||
166 | * NOTES: | ||
167 | * 1. The allocated event channel is bound to the specified vcpu. The binding | ||
168 | * may not be changed. | ||
169 | */ | ||
170 | struct evtchn_bind_ipi { | ||
171 | uint32_t vcpu; | ||
172 | /* OUT parameters. */ | ||
173 | evtchn_port_t port; | ||
174 | }; | ||
175 | typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; | ||
176 | |||
177 | /* | ||
178 | * EVTCHNOP_close: Close a local event channel <port>. If the channel is | ||
179 | * interdomain then the remote end is placed in the unbound state | ||
180 | * (EVTCHNSTAT_unbound), awaiting a new connection. | ||
181 | */ | ||
182 | struct evtchn_close { | ||
183 | /* IN parameters. */ | ||
184 | evtchn_port_t port; | ||
185 | }; | ||
186 | typedef struct evtchn_close evtchn_close_t; | ||
187 | |||
188 | /* | ||
189 | * EVTCHNOP_send: Send an event to the remote end of the channel whose local | ||
190 | * endpoint is <port>. | ||
191 | */ | ||
192 | struct evtchn_send { | ||
193 | /* IN parameters. */ | ||
194 | evtchn_port_t port; | ||
195 | }; | ||
196 | typedef struct evtchn_send evtchn_send_t; | ||
197 | |||
198 | /* | ||
199 | * EVTCHNOP_status: Get the current status of the communication channel which | ||
200 | * has an endpoint at <dom, port>. | ||
201 | * NOTES: | ||
202 | * 1. <dom> may be specified as DOMID_SELF. | ||
203 | * 2. Only a sufficiently-privileged domain may obtain the status of an event | ||
204 | * channel for which <dom> is not DOMID_SELF. | ||
205 | */ | ||
206 | struct evtchn_status { | ||
207 | /* IN parameters */ | ||
208 | domid_t dom; | ||
209 | evtchn_port_t port; | ||
210 | /* OUT parameters */ | ||
211 | #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ | ||
212 | #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ | ||
213 | #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ | ||
214 | #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ | ||
215 | #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ | ||
216 | #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ | ||
217 | uint32_t status; | ||
218 | uint32_t vcpu; /* VCPU to which this channel is bound. */ | ||
219 | union { | ||
220 | struct { | ||
221 | domid_t dom; | ||
222 | } unbound; /* EVTCHNSTAT_unbound */ | ||
223 | struct { | ||
224 | domid_t dom; | ||
225 | evtchn_port_t port; | ||
226 | } interdomain; /* EVTCHNSTAT_interdomain */ | ||
227 | uint32_t pirq; /* EVTCHNSTAT_pirq */ | ||
228 | uint32_t virq; /* EVTCHNSTAT_virq */ | ||
229 | } u; | ||
230 | }; | ||
231 | typedef struct evtchn_status evtchn_status_t; | ||
232 | |||
233 | /* | ||
234 | * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an | ||
235 | * event is pending. | ||
236 | * NOTES: | ||
237 | * 1. IPI-bound channels always notify the vcpu specified at bind time. | ||
238 | * This binding cannot be changed. | ||
239 | * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. | ||
240 | * This binding cannot be changed. | ||
241 | * 3. All other channels notify vcpu0 by default. This default is set when | ||
242 | * the channel is allocated (a port that is freed and subsequently reused | ||
243 | * has its binding reset to vcpu0). | ||
244 | */ | ||
245 | struct evtchn_bind_vcpu { | ||
246 | /* IN parameters. */ | ||
247 | evtchn_port_t port; | ||
248 | uint32_t vcpu; | ||
249 | }; | ||
250 | typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; | ||
251 | |||
252 | /* | ||
253 | * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver | ||
254 | * a notification to the appropriate VCPU if an event is pending. | ||
255 | */ | ||
256 | struct evtchn_unmask { | ||
257 | /* IN parameters. */ | ||
258 | evtchn_port_t port; | ||
259 | }; | ||
260 | typedef struct evtchn_unmask evtchn_unmask_t; | ||
261 | |||
262 | /* | ||
263 | * EVTCHNOP_reset: Close all event channels associated with specified domain. | ||
264 | * NOTES: | ||
265 | * 1. <dom> may be specified as DOMID_SELF. | ||
266 | * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. | ||
267 | * 3. Destroys all control blocks and event array, resets event channel | ||
268 | * operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO | ||
269 | * ABI was used. Guests should not bind events during EVTCHNOP_reset call | ||
270 | * as these events are likely to be lost. | ||
271 | */ | ||
272 | struct evtchn_reset { | ||
273 | /* IN parameters. */ | ||
274 | domid_t dom; | ||
275 | }; | ||
276 | typedef struct evtchn_reset evtchn_reset_t; | ||
277 | |||
278 | /* | ||
279 | * EVTCHNOP_init_control: initialize the control block for the FIFO ABI. | ||
280 | * | ||
281 | * Note: any events that are currently pending will not be resent and | ||
282 | * will be lost. Guests should call this before binding any event to | ||
283 | * avoid losing any events. | ||
284 | */ | ||
285 | struct evtchn_init_control { | ||
286 | /* IN parameters. */ | ||
287 | uint64_t control_gfn; | ||
288 | uint32_t offset; | ||
289 | uint32_t vcpu; | ||
290 | /* OUT parameters. */ | ||
291 | uint8_t link_bits; | ||
292 | uint8_t _pad[7]; | ||
293 | }; | ||
294 | typedef struct evtchn_init_control evtchn_init_control_t; | ||
295 | |||
296 | /* | ||
297 | * EVTCHNOP_expand_array: add an additional page to the event array. | ||
298 | */ | ||
299 | struct evtchn_expand_array { | ||
300 | /* IN parameters. */ | ||
301 | uint64_t array_gfn; | ||
302 | }; | ||
303 | typedef struct evtchn_expand_array evtchn_expand_array_t; | ||
304 | |||
305 | /* | ||
306 | * EVTCHNOP_set_priority: set the priority for an event channel. | ||
307 | */ | ||
308 | struct evtchn_set_priority { | ||
309 | /* IN parameters. */ | ||
310 | uint32_t port; | ||
311 | uint32_t priority; | ||
312 | }; | ||
313 | typedef struct evtchn_set_priority evtchn_set_priority_t; | ||
314 | |||
315 | /* | ||
316 | * ` enum neg_errnoval | ||
317 | * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op) | ||
318 | * ` | ||
 	319 | * Superseded by new event_channel_op() hypercall since 0x00030202. | ||
320 | */ | ||
321 | struct evtchn_op { | ||
322 | uint32_t cmd; /* enum event_channel_op */ | ||
323 | union { | ||
324 | struct evtchn_alloc_unbound alloc_unbound; | ||
325 | struct evtchn_bind_interdomain bind_interdomain; | ||
326 | struct evtchn_bind_virq bind_virq; | ||
327 | struct evtchn_bind_pirq bind_pirq; | ||
328 | struct evtchn_bind_ipi bind_ipi; | ||
329 | struct evtchn_close close; | ||
330 | struct evtchn_send send; | ||
331 | struct evtchn_status status; | ||
332 | struct evtchn_bind_vcpu bind_vcpu; | ||
333 | struct evtchn_unmask unmask; | ||
334 | } u; | ||
335 | }; | ||
336 | typedef struct evtchn_op evtchn_op_t; | ||
337 | DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); | ||
338 | |||
339 | /* | ||
340 | * 2-level ABI | ||
341 | */ | ||
342 | |||
343 | #define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64) | ||
344 | |||
345 | /* | ||
346 | * FIFO ABI | ||
347 | */ | ||
348 | |||
349 | /* Events may have priorities from 0 (highest) to 15 (lowest). */ | ||
350 | #define EVTCHN_FIFO_PRIORITY_MAX 0 | ||
351 | #define EVTCHN_FIFO_PRIORITY_DEFAULT 7 | ||
352 | #define EVTCHN_FIFO_PRIORITY_MIN 15 | ||
353 | |||
354 | #define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1) | ||
355 | |||
356 | typedef uint32_t event_word_t; | ||
357 | |||
358 | #define EVTCHN_FIFO_PENDING 31 | ||
359 | #define EVTCHN_FIFO_MASKED 30 | ||
360 | #define EVTCHN_FIFO_LINKED 29 | ||
361 | #define EVTCHN_FIFO_BUSY 28 | ||
362 | |||
363 | #define EVTCHN_FIFO_LINK_BITS 17 | ||
364 | #define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1) | ||
365 | |||
366 | #define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS) | ||
367 | |||
368 | struct evtchn_fifo_control_block { | ||
369 | uint32_t ready; | ||
370 | uint32_t _rsvd; | ||
371 | uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; | ||
372 | }; | ||
373 | typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t; | ||
374 | |||
375 | #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ | ||
376 | |||
377 | /* | ||
378 | * Local variables: | ||
379 | * mode: C | ||
380 | * c-file-style: "BSD" | ||
381 | * c-basic-offset: 4 | ||
382 | * tab-width: 4 | ||
383 | * indent-tabs-mode: nil | ||
384 | * End: | ||
385 | */ | ||
386 |
include/xen/events.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | #ifndef _XEN_EVENTS_H | ||
3 | #define _XEN_EVENTS_H | ||
4 | |||
5 | #include <xen/interface/event_channel.h> | ||
6 | #include <asm/xen/hypercall.h> | ||
7 | #include <asm/xen/events.h> | ||
8 | |||
9 | static inline void notify_remote_via_evtchn(int port) | ||
10 | { | ||
11 | struct evtchn_send send = { .port = port }; | ||
12 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); | ||
13 | } | ||
14 | |||
15 | #endif /* _XEN_EVENTS_H */ | ||
16 |
include/xen/hvm.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /* Simple wrappers around HVM functions */ | ||
3 | #ifndef XEN_HVM_H__ | ||
4 | #define XEN_HVM_H__ | ||
5 | |||
6 | #include <xen/interface/hvm/params.h> | ||
7 | #include <asm/xen/hypercall.h> | ||
8 | |||
9 | static const char *param_name(int op) | ||
10 | { | ||
11 | #define PARAM(x) [HVM_PARAM_##x] = #x | ||
12 | static const char *const names[] = { | ||
13 | PARAM(CALLBACK_IRQ), | ||
14 | PARAM(STORE_PFN), | ||
15 | PARAM(STORE_EVTCHN), | ||
16 | PARAM(PAE_ENABLED), | ||
17 | PARAM(IOREQ_PFN), | ||
18 | PARAM(BUFIOREQ_PFN), | ||
19 | PARAM(TIMER_MODE), | ||
20 | PARAM(HPET_ENABLED), | ||
21 | PARAM(IDENT_PT), | ||
22 | PARAM(DM_DOMAIN), | ||
23 | PARAM(ACPI_S_STATE), | ||
24 | PARAM(VM86_TSS), | ||
25 | PARAM(VPT_ALIGN), | ||
26 | PARAM(CONSOLE_PFN), | ||
27 | PARAM(CONSOLE_EVTCHN), | ||
28 | }; | ||
29 | #undef PARAM | ||
30 | |||
31 | if (op >= ARRAY_SIZE(names)) | ||
32 | return "unknown"; | ||
33 | |||
34 | if (!names[op]) | ||
35 | return "reserved"; | ||
36 | |||
37 | return names[op]; | ||
38 | } | ||
39 | static inline int hvm_get_parameter(int idx, uint64_t *value) | ||
40 | { | ||
41 | struct xen_hvm_param xhv; | ||
42 | int r; | ||
43 | |||
44 | xhv.domid = DOMID_SELF; | ||
45 | xhv.index = idx; | ||
46 | r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); | ||
47 | if (r < 0) { | ||
48 | pr_err("Cannot get hvm parameter %s (%d): %d!\n", | ||
49 | param_name(idx), idx, r); | ||
50 | return r; | ||
51 | } | ||
52 | *value = xhv.value; | ||
53 | return r; | ||
54 | } | ||
55 | |||
56 | #define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 | ||
57 | #define HVM_CALLBACK_VIA_TYPE_SHIFT 56 | ||
58 | #define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ | ||
59 | HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) | ||
60 | |||
61 | #endif /* XEN_HVM_H__ */ | ||
62 |
include/xen/interface/event_channel.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /****************************************************************************** | ||
3 | * event_channel.h | ||
4 | * | ||
5 | * Event channels between domains. | ||
6 | * | ||
7 | * Copyright (c) 2003-2004, K A Fraser. | ||
8 | */ | ||
9 | |||
10 | #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
11 | #define __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
12 | |||
13 | #include <xen/interface/xen.h> | ||
14 | |||
15 | typedef uint32_t evtchn_port_t; | ||
16 | DEFINE_GUEST_HANDLE(evtchn_port_t); | ||
17 | |||
18 | /* | ||
19 | * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as | ||
20 | * accepting interdomain bindings from domain <remote_dom>. A fresh port | ||
21 | * is allocated in <dom> and returned as <port>. | ||
22 | * NOTES: | ||
23 | * 1. If the caller is unprivileged then <dom> must be DOMID_SELF. | ||
24 | * 2. <rdom> may be DOMID_SELF, allowing loopback connections. | ||
25 | */ | ||
26 | #define EVTCHNOP_alloc_unbound 6 | ||
27 | struct evtchn_alloc_unbound { | ||
28 | /* IN parameters */ | ||
29 | domid_t dom, remote_dom; | ||
30 | /* OUT parameters */ | ||
31 | evtchn_port_t port; | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between | ||
36 | * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify | ||
37 | * a port that is unbound and marked as accepting bindings from the calling | ||
38 | * domain. A fresh port is allocated in the calling domain and returned as | ||
39 | * <local_port>. | ||
40 | * NOTES: | ||
41 | * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections. | ||
42 | */ | ||
43 | #define EVTCHNOP_bind_interdomain 0 | ||
44 | struct evtchn_bind_interdomain { | ||
45 | /* IN parameters. */ | ||
46 | domid_t remote_dom; | ||
47 | evtchn_port_t remote_port; | ||
48 | /* OUT parameters. */ | ||
49 | evtchn_port_t local_port; | ||
50 | }; | ||
51 | |||
52 | /* | ||
53 | * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified | ||
54 | * vcpu. | ||
55 | * NOTES: | ||
56 | * 1. A virtual IRQ may be bound to at most one event channel per vcpu. | ||
57 | * 2. The allocated event channel is bound to the specified vcpu. The binding | ||
58 | * may not be changed. | ||
59 | */ | ||
60 | #define EVTCHNOP_bind_virq 1 | ||
61 | struct evtchn_bind_virq { | ||
62 | /* IN parameters. */ | ||
63 | uint32_t virq; | ||
64 | uint32_t vcpu; | ||
65 | /* OUT parameters. */ | ||
66 | evtchn_port_t port; | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>. | ||
71 | * NOTES: | ||
72 | * 1. A physical IRQ may be bound to at most one event channel per domain. | ||
73 | * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. | ||
74 | */ | ||
75 | #define EVTCHNOP_bind_pirq 2 | ||
76 | struct evtchn_bind_pirq { | ||
77 | /* IN parameters. */ | ||
78 | uint32_t pirq; | ||
79 | #define BIND_PIRQ__WILL_SHARE 1 | ||
80 | uint32_t flags; /* BIND_PIRQ__* */ | ||
81 | /* OUT parameters. */ | ||
82 | evtchn_port_t port; | ||
83 | }; | ||
84 | |||
85 | /* | ||
86 | * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. | ||
87 | * NOTES: | ||
88 | * 1. The allocated event channel is bound to the specified vcpu. The binding | ||
89 | * may not be changed. | ||
90 | */ | ||
91 | #define EVTCHNOP_bind_ipi 7 | ||
92 | struct evtchn_bind_ipi { | ||
93 | uint32_t vcpu; | ||
94 | /* OUT parameters. */ | ||
95 | evtchn_port_t port; | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * EVTCHNOP_close: Close a local event channel <port>. If the channel is | ||
100 | * interdomain then the remote end is placed in the unbound state | ||
101 | * (EVTCHNSTAT_unbound), awaiting a new connection. | ||
102 | */ | ||
103 | #define EVTCHNOP_close 3 | ||
104 | struct evtchn_close { | ||
105 | /* IN parameters. */ | ||
106 | evtchn_port_t port; | ||
107 | }; | ||
108 | |||
109 | /* | ||
110 | * EVTCHNOP_send: Send an event to the remote end of the channel whose local | ||
111 | * endpoint is <port>. | ||
112 | */ | ||
113 | #define EVTCHNOP_send 4 | ||
114 | struct evtchn_send { | ||
115 | /* IN parameters. */ | ||
116 | evtchn_port_t port; | ||
117 | }; | ||
118 | |||
/*
 * EVTCHNOP_status: Get the current status of the communication channel which
 * has an endpoint at <dom, port>.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may obtain the status of an event
 *     channel for which <dom> is not DOMID_SELF.
 */
#define EVTCHNOP_status 5
struct evtchn_status {
        /* IN parameters */
        domid_t  dom;
        evtchn_port_t port;
        /* OUT parameters */
#define EVTCHNSTAT_closed	0  /* Channel is not in use.                 */
#define EVTCHNSTAT_unbound	1  /* Channel is waiting interdom connection.*/
#define EVTCHNSTAT_interdomain	2  /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq		3  /* Channel is bound to a phys IRQ line.   */
#define EVTCHNSTAT_virq		4  /* Channel is bound to a virtual IRQ line */
#define EVTCHNSTAT_ipi		5  /* Channel is bound to a virtual IPI line */
        uint32_t status;
        uint32_t vcpu;             /* VCPU to which this channel is bound.   */
        union {
                struct {
                        domid_t dom;
                } unbound;                 /* EVTCHNSTAT_unbound */
                struct {
                        domid_t dom;
                        evtchn_port_t port;
                } interdomain;             /* EVTCHNSTAT_interdomain */
                uint32_t pirq;             /* EVTCHNSTAT_pirq        */
                uint32_t virq;             /* EVTCHNSTAT_virq        */
        } u;                       /* Valid member selected by <status>. */
};
153 | |||
/*
 * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
 * event is pending.
 * NOTES:
 *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
 *     the binding. This binding cannot be changed.
 *  2. All other channels notify vcpu0 by default. This default is set when
 *     the channel is allocated (a port that is freed and subsequently reused
 *     has its binding reset to vcpu0).
 */
#define EVTCHNOP_bind_vcpu 8
struct evtchn_bind_vcpu {
        /* IN parameters. */
        evtchn_port_t port;
        uint32_t vcpu;
};
170 | |||
/*
 * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
 * a notification to the appropriate VCPU if an event is pending.
 */
#define EVTCHNOP_unmask 9
struct evtchn_unmask {
        /* IN parameters. */
        evtchn_port_t port;
};
180 | |||
/*
 * EVTCHNOP_reset: Close all event channels associated with specified domain.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
 */
#define EVTCHNOP_reset 10
struct evtchn_reset {
        /* IN parameters. */
        domid_t dom;
};
typedef struct evtchn_reset evtchn_reset_t;
193 | |||
/*
 * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
 */
#define EVTCHNOP_init_control 11
struct evtchn_init_control {
        /* IN parameters. */
        uint64_t control_gfn;      /* Guest frame holding the control block. */
        uint32_t offset;           /* Byte offset of the block within the frame. */
        uint32_t vcpu;
        /* OUT parameters. */
        uint8_t link_bits;
        uint8_t _pad[7];
};
207 | |||
/*
 * EVTCHNOP_expand_array: add an additional page to the event array.
 */
#define EVTCHNOP_expand_array 12
struct evtchn_expand_array {
        /* IN parameters. */
        uint64_t array_gfn;
};
216 | |||
/*
 * EVTCHNOP_set_priority: set the priority for an event channel.
 */
#define EVTCHNOP_set_priority 13
struct evtchn_set_priority {
        /* IN parameters. */
        uint32_t port;
        uint32_t priority;         /* EVTCHN_FIFO_PRIORITY_MAX..MIN (0..15). */
};
226 | |||
/* Multiplexed hypercall argument: <cmd> selects which union member is used. */
struct evtchn_op {
        uint32_t cmd; /* EVTCHNOP_* */
        union {
                struct evtchn_alloc_unbound    alloc_unbound;
                struct evtchn_bind_interdomain bind_interdomain;
                struct evtchn_bind_virq        bind_virq;
                struct evtchn_bind_pirq        bind_pirq;
                struct evtchn_bind_ipi         bind_ipi;
                struct evtchn_close            close;
                struct evtchn_send             send;
                struct evtchn_status           status;
                struct evtchn_bind_vcpu        bind_vcpu;
                struct evtchn_unmask           unmask;
        } u;
};
DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
243 | |||
/*
 * 2-level ABI
 */

#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)

/*
 * FIFO ABI
 */

/* Events may have priorities from 0 (highest) to 15 (lowest). */
#define EVTCHN_FIFO_PRIORITY_MAX     0
#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
#define EVTCHN_FIFO_PRIORITY_MIN     15

#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)

/* Per-event word in the event array. */
typedef uint32_t event_word_t;

/* Bit positions of the flag bits within an event word. */
#define EVTCHN_FIFO_PENDING 31
#define EVTCHN_FIFO_MASKED  30
#define EVTCHN_FIFO_LINKED  29
#define EVTCHN_FIFO_BUSY    28

/* Low bits of an event word form the link to the next queued event. */
#define EVTCHN_FIFO_LINK_BITS 17
#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)

#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)

struct evtchn_fifo_control_block {
        uint32_t ready;  /* Bitmask of queues with pending events. */
        uint32_t _rsvd;
        event_word_t head[EVTCHN_FIFO_MAX_QUEUES];
};
278 | |||
279 | #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ | ||
280 |
include/xen/interface/grant_table.h
File was created | 1 | /****************************************************************************** | |
2 | * grant_table.h | ||
3 | * | ||
4 | * Interface for granting foreign access to page frames, and receiving | ||
5 | * page-ownership transfers. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
8 | * of this software and associated documentation files (the "Software"), to | ||
9 | * deal in the Software without restriction, including without limitation the | ||
10 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
11 | * sell copies of the Software, and to permit persons to whom the Software is | ||
12 | * furnished to do so, subject to the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice shall be included in | ||
15 | * all copies or substantial portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Copyright (c) 2004, K A Fraser | ||
26 | */ | ||
27 | |||
28 | #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ | ||
29 | #define __XEN_PUBLIC_GRANT_TABLE_H__ | ||
30 | |||
31 | #include <xen/interface/xen.h> | ||
32 | |||
33 | /*********************************** | ||
34 | * GRANT TABLE REPRESENTATION | ||
35 | */ | ||
36 | |||
37 | /* Some rough guidelines on accessing and updating grant-table entries | ||
38 | * in a concurrency-safe manner. For more information, Linux contains a | ||
39 | * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). | ||
40 | * | ||
41 | * NB. WMB is a no-op on current-generation x86 processors. However, a | ||
42 | * compiler barrier will still be required. | ||
43 | * | ||
44 | * Introducing a valid entry into the grant table: | ||
45 | * 1. Write ent->domid. | ||
46 | * 2. Write ent->frame: | ||
47 | * GTF_permit_access: Frame to which access is permitted. | ||
48 | * GTF_accept_transfer: Pseudo-phys frame slot being filled by new | ||
49 | * frame, or zero if none. | ||
50 | * 3. Write memory barrier (WMB). | ||
51 | * 4. Write ent->flags, inc. valid type. | ||
52 | * | ||
53 | * Invalidating an unused GTF_permit_access entry: | ||
54 | * 1. flags = ent->flags. | ||
55 | * 2. Observe that !(flags & (GTF_reading|GTF_writing)). | ||
56 | * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). | ||
57 | * NB. No need for WMB as reuse of entry is control-dependent on success of | ||
58 | * step 3, and all architectures guarantee ordering of ctrl-dep writes. | ||
59 | * | ||
60 | * Invalidating an in-use GTF_permit_access entry: | ||
61 | * This cannot be done directly. Request assistance from the domain controller | ||
62 | * which can set a timeout on the use of a grant entry and take necessary | ||
63 | * action. (NB. This is not yet implemented!). | ||
64 | * | ||
65 | * Invalidating an unused GTF_accept_transfer entry: | ||
66 | * 1. flags = ent->flags. | ||
67 | * 2. Observe that !(flags & GTF_transfer_committed). [*] | ||
68 | * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). | ||
69 | * NB. No need for WMB as reuse of entry is control-dependent on success of | ||
70 | * step 3, and all architectures guarantee ordering of ctrl-dep writes. | ||
71 | * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. | ||
72 | * The guest must /not/ modify the grant entry until the address of the | ||
73 | * transferred frame is written. It is safe for the guest to spin waiting | ||
74 | * for this to occur (detect by observing GTF_transfer_completed in | ||
75 | * ent->flags). | ||
76 | * | ||
77 | * Invalidating a committed GTF_accept_transfer entry: | ||
78 | * 1. Wait for (ent->flags & GTF_transfer_completed). | ||
79 | * | ||
80 | * Changing a GTF_permit_access from writable to read-only: | ||
81 | * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. | ||
82 | * | ||
83 | * Changing a GTF_permit_access from read-only to writable: | ||
84 | * Use SMP-safe bit-setting instruction. | ||
85 | */ | ||
86 | |||
/*
 * Reference to a grant entry in a specified domain's grant table.
 */
typedef uint32_t grant_ref_t;

/*
 * A grant table comprises a packed array of grant entries in one or more
 * page frames shared between Xen and a guest.
 * [XEN]: This field is written by Xen and read by the sharing guest.
 * [GST]: This field is written by the guest and read by Xen.
 */

/*
 * Version 1 of the grant table entry structure is maintained purely
 * for backwards compatibility.  New guests should use version 2.
 */
struct grant_entry_v1 {
        /* GTF_xxx: various type and flag information.  [XEN,GST] */
        uint16_t flags;
        /* The domain being granted foreign privileges.  [GST] */
        domid_t  domid;
        /*
         * GTF_permit_access: Frame that @domid is allowed to map and access.  [GST]
         * GTF_accept_transfer: Frame whose ownership transferred by @domid.  [XEN]
         */
        uint32_t frame;
};
114 | |||
/*
 * Type of grant entry.
 *  GTF_invalid: This grant entry grants no privileges.
 *  GTF_permit_access: Allow @domid to map/access @frame.
 *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
 *                       to this guest. Xen writes the page number to @frame.
 *  GTF_transitive: Allow @domid to transitively access a subrange of
 *                  @trans_grant in @trans_domid.  No mappings are allowed.
 */
#define GTF_invalid         (0U<<0)
#define GTF_permit_access   (1U<<0)
#define GTF_accept_transfer (2U<<0)
#define GTF_transitive      (3U<<0)
#define GTF_type_mask       (3U<<0)

/*
 * Subflags for GTF_permit_access.
 *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
 *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
 *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
 *  GTF_sub_page: Grant access to only a subrange of the page.  @domid
 *                will only be allowed to copy from the grant, and not
 *                map it. [GST]
 */
#define _GTF_readonly       (2)
#define GTF_readonly        (1U<<_GTF_readonly)
#define _GTF_reading        (3)
#define GTF_reading         (1U<<_GTF_reading)
#define _GTF_writing        (4)
#define GTF_writing         (1U<<_GTF_writing)
#define _GTF_sub_page       (8)
#define GTF_sub_page        (1U<<_GTF_sub_page)

/*
 * Subflags for GTF_accept_transfer:
 *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
 *      to transferring ownership of a page frame.  When a guest sees this flag
 *      it must /not/ modify the grant entry until GTF_transfer_completed is
 *      set by Xen.
 *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
 *      after reading GTF_transfer_committed.  Xen will always write the frame
 *      address, followed by ORing this flag, in a timely manner.
 */
#define _GTF_transfer_committed (2)
#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
162 | |||
/*
 * Version 2 grant table entries.  These fulfil the same role as
 * version 1 entries, but can represent more complicated operations.
 * Any given domain will have either a version 1 or a version 2 table,
 * and every entry in the table will be the same version.
 *
 * The interface by which domains use grant references does not depend
 * on the grant table version in use by the other domain.
 */

/*
 * Version 1 and version 2 grant entries share a common prefix.  The
 * fields of the prefix are documented as part of struct
 * grant_entry_v1.
 */
struct grant_entry_header {
        uint16_t flags;
        domid_t  domid;
};

/*
 * Version 2 of the grant entry structure, here is a union because three
 * different types are supported: full_page, sub_page and transitive.
 */
union grant_entry_v2 {
        struct grant_entry_header hdr;

        /*
         * This member is used for V1-style full page grants, where either:
         *
         * -- hdr.type is GTF_accept_transfer, or
         * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
         *
         * In that case, the frame field has the same semantics as the
         * field of the same name in the V1 entry structure.
         */
        struct {
                struct grant_entry_header hdr;
                uint32_t pad0;
                uint64_t frame;
        } full_page;

        /*
         * If the grant type is GTF_grant_access and GTF_sub_page is set,
         * @domid is allowed to access bytes [@page_off,@page_off+@length)
         * in frame @frame.
         */
        struct {
                struct grant_entry_header hdr;
                uint16_t page_off;
                uint16_t length;
                uint64_t frame;
        } sub_page;

        /*
         * If the grant is GTF_transitive, @domid is allowed to use the
         * grant @gref in domain @trans_domid, as if it was the local
         * domain.  Obviously, the transitive access must be compatible
         * with the original grant.
         */
        struct {
                struct grant_entry_header hdr;
                domid_t trans_domid;
                uint16_t pad0;
                grant_ref_t gref;
        } transitive;

        uint32_t __spacer[4]; /* Pad to a power of two */
};

typedef uint16_t grant_status_t;
234 | |||
/***********************************
 * GRANT TABLE QUERIES AND USES
 */

/*
 * Handle to track a mapping created via a grant reference.
 */
typedef uint32_t grant_handle_t;

/*
 * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
 * by devices and/or host CPUs. If successful, <handle> is a tracking number
 * that must be presented later to destroy the mapping(s). On error, <handle>
 * is a negative status code.
 * NOTES:
 *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
 *     via which I/O devices may access the granted frame.
 *  2. If GNTMAP_host_map is specified then a mapping will be added at
 *     either a host virtual address in the current address space, or at
 *     a PTE at the specified machine address.  The type of mapping to
 *     perform is selected through the GNTMAP_contains_pte flag, and the
 *     address is specified in <host_addr>.
 *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
 *     host mapping is destroyed by other means then it is *NOT* guaranteed
 *     to be accounted to the correct grant reference!
 */
#define GNTTABOP_map_grant_ref        0
struct gnttab_map_grant_ref {
        /* IN parameters. */
        uint64_t host_addr;
        uint32_t flags;               /* GNTMAP_* */
        grant_ref_t ref;
        domid_t  dom;
        /* OUT parameters. */
        int16_t  status;              /* GNTST_* */
        grant_handle_t handle;
        uint64_t dev_bus_addr;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
274 | |||
/*
 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
 * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
 * field is ignored. If non-zero, they must refer to a device/host mapping
 * that is tracked by <handle>.
 * NOTES:
 *  1. The call may fail in an undefined manner if either mapping is not
 *     tracked by <handle>.
 *  2. After executing a batch of unmaps, it is guaranteed that no stale
 *     mappings will remain in the device or host TLBs.
 */
#define GNTTABOP_unmap_grant_ref      1
struct gnttab_unmap_grant_ref {
        /* IN parameters. */
        uint64_t host_addr;
        uint64_t dev_bus_addr;
        grant_handle_t handle;
        /* OUT parameters. */
        int16_t  status;              /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
296 | |||
/*
 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
 * <nr_frames> pages. The frame addresses are written to the <frame_list>.
 * Only <nr_frames> addresses are written, even if the table is larger.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 *  3. Xen may not support more than a single grant-table page per domain.
 */
#define GNTTABOP_setup_table          2
struct gnttab_setup_table {
        /* IN parameters. */
        domid_t  dom;
        uint32_t nr_frames;
        /* OUT parameters. */
        int16_t  status;              /* GNTST_* */
        GUEST_HANDLE(xen_pfn_t) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
316 | |||
/*
 * GNTTABOP_dump_table: Dump the contents of the grant table to the
 * xen console. Debugging use only.
 */
#define GNTTABOP_dump_table           3
struct gnttab_dump_table {
        /* IN parameters. */
        domid_t dom;
        /* OUT parameters. */
        int16_t status;               /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
329 | |||
/*
 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
 * foreign domain has previously registered its interest in the transfer via
 * <domid, ref>.
 *
 * Note that, even if the transfer fails, the specified page no longer belongs
 * to the calling domain *unless* the error is GNTST_bad_page.
 */
#define GNTTABOP_transfer             4
struct gnttab_transfer {
        /* IN parameters. */
        xen_pfn_t mfn;
        domid_t   domid;
        grant_ref_t ref;
        /* OUT parameters. */
        int16_t   status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
348 | |||
/*
 * GNTTABOP_copy: Hypervisor based copy.
 * Source and destinations can be either MFNs or, for foreign domains,
 * grant references. The foreign domain has to grant read/write access
 * in its grant table.
 *
 * The flags specify what type source and destinations are (either MFN
 * or grant reference).
 *
 * Note that this can also be used to copy data between two domains
 * via a third party if the source and destination domains had previously
 * grant appropriate access to their pages to the third party.
 *
 * source_offset specifies an offset in the source frame, dest_offset
 * the offset in the target frame and len specifies the number of
 * bytes to be copied.
 */

#define _GNTCOPY_source_gref (0)
#define GNTCOPY_source_gref  (1<<_GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref   (1)
#define GNTCOPY_dest_gref    (1<<_GNTCOPY_dest_gref)

#define GNTTABOP_copy                 5
struct gnttab_copy {
        /* IN parameters. */
        struct {
                union {
                        grant_ref_t ref;
                        xen_pfn_t   gmfn;
                } u;                  /* Interpretation selected by <flags>. */
                domid_t  domid;
                uint16_t offset;
        } source, dest;
        uint16_t len;
        uint16_t flags;               /* GNTCOPY_* */
        /* OUT parameters. */
        int16_t  status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
389 | |||
/*
 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
 * grant table.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 */
#define GNTTABOP_query_size           6
struct gnttab_query_size {
        /* IN parameters. */
        domid_t  dom;
        /* OUT parameters. */
        uint32_t nr_frames;
        uint32_t max_nr_frames;
        int16_t  status;              /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
407 | |||
/*
 * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
 * tracked by <handle> but atomically replace the page table entry with one
 * pointing to the machine address under <new_addr>. <new_addr> will be
 * redirected to the null entry.
 * NOTES:
 *  1. The call may fail in an undefined manner if either mapping is not
 *     tracked by <handle>.
 *  2. After executing a batch of unmaps, it is guaranteed that no stale
 *     mappings will remain in the device or host TLBs.
 */
#define GNTTABOP_unmap_and_replace    7
struct gnttab_unmap_and_replace {
        /* IN parameters. */
        uint64_t host_addr;
        uint64_t new_addr;
        grant_handle_t handle;
        /* OUT parameters. */
        int16_t  status;              /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
429 | |||
/*
 * GNTTABOP_set_version: Request a particular version of the grant
 * table shared table structure. This operation can only be performed
 * once in any given domain. It must be performed before any grants
 * are activated; otherwise, the domain will be stuck with version 1.
 * The only defined versions are 1 and 2.
 */
#define GNTTABOP_set_version          8
struct gnttab_set_version {
        /* IN parameters */
        uint32_t version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
443 | |||
/*
 * GNTTABOP_get_status_frames: Get the list of frames used to store grant
 * status for <dom>. In grant format version 2, the status is separated
 * from the other shared grant fields to allow more efficient synchronization
 * using barriers instead of atomic cmpexch operations.
 * <nr_frames> specify the size of vector <frame_list>.
 * The frame addresses are returned in the <frame_list>.
 * Only <nr_frames> addresses are returned, even if the table is larger.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 */
#define GNTTABOP_get_status_frames    9
struct gnttab_get_status_frames {
        /* IN parameters. */
        uint32_t nr_frames;
        domid_t  dom;
        /* OUT parameters. */
        int16_t  status;              /* GNTST_* */
        GUEST_HANDLE(uint64_t) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
466 | |||
/*
 * GNTTABOP_get_version: Get the grant table version which is in
 * effect for domain <dom>.
 */
#define GNTTABOP_get_version          10
struct gnttab_get_version {
        /* IN parameters */
        domid_t  dom;
        uint16_t pad;
        /* OUT parameters */
        uint32_t version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
480 | |||
/*
 * Issue one or more cache maintenance operations on a portion of a
 * page granted to the calling domain by a foreign domain.
 */
#define GNTTABOP_cache_flush          12
struct gnttab_cache_flush {
        union {
                uint64_t dev_bus_addr;
                grant_ref_t ref;
        } a;                          /* <ref> used iff GNTTAB_CACHE_SOURCE_GREF set in <op>. */
        uint16_t offset;   /* offset from start of grant */
        uint16_t length;   /* size within the grant */
#define GNTTAB_CACHE_CLEAN       (1<<0)
#define GNTTAB_CACHE_INVAL       (1<<1)
#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
        uint32_t op;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
499 | |||
/*
 * Bitfield values for update_pin_status.flags.
 */
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map      (0)
#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
/* Map the grant entry for access by host CPUs. */
#define _GNTMAP_host_map        (1)
#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
/* Accesses to the granted frame will be restricted to read-only access. */
#define _GNTMAP_readonly        (2)
#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
/*
 * GNTMAP_host_map subflag:
 *  0 => The host mapping is usable only by the guest OS.
 *  1 => The host mapping is usable by guest OS + current application.
 */
#define _GNTMAP_application_map (3)
#define GNTMAP_application_map  (1<<_GNTMAP_application_map)

/*
 * GNTMAP_contains_pte subflag:
 *  0 => This map request contains a host virtual address.
 *  1 => This map request contains the machine address of the PTE to update.
 */
#define _GNTMAP_contains_pte    (4)
#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)

/*
 * Bits to be placed in guest kernel available PTE bits (architecture
 * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
 */
#define _GNTMAP_guest_avail0    (16)
#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
534 | |||
/*
 * Values for error status returns. All errors are -ve.
 */
#define GNTST_okay              (0)  /* Normal return.                        */
#define GNTST_general_error     (-1) /* General undefined error.              */
#define GNTST_bad_domain        (-2) /* Unrecognised domain id.               */
#define GNTST_bad_gntref        (-3) /* Unrecognised or inappropriate gntref. */
#define GNTST_bad_handle        (-4) /* Unrecognised or inappropriate handle. */
#define GNTST_bad_virt_addr     (-5) /* Inappropriate virtual address to map. */
#define GNTST_bad_dev_addr      (-6) /* Inappropriate device address to unmap.*/
#define GNTST_no_device_space   (-7) /* Out of space in I/O MMU.              */
#define GNTST_permission_denied (-8) /* Not enough privilege for operation.   */
#define GNTST_bad_page          (-9) /* Specified page was invalid for op.    */
#define GNTST_bad_copy_arg     (-10) /* copy arguments cross page boundary.   */
#define GNTST_address_too_big  (-11) /* transfer page address too large.      */
#define GNTST_eagain           (-12) /* Operation not done; try again.        */

/* Human-readable strings indexed by -GNTST_* status value. */
#define GNTTABOP_error_msgs {                           \
        "okay",                                         \
        "undefined error",                              \
        "unrecognised domain id",                       \
        "invalid grant reference",                      \
        "invalid mapping handle",                       \
        "invalid virtual address",                      \
        "invalid device address",                       \
        "no spare translation slot in the I/O MMU",     \
        "permission denied",                            \
        "bad page",                                     \
        "copy arguments cross page boundary",           \
        "page address size too large",                  \
        "operation not done; try again"                 \
}
567 | |||
568 | #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ | ||
569 |
include/xen/interface/hvm/hvm_op.h
File was created | 1 | /* | |
2 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
3 | * of this software and associated documentation files (the "Software"), to | ||
4 | * deal in the Software without restriction, including without limitation the | ||
5 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
6 | * sell copies of the Software, and to permit persons to whom the Software is | ||
7 | * furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
15 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
18 | * DEALINGS IN THE SOFTWARE. | ||
19 | */ | ||
20 | |||
21 | #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ | ||
22 | #define __XEN_PUBLIC_HVM_HVM_OP_H__ | ||
23 | |||
/* Get/set subcommands: the second argument of the hypercall is a
 * pointer to a xen_hvm_param struct. */
#define HVMOP_set_param 0
#define HVMOP_get_param 1
struct xen_hvm_param {
        domid_t  domid;    /* IN */
        uint32_t index;    /* IN */
        uint64_t value;    /* IN/OUT */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param);
34 | |||
/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying 9
struct xen_hvm_pagetable_dying {
        /* Domain with a pagetable about to be destroyed. */
        domid_t domid;
        /* guest physical address of the toplevel pagetable dying */
        aligned_u64 gpa;
};
typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_pagetable_dying_t);
45 | |||
46 | enum hvmmem_type_t { | ||
47 | HVMMEM_ram_rw, /* Normal read/write guest RAM */ | ||
48 | HVMMEM_ram_ro, /* Read-only; writes are discarded */ | ||
49 | HVMMEM_mmio_dm, /* Reads and write go to the device model */ | ||
50 | }; | ||
51 | |||
52 | #define HVMOP_get_mem_type 15 | ||
53 | /* Return hvmmem_type_t for the specified pfn. */ | ||
54 | struct xen_hvm_get_mem_type { | ||
55 | /* Domain to be queried. */ | ||
56 | domid_t domid; | ||
57 | /* OUT variable. */ | ||
58 | uint16_t mem_type; | ||
59 | uint16_t pad[2]; /* align next field on 8-byte boundary */ | ||
60 | /* IN variable. */ | ||
61 | uint64_t pfn; | ||
62 | }; | ||
63 | DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type); | ||
64 | |||
65 | #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ | ||
66 |
include/xen/interface/hvm/params.h
File was created | 1 | /* | |
2 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
3 | * of this software and associated documentation files (the "Software"), to | ||
4 | * deal in the Software without restriction, including without limitation the | ||
5 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
6 | * sell copies of the Software, and to permit persons to whom the Software is | ||
7 | * furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
15 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
18 | * DEALINGS IN THE SOFTWARE. | ||
19 | */ | ||
20 | |||
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__

#include <xen/interface/hvm/hvm_op.h>

/*
 * Parameter space for HVMOP_{set,get}_param.
 * Indices and encodings are part of the Xen ABI; keep in sync with upstream.
 */

#define HVM_PARAM_CALLBACK_IRQ 0
/*
 * How should CPU0 event-channel notifications be delivered?
 *
 * If val == 0 then CPU0 event-channel notifications are not delivered.
 * If val != 0, val[63:56] encodes the type, as follows:
 */

#define HVM_PARAM_CALLBACK_TYPE_GSI 0
/*
 * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
 * and disables all notifications.
 */

#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
/*
 * val[55:0] is a delivery PCI INTx line:
 * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
 */

#if defined(__i386__) || defined(__x86_64__)
#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
/*
 * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
 * if this delivery method is available.
 */
#elif defined(__arm__) || defined(__aarch64__)
#define HVM_PARAM_CALLBACK_TYPE_PPI 2
/*
 * val[55:16] needs to be zero.
 * val[15:8] is interrupt flag of the PPI used by event-channel:
 *  bit 8: the PPI is edge(1) or level(0) triggered
 *  bit 9: the PPI is active low(1) or high(0)
 * val[7:0] is a PPI number used by event-channel.
 * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
 * the notification is handled by the interrupt controller.
 */
#endif

/* Xenstore: PFN of the shared ring page and its event channel. */
#define HVM_PARAM_STORE_PFN 1
#define HVM_PARAM_STORE_EVTCHN 2

/* Boolean: is PAE enabled for this guest? (x86) */
#define HVM_PARAM_PAE_ENABLED 4

/* PFN of the synchronous I/O request (ioreq) shared page. */
#define HVM_PARAM_IOREQ_PFN 5

/* PFN of the buffered I/O request (ioreq) shared page. */
#define HVM_PARAM_BUFIOREQ_PFN 6

/*
 * Set mode for virtual timers (currently x86 only):
 *  delay_for_missed_ticks (default):
 *   Do not advance a vcpu's time beyond the correct delivery time for
 *   interrupts that have been missed due to preemption. Deliver missed
 *   interrupts when the vcpu is rescheduled and advance the vcpu's virtual
 *   time stepwise for each one.
 *  no_delay_for_missed_ticks:
 *   As above, missed interrupts are delivered, but guest time always tracks
 *   wallclock (i.e., real) time while doing so.
 *  no_missed_ticks_pending:
 *   No missed interrupts are held pending. Instead, to ensure ticks are
 *   delivered at some non-zero rate, if we detect missed ticks then the
 *   internal tick alarm is not disabled if the VCPU is preempted during the
 *   next tick period.
 *  one_missed_tick_pending:
 *   Missed interrupts are collapsed together and delivered as one 'late tick'.
 *   Guest time always tracks wallclock (i.e., real) time.
 */
#define HVM_PARAM_TIMER_MODE 10
#define HVMPTM_delay_for_missed_ticks    0
#define HVMPTM_no_delay_for_missed_ticks 1
#define HVMPTM_no_missed_ticks_pending   2
#define HVMPTM_one_missed_tick_pending   3

/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
#define HVM_PARAM_HPET_ENABLED 11

/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12

/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN 13

/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14

/* TSS used on Intel when CR0.PE=0. */
#define HVM_PARAM_VM86_TSS 15

/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
#define HVM_PARAM_VPT_ALIGN 16

/* Console debug shared memory ring and event channel */
#define HVM_PARAM_CONSOLE_PFN 17
#define HVM_PARAM_CONSOLE_EVTCHN 18

/* One past the highest parameter index defined above. */
#define HVM_NR_PARAMS 19

#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
128 |
include/xen/interface/io/console.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /****************************************************************************** | ||
3 | * console.h | ||
4 | * | ||
5 | * Console I/O interface for Xen guest OSes. | ||
6 | * | ||
7 | * Copyright (c) 2005, Keir Fraser | ||
8 | */ | ||
9 | |||
#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
#define __XEN_PUBLIC_IO_CONSOLE_H__

/* Free-running console ring index; reduced modulo the ring size on use. */
typedef uint32_t XENCONS_RING_IDX;

/*
 * Reduce a free-running index to an offset within @ring.  Correct only
 * because both ring sizes below are powers of two.
 */
#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))

/*
 * Layout of the single shared console page: an input ring, an output
 * ring, and the producer/consumer index pairs for each.
 */
struct xencons_interface {
	char in[1024];
	char out[2048];
	XENCONS_RING_IDX in_cons, in_prod;
	XENCONS_RING_IDX out_cons, out_prod;
};

#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
25 |
include/xen/interface/io/ring.h
File was created | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /****************************************************************************** | ||
3 | * ring.h | ||
4 | * | ||
5 | * Shared producer-consumer ring macros. | ||
6 | * | ||
7 | * Tim Deegan and Andrew Warfield November 2004. | ||
8 | */ | ||
9 | |||
#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

#include <xen/interface/grant_table.h>

/* Free-running ring index; wraps naturally and is masked with (size-1). */
typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
		sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
39 | |||
40 | /* | ||
41 | * Macros to make the correct C datatypes for a new kind of ring. | ||
42 | * | ||
43 | * To make a new ring datatype, you need to have two message structures, | ||
44 | * let's say struct request, and struct response already defined. | ||
45 | * | ||
46 | * In a header where you want the ring datatype declared, you then do: | ||
47 | * | ||
48 | * DEFINE_RING_TYPES(mytag, struct request, struct response); | ||
49 | * | ||
50 | * These expand out to give you a set of types, as you can see below. | ||
51 | * The most important of these are: | ||
52 | * | ||
53 | * struct mytag_sring - The shared ring. | ||
54 | * struct mytag_front_ring - The 'front' half of the ring. | ||
55 | * struct mytag_back_ring - The 'back' half of the ring. | ||
56 | * | ||
57 | * To initialize a ring in your code you need to know the location and size | ||
58 | * of the shared memory area (PAGE_SIZE, for instance). To initialise | ||
59 | * the front half: | ||
60 | * | ||
61 | * struct mytag_front_ring front_ring; | ||
62 | * SHARED_RING_INIT((struct mytag_sring *)shared_page); | ||
63 | * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, | ||
64 | * PAGE_SIZE); | ||
65 | * | ||
66 | * Initializing the back follows similarly (note that only the front | ||
67 | * initializes the shared ring): | ||
68 | * | ||
69 | * struct mytag_back_ring back_ring; | ||
70 | * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, | ||
71 | * PAGE_SIZE); | ||
72 | */ | ||
73 | |||
/*
 * Expand to the three ring types for one protocol tag: the shared page
 * layout plus the frontend's and backend's private bookkeeping structs.
 */
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
									\
/* Shared ring entry */							\
union __name##_sring_entry {						\
	__req_t req;							\
	__rsp_t rsp;							\
};									\
									\
/* Shared ring page */							\
struct __name##_sring {							\
	RING_IDX req_prod, req_event;					\
	RING_IDX rsp_prod, rsp_event;					\
	uint8_t pad[48]; /* header is 64 bytes with 4-byte RING_IDX */	\
	union __name##_sring_entry ring[1]; /* variable-length */	\
};									\
									\
/* "Front" end's private variables */					\
struct __name##_front_ring {						\
	RING_IDX req_prod_pvt;	/* requests queued, not yet pushed */	\
	RING_IDX rsp_cons;						\
	unsigned int nr_ents;						\
	struct __name##_sring *sring;					\
};									\
									\
/* "Back" end's private variables */					\
struct __name##_back_ring {						\
	RING_IDX rsp_prod_pvt;	/* responses queued, not yet pushed */	\
	RING_IDX req_cons;						\
	unsigned int nr_ents;						\
	struct __name##_sring *sring;					\
};
105 | |||
106 | /* | ||
107 | * Macros for manipulating rings. | ||
108 | * | ||
109 | * FRONT_RING_whatever works on the "front end" of a ring: here | ||
110 | * requests are pushed on to the ring and responses taken off it. | ||
111 | * | ||
112 | * BACK_RING_whatever works on the "back end" of a ring: here | ||
113 | * requests are taken off the ring and responses put on. | ||
114 | * | ||
115 | * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. | ||
116 | * This is OK in 1-for-1 request-response situations where the | ||
117 | * requestor (front end) never has more than RING_SIZE()-1 | ||
118 | * outstanding requests. | ||
119 | */ | ||
120 | |||
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {					\
	(_s)->req_prod = (_s)->rsp_prod = 0;				\
	(_s)->req_event = (_s)->rsp_event = 1;				\
	memset((_s)->pad, 0, sizeof((_s)->pad));			\
} while(0)

#define FRONT_RING_INIT(_r, _s, __size) do {				\
	(_r)->req_prod_pvt = 0;						\
	(_r)->rsp_cons = 0;						\
	(_r)->nr_ents = __RING_SIZE(_s, __size);			\
	(_r)->sring = (_s);						\
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {				\
	(_r)->rsp_prod_pvt = 0;						\
	(_r)->req_cons = 0;						\
	(_r)->nr_ents = __RING_SIZE(_s, __size);			\
	(_r)->sring = (_s);						\
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {				\
	(_r)->sring = (_s);						\
	(_r)->req_prod_pvt = (_s)->req_prod;				\
	(_r)->rsp_cons = (_s)->rsp_prod;				\
	(_r)->nr_ents = __RING_SIZE(_s, __size);			\
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {				\
	(_r)->sring = (_s);						\
	(_r)->rsp_prod_pvt = (_s)->rsp_prod;				\
	(_r)->req_cons = (_s)->req_prod;				\
	(_r)->nr_ents = __RING_SIZE(_s, __size);			\
} while (0)
156 | |||
/* How big is this ring? */
#define RING_SIZE(_r)							\
	((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)						\
	(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front. )
 */
#define RING_FULL(_r)							\
	(RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
	((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Unconsumed requests as seen by the back end, clamped so that a
 * misbehaving frontend cannot make the count exceed the ring size.
 */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
	({								\
		unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
		unsigned int rsp = RING_SIZE(_r) -			\
			((_r)->req_cons - (_r)->rsp_prod_pvt);		\
		req < rsp ? req : rsp;					\
	})

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)					\
	(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

/*
 * Get a local copy of a request.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
	/* Use volatile to force the copy into _req. */			\
	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);	\
} while (0)

#define RING_GET_RESPONSE(_r, _idx)					\
	(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
	(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)				\
	(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
211 | |||
212 | |||
/* Publish privately queued entries to the shared ring (no notify check). */
#define RING_PUSH_REQUESTS(_r) do {					\
	virt_wmb(); /* back sees requests /before/ updated producer index */ \
	(_r)->sring->req_prod = (_r)->req_prod_pvt;			\
} while (0)

#define RING_PUSH_RESPONSES(_r) do {					\
	virt_wmb(); /* front sees responses /before/ updated producer index */ \
	(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;			\
} while (0)
222 | |||
223 | /* | ||
224 | * Notification hold-off (req_event and rsp_event): | ||
225 | * | ||
226 | * When queueing requests or responses on a shared ring, it may not always be | ||
227 | * necessary to notify the remote end. For example, if requests are in flight | ||
228 | * in a backend, the front may be able to queue further requests without | ||
229 | * notifying the back (if the back checks for new requests when it queues | ||
230 | * responses). | ||
231 | * | ||
232 | * When enqueuing requests or responses: | ||
233 | * | ||
234 | * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument | ||
235 | * is a boolean return value. True indicates that the receiver requires an | ||
236 | * asynchronous notification. | ||
237 | * | ||
238 | * After dequeuing requests or responses (before sleeping the connection): | ||
239 | * | ||
240 | * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). | ||
241 | * The second argument is a boolean return value. True indicates that there | ||
242 | * are pending messages on the ring (i.e., the connection should not be put | ||
243 | * to sleep). | ||
244 | * | ||
245 | * These macros will set the req_event/rsp_event field to trigger a | ||
246 | * notification on the very next message that is enqueued. If you want to | ||
247 | * create batches of work (i.e., only receive a notification after several | ||
248 | * messages have been enqueued) then you will need to create a customised | ||
249 | * version of the FINAL_CHECK macro in your own code, which sets the event | ||
250 | * field appropriately. | ||
251 | */ | ||
252 | |||
/*
 * Push and decide whether to notify: the unsigned-wraparound comparison
 * sets _notify iff the peer's event index lies in (__old, __new], i.e.
 * the peer asked to be woken for one of the entries just published.
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
	RING_IDX __old = (_r)->sring->req_prod;				\
	RING_IDX __new = (_r)->req_prod_pvt;				\
	virt_wmb(); /* back sees requests /before/ updated producer index */ \
	(_r)->sring->req_prod = __new;					\
	virt_mb(); /* back sees new requests /before/ we check req_event */ \
	(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <	\
		     (RING_IDX)(__new - __old));			\
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
	RING_IDX __old = (_r)->sring->rsp_prod;				\
	RING_IDX __new = (_r)->rsp_prod_pvt;				\
	virt_wmb(); /* front sees responses /before/ updated producer index */ \
	(_r)->sring->rsp_prod = __new;					\
	virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <	\
		     (RING_IDX)(__new - __old));			\
} while (0)

/*
 * Re-check for work after arming the event index, closing the race
 * where the peer enqueues between the first check and the event write.
 */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
	if (_work_to_do) break;						\
	(_r)->sring->req_event = (_r)->req_cons + 1;			\
	virt_mb();							\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
	if (_work_to_do) break;						\
	(_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
	virt_mb();							\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
} while (0)
288 | |||
289 | |||
290 | /* | ||
291 | * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and | ||
292 | * functions to check if there is data on the ring, and to read and | ||
293 | * write to them. | ||
294 | * | ||
295 | * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but | ||
296 | * does not define the indexes page. As different protocols can have | ||
297 | * extensions to the basic format, this macro allow them to define their | ||
298 | * own struct. | ||
299 | * | ||
300 | * XEN_FLEX_RING_SIZE | ||
301 | * Convenience macro to calculate the size of one of the two rings | ||
302 | * from the overall order. | ||
303 | * | ||
304 | * $NAME_mask | ||
305 | * Function to apply the size mask to an index, to reduce the index | ||
306 | * within the range [0-size]. | ||
307 | * | ||
308 | * $NAME_read_packet | ||
309 | * Function to read data from the ring. The amount of data to read is | ||
310 | * specified by the "size" argument. | ||
311 | * | ||
312 | * $NAME_write_packet | ||
313 | * Function to write data to the ring. The amount of data to write is | ||
314 | * specified by the "size" argument. | ||
315 | * | ||
316 | * $NAME_get_ring_ptr | ||
317 | * Convenience function that returns a pointer to read/write to the | ||
318 | * ring at the right location. | ||
319 | * | ||
320 | * $NAME_data_intf | ||
321 | * Indexes page, shared between frontend and backend. It also | ||
322 | * contains the array of grant refs. | ||
323 | * | ||
324 | * $NAME_queued | ||
325 | * Function to calculate how many bytes are currently on the ring, | ||
326 | * ready to be read. It can also be used to calculate how much free | ||
327 | * space is currently on the ring (XEN_FLEX_RING_SIZE() - | ||
328 | * $NAME_queued()). | ||
329 | */ | ||
330 | |||
#ifndef XEN_PAGE_SHIFT
/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture, and page granularity chosen by
 * operating systems.
 */
#define XEN_PAGE_SHIFT 12
#endif
/* Size of ONE of the two rings (in or out): half of the 2^order pages. */
#define XEN_FLEX_RING_SIZE(order)					\
	(1UL << ((order) + XEN_PAGE_SHIFT - 1))
340 | |||
#define DEFINE_XEN_FLEX_RING(name)					\
/* Reduce a free-running index to [0, ring_size); size is a power of 2. */ \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)	\
{									\
	return idx & (ring_size - 1);					\
}									\
									\
/* Pointer into the ring buffer at the masked position of @idx. */	\
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,	\
						 RING_IDX idx,		\
						 RING_IDX ring_size)	\
{									\
	return buf + name##_mask(idx, ring_size);			\
}									\
									\
/* Copy @size bytes out of the ring, in up to two chunks if the data	\
 * wraps past the end of the buffer; advances *masked_cons. */		\
static inline void name##_read_packet(void *opaque,			\
				      const unsigned char *buf,		\
				      size_t size,			\
				      RING_IDX masked_prod,		\
				      RING_IDX *masked_cons,		\
				      RING_IDX ring_size)		\
{									\
	if (*masked_cons < masked_prod ||				\
	    size <= ring_size - *masked_cons) {				\
		memcpy(opaque, buf + *masked_cons, size);		\
	} else {							\
		memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
		memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
		       size - (ring_size - *masked_cons));		\
	}								\
	*masked_cons = name##_mask(*masked_cons + size, ring_size);	\
}									\
									\
/* Copy @size bytes into the ring, in up to two chunks if the free	\
 * space wraps past the end of the buffer; advances *masked_prod. */	\
static inline void name##_write_packet(unsigned char *buf,		\
				       const void *opaque,		\
				       size_t size,			\
				       RING_IDX *masked_prod,		\
				       RING_IDX masked_cons,		\
				       RING_IDX ring_size)		\
{									\
	if (*masked_prod < masked_cons ||				\
	    size <= ring_size - *masked_prod) {				\
		memcpy(buf + *masked_prod, opaque, size);		\
	} else {							\
		memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
		memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
		       size - (ring_size - *masked_prod));		\
	}								\
	*masked_prod = name##_mask(*masked_prod + size, ring_size);	\
}									\
									\
/* Bytes currently queued on the ring; masked-equal indexes with	\
 * prod != cons mean the ring is completely full. */			\
static inline RING_IDX name##_queued(RING_IDX prod,			\
				     RING_IDX cons,			\
				     RING_IDX ring_size)		\
{									\
	RING_IDX size;							\
									\
	if (prod == cons)						\
		return 0;						\
									\
	prod = name##_mask(prod, ring_size);				\
	cons = name##_mask(cons, ring_size);				\
									\
	if (prod == cons)						\
		return ring_size;					\
									\
	if (prod > cons)						\
		size = prod - cons;					\
	else								\
		size = ring_size - (cons - prod);			\
	return size;							\
}									\
									\
struct name##_data {							\
	unsigned char *in; /* half of the allocation */			\
	unsigned char *out; /* half of the allocation */		\
}
416 | |||
/*
 * As DEFINE_XEN_FLEX_RING, but also defines the shared indexes page,
 * with each in/out index pair padded onto its own 64-byte region and a
 * trailing flexible array of grant references for the data pages.
 */
#define DEFINE_XEN_FLEX_RING_AND_INTF(name)				\
struct name##_data_intf {						\
	RING_IDX in_cons, in_prod;					\
									\
	uint8_t pad1[56];						\
									\
	RING_IDX out_cons, out_prod;					\
									\
	uint8_t pad2[56];						\
									\
	RING_IDX ring_order;						\
	grant_ref_t ref[];						\
};									\
DEFINE_XEN_FLEX_RING(name)
431 | |||
432 | #endif /* __XEN_PUBLIC_IO_RING_H__ */ | ||
433 |
include/xen/interface/platform.h
File was created | 1 | /****************************************************************************** | |
2 | * platform.h | ||
3 | * | ||
4 | * Hardware platform operations. Intended for use by domain-0 kernel. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright (c) 2002-2006, K Fraser | ||
25 | */ | ||
26 | |||
#ifndef __XEN_PUBLIC_PLATFORM_H__
#define __XEN_PUBLIC_PLATFORM_H__

#include <xen/interface/xen.h>

#define XENPF_INTERFACE_VERSION 0x03000001

/*
 * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
 * 1 January, 1970 if the current system time was <system_time>.
 */
#define XENPF_settime32 17
struct xenpf_settime32 {
	/* IN variables. */
	uint32_t secs;
	uint32_t nsecs;
	uint64_t system_time;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime32_t);
/* 64-bit-seconds variant of XENPF_settime32. */
#define XENPF_settime64 62
struct xenpf_settime64 {
	/* IN variables. */
	uint64_t secs;
	uint32_t nsecs;
	uint32_t mbz;		/* must be zero */
	uint64_t system_time;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime64_t);
55 | |||
56 | /* | ||
57 | * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. | ||
58 | * On x86, @type is an architecture-defined MTRR memory type. | ||
59 | * On success, returns the MTRR that was used (@reg) and a handle that can | ||
60 | * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. | ||
61 | * (x86-specific). | ||
62 | */ | ||
/*
 * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
 * On x86, @type is an architecture-defined MTRR memory type.
 * On success, returns the MTRR that was used (@reg) and a handle that can
 * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
 * (x86-specific).
 */
#define XENPF_add_memtype 31
struct xenpf_add_memtype {
	/* IN variables. */
	xen_pfn_t mfn;
	uint64_t nr_mfns;
	uint32_t type;
	/* OUT variables. */
	uint32_t handle;
	uint32_t reg;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_add_memtype_t);

/*
 * Tear down an existing memory-range type. If @handle is remembered then it
 * should be passed in to accurately tear down the correct setting (in case
 * of overlapping memory regions with differing types). If it is not known
 * then @handle should be set to zero. In all cases @reg must be set.
 * (x86-specific).
 */
#define XENPF_del_memtype 32
struct xenpf_del_memtype {
	/* IN variables. */
	uint32_t handle;
	uint32_t reg;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_del_memtype_t);

/* Read current type of an MTRR (x86-specific). */
#define XENPF_read_memtype 33
struct xenpf_read_memtype {
	/* IN variables. */
	uint32_t reg;
	/* OUT variables. */
	xen_pfn_t mfn;
	uint64_t nr_mfns;
	uint32_t type;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_read_memtype_t);

/* Load a CPU microcode update supplied by dom0. */
#define XENPF_microcode_update 35
struct xenpf_microcode_update {
	/* IN variables. */
	GUEST_HANDLE(void) data;	/* Pointer to microcode data */
	uint32_t length;		/* Length of microcode data. */
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_microcode_update_t);

/* Inform the hypervisor of a platform quirk it should work around. */
#define XENPF_platform_quirk 39
#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
struct xenpf_platform_quirk {
	/* IN variables. */
	uint32_t quirk_id;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_platform_quirk_t);
119 | |||
#define XENPF_efi_runtime_call    49
#define XEN_EFI_get_time                      1
#define XEN_EFI_set_time                      2
#define XEN_EFI_get_wakeup_time               3
#define XEN_EFI_set_wakeup_time               4
#define XEN_EFI_get_next_high_monotonic_count 5
#define XEN_EFI_get_variable                  6
#define XEN_EFI_set_variable                  7
#define XEN_EFI_get_next_variable_name        8
#define XEN_EFI_query_variable_info           9
#define XEN_EFI_query_capsule_capabilities   10
#define XEN_EFI_update_capsule               11

/*
 * Proxy one EFI runtime-services call through the hypervisor.
 * @function selects the XEN_EFI_* service; the member of @u matching
 * that service carries its arguments and results; @status returns the
 * call's status value.
 */
struct xenpf_efi_runtime_call {
    uint32_t function;
    /*
     * This field is generally used for per sub-function flags (defined
     * below), except for the XEN_EFI_get_next_high_monotonic_count case,
     * where it holds the single returned value.
     */
    uint32_t misc;
    xen_ulong_t status;
    union {
#define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001
        struct {
            struct xenpf_efi_time {
                uint16_t year;
                uint8_t month;
                uint8_t day;
                uint8_t hour;
                uint8_t min;
                uint8_t sec;
                uint32_t ns;
                int16_t tz;
                uint8_t daylight;
            } time;
            uint32_t resolution;
            uint32_t accuracy;
        } get_time;

        struct xenpf_efi_time set_time;

#define XEN_EFI_GET_WAKEUP_TIME_ENABLED 0x00000001
#define XEN_EFI_GET_WAKEUP_TIME_PENDING 0x00000002
        struct xenpf_efi_time get_wakeup_time;

#define XEN_EFI_SET_WAKEUP_TIME_ENABLE      0x00000001
#define XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY 0x00000002
        struct xenpf_efi_time set_wakeup_time;

#define XEN_EFI_VARIABLE_NON_VOLATILE       0x00000001
#define XEN_EFI_VARIABLE_BOOTSERVICE_ACCESS 0x00000002
#define XEN_EFI_VARIABLE_RUNTIME_ACCESS     0x00000004
        struct {
            GUEST_HANDLE(void) name;  /* UCS-2/UTF-16 string */
            xen_ulong_t size;
            GUEST_HANDLE(void) data;
            struct xenpf_efi_guid {
                uint32_t data1;
                uint16_t data2;
                uint16_t data3;
                uint8_t data4[8];
            } vendor_guid;
        } get_variable, set_variable;

        struct {
            xen_ulong_t size;
            GUEST_HANDLE(void) name;  /* UCS-2/UTF-16 string */
            struct xenpf_efi_guid vendor_guid;
        } get_next_variable_name;

        struct {
            uint32_t attr;
            uint64_t max_store_size;
            uint64_t remain_store_size;
            uint64_t max_size;
        } query_variable_info;

        struct {
            GUEST_HANDLE(void) capsule_header_array;
            xen_ulong_t capsule_count;
            uint64_t max_capsule_size;
            uint32_t reset_type;
        } query_capsule_capabilities;

        struct {
            GUEST_HANDLE(void) capsule_header_array;
            xen_ulong_t capsule_count;
            uint64_t sg_list;   /* machine address */
        } update_capsule;
    } u;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_efi_runtime_call);
213 | |||
#define XEN_FW_EFI_VERSION        0
#define XEN_FW_EFI_CONFIG_TABLE   1
#define XEN_FW_EFI_VENDOR         2
#define XEN_FW_EFI_MEM_INFO       3
#define XEN_FW_EFI_RT_VERSION     4

#define XENPF_firmware_info       50
#define XEN_FW_DISK_INFO          1 /* from int 13 AH=08/41/48 */
#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
#define XEN_FW_VBEDDC_INFO        3 /* from int 10 AX=4f15 */
#define XEN_FW_EFI_INFO           4 /* from EFI */
#define XEN_FW_KBD_SHIFT_FLAGS    5 /* Int16, Fn02: Get keyboard shift flags. */

/*
 * Query firmware (BIOS/EFI) information recorded by Xen at boot.
 * @type is a XEN_FW_* selector; the matching member of @u is filled in.
 */
struct xenpf_firmware_info {
    /* IN variables. */
    uint32_t type;
    uint32_t index;
    /* OUT variables. */
    union {
        struct {
            /* Int13, Fn48: Check Extensions Present. */
            uint8_t device;                   /* %dl: bios device number */
            uint8_t version;                  /* %ah: major version      */
            uint16_t interface_support;       /* %cx: support bitmap     */
            /* Int13, Fn08: Legacy Get Device Parameters. */
            uint16_t legacy_max_cylinder;     /* %cl[7:6]:%ch: max cyl # */
            uint8_t legacy_max_head;          /* %dh: max head #         */
            uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector #  */
            /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
            /* NB. First uint16_t of buffer must be set to buffer size.      */
            GUEST_HANDLE(void) edd_params;
        } disk_info; /* XEN_FW_DISK_INFO */
        struct {
            uint8_t device;         /* bios device number  */
            uint32_t mbr_signature; /* offset 0x1b8 in mbr */
        } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
        struct {
            /* Int10, AX=4F15: Get EDID info. */
            uint8_t capabilities;
            uint8_t edid_transfer_time;
            /* must refer to 128-byte buffer */
            GUEST_HANDLE(uchar) edid;
        } vbeddc_info; /* XEN_FW_VBEDDC_INFO */

        union xenpf_efi_info {
            uint32_t version;
            struct {
                uint64_t addr;   /* EFI_CONFIGURATION_TABLE */
                uint32_t nent;
            } cfg;
            struct {
                uint32_t revision;
                uint32_t bufsz;  /* input, in bytes */
                GUEST_HANDLE(void) name;
                /* UCS-2/UTF-16 string */
            } vendor;
            struct {
                uint64_t addr;
                uint64_t size;
                uint64_t attr;
                uint32_t type;
            } mem;
        } efi_info; /* XEN_FW_EFI_INFO */

        uint8_t kbd_shift_flags; /* XEN_FW_KBD_SHIFT_FLAGS */
    } u;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
282 | |||
#define XENPF_enter_acpi_sleep    51
/* Request that the platform enter an ACPI sleep state. */
struct xenpf_enter_acpi_sleep {
    /* IN variables */
    uint16_t val_a;             /* PM1a control / sleep type A.    */
    uint16_t val_b;             /* PM1b control / sleep type B.    */
    uint32_t sleep_state;       /* Which state to enter (Sn).      */
#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
    uint32_t flags;             /* XENPF_ACPI_SLEEP_*.             */
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
293 | |||
#define XENPF_change_freq         52
/* Change the running frequency of a physical CPU. */
struct xenpf_change_freq {
    /* IN variables */
    uint32_t flags;             /* Must be zero.        */
    uint32_t cpu;               /* Physical cpu.        */
    uint64_t freq;              /* New frequency (Hz).  */
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_change_freq_t);
302 | |||
/*
 * Get idle times (nanoseconds since boot) for physical CPUs specified in the
 * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
 * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
 * bit set are written to. On return, @cpumap_bitmap is modified so that any
 * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
 * cleared.
 */
#define XENPF_getidletime         53
struct xenpf_getidletime {
    /* IN/OUT variables */
    /* IN: CPUs to interrogate; OUT: subset of IN which are present */
    GUEST_HANDLE(uchar) cpumap_bitmap;
    /* IN variables */
    /* Size of cpumap bitmap. */
    uint32_t cpumap_nr_cpus;
    /* Must be indexable for every cpu in cpumap_bitmap. */
    GUEST_HANDLE(uint64_t) idletime;
    /* OUT variables */
    /* System time when the idletime snapshots were taken. */
    uint64_t now;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_getidletime_t);
326 | |||
#define XENPF_set_processor_pminfo 54

/* ability bits */
#define XEN_PROCESSOR_PM_CX       1
#define XEN_PROCESSOR_PM_PX       2
#define XEN_PROCESSOR_PM_TX       4

/* cmd type */
#define XEN_PM_CX                 0
#define XEN_PM_PX                 1
#define XEN_PM_TX                 2
#define XEN_PM_PDC                3
/* Px sub info type */
#define XEN_PX_PCT                1
#define XEN_PX_PSS                2
#define XEN_PX_PPC                4
#define XEN_PX_PSD                8

/* ACPI Generic Address Structure (GAS)-style register description. */
struct xen_power_register {
    uint32_t space_id;
    uint32_t bit_width;
    uint32_t bit_offset;
    uint32_t access_size;
    uint64_t address;
};
352 | |||
/* C-state dependency information (ACPI _CSD). */
struct xen_processor_csd {
    uint32_t domain;        /* domain number of one dependent group  */
    uint32_t coord_type;    /* coordination type                     */
    uint32_t num;           /* number of processors in same domain   */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_processor_csd);
359 | |||
/* Description of one processor C-state (ACPI _CST entry). */
struct xen_processor_cx {
    struct xen_power_register reg;  /* GAS for Cx trigger register              */
    uint8_t type;                   /* cstate value, c0: 0, c1: 1, ...          */
    uint32_t latency;               /* worst latency (ms) to enter/exit this cstate */
    uint32_t power;                 /* average power consumption(mW)            */
    uint32_t dpcnt;                 /* number of dependency entries             */
    GUEST_HANDLE(xen_processor_csd) dp; /* NULL if no dependency                */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_processor_cx);
369 | |||
/* Per-processor power-management capability flags. */
struct xen_processor_flags {
    uint32_t bm_control:1;
    uint32_t bm_check:1;
    uint32_t has_cst:1;
    uint32_t power_setup_done:1;
    uint32_t bm_rld_set:1;
};

/* Full set of C-state data passed for one processor (XEN_PM_CX). */
struct xen_processor_power {
    uint32_t count;                     /* number of C state entries in array below */
    struct xen_processor_flags flags;   /* global flags of this processor           */
    GUEST_HANDLE(xen_processor_cx) states; /* supported c states                    */
};
383 | |||
/* Performance control/status register description (ACPI _PCT). */
struct xen_pct_register {
    uint8_t descriptor;
    uint16_t length;
    uint8_t space_id;
    uint8_t bit_width;
    uint8_t bit_offset;
    uint8_t reserved;
    uint64_t address;
};
393 | |||
/* One performance state entry (ACPI _PSS entry). */
struct xen_processor_px {
    uint64_t core_frequency;     /* megahertz         */
    uint64_t power;              /* milliWatts        */
    uint64_t transition_latency; /* microseconds      */
    uint64_t bus_master_latency; /* microseconds      */
    uint64_t control;            /* control value     */
    uint64_t status;             /* success indicator */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_processor_px);
403 | |||
/* P-state dependency domain information (ACPI _PSD package). */
struct xen_psd_package {
    uint64_t num_entries;
    uint64_t revision;
    uint64_t domain;
    uint64_t coord_type;
    uint64_t num_processors;
};
411 | |||
/* Full set of P-state data passed for one processor (XEN_PM_PX). */
struct xen_processor_performance {
    uint32_t flags;             /* flag for Px sub info type (XEN_PX_*)   */
    uint32_t platform_limit;    /* Platform limitation on freq usage      */
    struct xen_pct_register control_register;
    struct xen_pct_register status_register;
    uint32_t state_count;       /* total available performance states     */
    GUEST_HANDLE(xen_processor_px) states;
    struct xen_psd_package domain_info;
    uint32_t shared_type;       /* coordination type of this processor    */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_processor_performance);
423 | |||
/* Hand ACPI processor power-management data to Xen (XENPF_set_processor_pminfo). */
struct xenpf_set_processor_pminfo {
    /* IN variables */
    uint32_t id;    /* ACPI CPU ID            */
    uint32_t type;  /* {XEN_PM_CX, XEN_PM_PX} */
    union {
        struct xen_processor_power power;       /* Cx: _CST/_CSD           */
        struct xen_processor_performance perf;  /* Px: _PPC/_PCT/_PSS/_PSD */
        GUEST_HANDLE(uint32_t) pdc;
    };
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo);
435 | |||
#define XENPF_get_cpuinfo         55
/* Query per-physical-CPU information from Xen. */
struct xenpf_pcpuinfo {
    /* IN */
    uint32_t xen_cpuid;
    /* OUT */
    /* The maximum cpu_id that is present */
    uint32_t max_present;
#define XEN_PCPU_FLAGS_ONLINE     1
    /* Corresponding xen_cpuid is not present */
#define XEN_PCPU_FLAGS_INVALID    2
    uint32_t flags;
    uint32_t apic_id;
    uint32_t acpi_id;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_pcpuinfo);
451 | |||
#define XENPF_cpu_online          56
#define XENPF_cpu_offline         57
/* Bring a physical CPU online or take it offline. */
struct xenpf_cpu_ol {
    uint32_t cpuid;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
458 | |||
#define XENPF_cpu_hotadd          58
/* Hot-add a physical CPU. */
struct xenpf_cpu_hotadd {
    uint32_t apic_id;
    uint32_t acpi_id;
    uint32_t pxm;       /* proximity domain */
};
465 | |||
#define XENPF_mem_hotadd          59
/* Hot-add a physical memory range [spfn, epfn). */
struct xenpf_mem_hotadd {
    uint64_t spfn;      /* start pfn        */
    uint64_t epfn;      /* end pfn          */
    uint32_t pxm;       /* proximity domain */
    uint32_t flags;
};
473 | |||
#define XENPF_core_parking        60
/* Set or query the number of CPUs to be idled ("parked"). */
struct xenpf_core_parking {
    /* IN variables */
#define XEN_CORE_PARKING_SET      1
#define XEN_CORE_PARKING_GET      2
    uint32_t type;
    /* IN variables:  set cpu nums expected to be idled */
    /* OUT variables: get cpu nums actually be idled    */
    uint32_t idle_nums;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
485 | |||
#define XENPF_get_symbol          63
/* Iterate over the hypervisor's symbol table, one symbol per call. */
struct xenpf_symdata {
    /* IN/OUT variables */
    uint32_t namelen;   /* size of 'name' buffer */

    /* IN/OUT variables */
    uint32_t symnum;    /* IN:  Symbol to read                          */
                        /* OUT: Next available symbol. If same as IN    */
                        /*      then we reached the end                 */

    /* OUT variables */
    GUEST_HANDLE(char) name;
    uint64_t address;
    char type;
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
502 | |||
/*
 * Argument for the HYPERVISOR_platform_op hypercall: @cmd selects a
 * XENPF_* operation and the matching member of @u carries its payload.
 * The pad keeps the union (and hence the ABI) a fixed size.
 */
struct xen_platform_op {
    uint32_t cmd;
    uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
    union {
        struct xenpf_settime32         settime32;
        struct xenpf_settime64         settime64;
        struct xenpf_add_memtype       add_memtype;
        struct xenpf_del_memtype       del_memtype;
        struct xenpf_read_memtype      read_memtype;
        struct xenpf_microcode_update  microcode;
        struct xenpf_platform_quirk    platform_quirk;
        struct xenpf_efi_runtime_call  efi_runtime_call;
        struct xenpf_firmware_info     firmware_info;
        struct xenpf_enter_acpi_sleep  enter_acpi_sleep;
        struct xenpf_change_freq       change_freq;
        struct xenpf_getidletime       getidletime;
        struct xenpf_set_processor_pminfo set_pminfo;
        struct xenpf_pcpuinfo          pcpu_info;
        struct xenpf_cpu_ol            cpu_ol;
        struct xenpf_cpu_hotadd        cpu_add;
        struct xenpf_mem_hotadd        mem_add;
        struct xenpf_core_parking      core_parking;
        struct xenpf_symdata           symdata;
        uint8_t                        pad[128];
    } u;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_platform_op_t);
530 | |||
531 | #endif /* __XEN_PUBLIC_PLATFORM_H__ */ | ||
532 |
include/xen/interface/sched.h
File was created | 1 | /****************************************************************************** | |
2 | * sched.h | ||
3 | * | ||
4 | * Scheduler state interactions | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
25 | */ | ||
26 | |||
27 | #ifndef __XEN_PUBLIC_SCHED_H__ | ||
28 | #define __XEN_PUBLIC_SCHED_H__ | ||
29 | |||
30 | #include <xen/interface/event_channel.h> | ||
31 | |||
/*
 * Guest Scheduler Operations
 *
 * The SCHEDOP interface provides mechanisms for a guest to interact
 * with the scheduler, including yield, blocking and shutting itself
 * down.
 */

/*
 * The prototype for this hypercall is:
 * long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
 *
 * @cmd == SCHEDOP_??? (scheduler operation).
 * @arg == Operation-specific extra argument(s), as described below.
 * ...  == Additional Operation-specific extra arguments, described below.
 *
 * Versions of Xen prior to 3.0.2 provided only the following legacy version
 * of this hypercall, supporting only the commands yield, block and shutdown:
 *  long sched_op(int cmd, unsigned long arg)
 * @cmd == SCHEDOP_??? (scheduler operation).
 * @arg == 0               (SCHEDOP_yield and SCHEDOP_block)
 *      == SHUTDOWN_* code (SCHEDOP_shutdown)
 *
 * This legacy version is available to new guests as:
 * long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
 */

/*
 * Voluntarily yield the CPU.
 * @arg == NULL.
 */
#define SCHEDOP_yield             0

/*
 * Block execution of this VCPU until an event is received for processing.
 * If called with event upcalls masked, this operation will atomically
 * reenable event delivery and check for pending events before blocking the
 * VCPU. This avoids a "wakeup waiting" race.
 * @arg == NULL.
 */
#define SCHEDOP_block             1

/*
 * Halt execution of this domain (all VCPUs) and notify the system controller.
 * @arg == pointer to sched_shutdown structure.
 *
 * If the sched_shutdown_t reason is SHUTDOWN_suspend then
 * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
 * of the guest's start info page. RDX/EDX is the third hypercall
 * argument.
 *
 * In addition, when the reason is SHUTDOWN_suspend this hypercall
 * returns 1 if suspend was cancelled or the domain was merely
 * checkpointed, and 0 if it is resuming in a new domain.
 */
#define SCHEDOP_shutdown          2

/*
 * Poll a set of event-channel ports. Return when one or more are pending. An
 * optional timeout may be specified.
 * @arg == pointer to sched_poll structure.
 */
#define SCHEDOP_poll              3

/*
 * Declare a shutdown for another domain. The main use of this function is
 * in interpreting shutdown requests and reasons for fully-virtualized
 * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
 * @arg == pointer to sched_remote_shutdown structure.
 */
#define SCHEDOP_remote_shutdown   4

/*
 * Latch a shutdown code, so that when the domain later shuts down it
 * reports this code to the control tools.
 * @arg == sched_shutdown, as for SCHEDOP_shutdown.
 */
#define SCHEDOP_shutdown_code     5

/*
 * Setup, poke and destroy a domain watchdog timer.
 * @arg == pointer to sched_watchdog structure.
 * With id == 0, setup a domain watchdog timer to cause domain shutdown
 *               after timeout, returns watchdog id.
 * With id != 0 and timeout == 0, destroy domain watchdog timer.
 * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
 */
#define SCHEDOP_watchdog          6

/*
 * Override the current vcpu affinity by pinning it to one physical cpu or
 * undo this override restoring the previous affinity.
 * @arg == pointer to sched_pin_override structure.
 *
 * A negative pcpu value will undo a previous pin override and restore the
 * previous cpu affinity.
 * This call is allowed for the hardware domain only and requires the cpu
 * to be part of the domain's cpupool.
 */
#define SCHEDOP_pin_override      7
132 | |||
/* Argument for SCHEDOP_shutdown / SCHEDOP_shutdown_code. */
struct sched_shutdown {
    unsigned int reason; /* SHUTDOWN_* => shutdown reason */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
137 | |||
/* Argument for SCHEDOP_poll: wait on a set of event-channel ports. */
struct sched_poll {
    GUEST_HANDLE(evtchn_port_t) ports;
    unsigned int nr_ports;
    uint64_t timeout;
};
DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
144 | |||
/* Argument for SCHEDOP_remote_shutdown: shut down another domain. */
struct sched_remote_shutdown {
    domid_t domain_id;   /* Remote domain ID              */
    unsigned int reason; /* SHUTDOWN_* => shutdown reason */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_remote_shutdown);
150 | |||
/* Argument for SCHEDOP_watchdog (see semantics of id/timeout above). */
struct sched_watchdog {
    uint32_t id;      /* watchdog ID */
    uint32_t timeout; /* timeout     */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_watchdog);
156 | |||
/* Argument for SCHEDOP_pin_override; negative pcpu undoes the override. */
struct sched_pin_override {
    int32_t pcpu;
};
DEFINE_GUEST_HANDLE_STRUCT(sched_pin_override);
161 | |||
/*
 * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
 * software to determine the appropriate action. For the most part, Xen does
 * not care about the shutdown code.
 */
#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
#define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */

/*
 * Domain asked to perform 'soft reset' for it. The expected behavior is to
 * reset internal Xen state for the domain returning it to the point where it
 * was created but leaving the domain's memory contents and vCPU contexts
 * intact. This will allow the domain to start over and set up all Xen specific
 * interfaces again.
 */
#define SHUTDOWN_soft_reset 5
#define SHUTDOWN_MAX        5  /* Maximum valid shutdown reason. */
182 | |||
183 | #endif /* __XEN_PUBLIC_SCHED_H__ */ | ||
184 |
include/xen/interface/xen.h
File was created | 1 | /****************************************************************************** | |
2 | * xen.h | ||
3 | * | ||
4 | * Guest OS interface to Xen. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright (c) 2004, K A Fraser | ||
25 | */ | ||
26 | |||
27 | #ifndef __XEN_PUBLIC_XEN_H__ | ||
28 | #define __XEN_PUBLIC_XEN_H__ | ||
29 | |||
30 | #include <asm/xen/interface.h> | ||
31 | |||
/*
 * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
 *
 * Each __HYPERVISOR_* constant below is a hypercall vector number; the
 * numbering is part of the guest/hypervisor ABI and must not change.
 */

/*
 * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
 *         EAX = return value
 *         (argument registers may be clobbered on return)
 * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
 *         RAX = return value
 *         (argument registers not clobbered on return; RCX, R11 are)
 */
#define __HYPERVISOR_set_trap_table                0
#define __HYPERVISOR_mmu_update                    1
#define __HYPERVISOR_set_gdt                       2
#define __HYPERVISOR_stack_switch                  3
#define __HYPERVISOR_set_callbacks                 4
#define __HYPERVISOR_fpu_taskswitch                5
#define __HYPERVISOR_sched_op_compat               6
#define __HYPERVISOR_platform_op                   7
#define __HYPERVISOR_set_debugreg                  8
#define __HYPERVISOR_get_debugreg                  9
#define __HYPERVISOR_update_descriptor            10
#define __HYPERVISOR_memory_op                    12
#define __HYPERVISOR_multicall                    13
#define __HYPERVISOR_update_va_mapping            14
#define __HYPERVISOR_set_timer_op                 15
#define __HYPERVISOR_event_channel_op_compat      16
#define __HYPERVISOR_xen_version                  17
#define __HYPERVISOR_console_io                   18
#define __HYPERVISOR_physdev_op_compat            19
#define __HYPERVISOR_grant_table_op               20
#define __HYPERVISOR_vm_assist                    21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret                         23 /* x86 only */
#define __HYPERVISOR_vcpu_op                      24
#define __HYPERVISOR_set_segment_base             25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op                    26
#define __HYPERVISOR_xsm_op                       27
#define __HYPERVISOR_nmi_op                       28
#define __HYPERVISOR_sched_op                     29
#define __HYPERVISOR_callback_op                  30
#define __HYPERVISOR_xenoprof_op                  31
#define __HYPERVISOR_event_channel_op             32
#define __HYPERVISOR_physdev_op                   33
#define __HYPERVISOR_hvm_op                       34
#define __HYPERVISOR_sysctl                       35
#define __HYPERVISOR_domctl                       36
#define __HYPERVISOR_kexec_op                     37
#define __HYPERVISOR_tmem_op                      38
#define __HYPERVISOR_xc_reserved_op               39 /* reserved for XenClient */
#define __HYPERVISOR_xenpmu_op                    40
#define __HYPERVISOR_dm_op                        41

/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0                       48
#define __HYPERVISOR_arch_1                       49
#define __HYPERVISOR_arch_2                       50
#define __HYPERVISOR_arch_3                       51
#define __HYPERVISOR_arch_4                       52
#define __HYPERVISOR_arch_5                       53
#define __HYPERVISOR_arch_6                       54
#define __HYPERVISOR_arch_7                       55
95 | |||
/*
 * VIRTUAL INTERRUPTS
 *
 * Virtual interrupts that a guest OS may receive from Xen.
 * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
 * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
 * The latter can be allocated only once per guest: they must initially be
 * allocated to VCPU0 but can subsequently be re-bound.
 */
#define VIRQ_TIMER      0  /* V. Timebase update, and/or requested timeout.   */
#define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.            */
#define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console.  */
#define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.    */
#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.   */
#define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.    */
#define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available  */
#define VIRQ_CON_RING   8  /* G. (DOM0) Bytes received on console             */
#define VIRQ_PCPU_STATE 9  /* G. (DOM0) PCPU state changed                    */
#define VIRQ_MEM_EVENT  10 /* G. (DOM0) A memory event has occurred           */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient                      */
#define VIRQ_ENOMEM     12 /* G. (DOM0) Low on heap memory                    */
#define VIRQ_XENPMU     13 /* PMC interrupt                                   */

/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0     16
#define VIRQ_ARCH_1     17
#define VIRQ_ARCH_2     18
#define VIRQ_ARCH_3     19
#define VIRQ_ARCH_4     20
#define VIRQ_ARCH_5     21
#define VIRQ_ARCH_6     22
#define VIRQ_ARCH_7     23

#define NR_VIRQS        24
130 | |||
131 | /* | ||
132 | * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[], | ||
133 | * unsigned count, unsigned *done_out, | ||
134 | * unsigned foreigndom) | ||
135 | * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). | ||
136 | * @count is the length of the above array. | ||
137 | * @pdone is an output parameter indicating number of completed operations | ||
138 | * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this | ||
139 | * hypercall invocation. Can be DOMID_SELF. | ||
140 | * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced | ||
141 | * in this hypercall invocation. The value of this field | ||
142 | * (x) encodes the PFD as follows: | ||
143 | * x == 0 => PFD == DOMID_SELF | ||
144 | * x != 0 => PFD == x - 1 | ||
145 | * | ||
146 | * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. | ||
147 | * ------------- | ||
148 | * ptr[1:0] == MMU_NORMAL_PT_UPDATE: | ||
149 | * Updates an entry in a page table belonging to PFD. If updating an L1 table, | ||
150 | * and the new table entry is valid/present, the mapped frame must belong to | ||
151 | * FD. If attempting to map an I/O page then the caller assumes the privilege | ||
152 | * of the FD. | ||
153 | * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. | ||
154 | * FD == DOMID_XEN: Map restricted areas of Xen's heap space. | ||
155 | * ptr[:2] -- Machine address of the page-table entry to modify. | ||
156 | * val -- Value to write. | ||
157 | * | ||
 * There are also certain implicit requirements when using this hypercall. The
159 | * pages that make up a pagetable must be mapped read-only in the guest. | ||
160 | * This prevents uncontrolled guest updates to the pagetable. Xen strictly | ||
161 | * enforces this, and will disallow any pagetable update which will end up | ||
162 | * mapping pagetable page RW, and will disallow using any writable page as a | ||
163 | * pagetable. In practice it means that when constructing a page table for a | ||
 * process, thread, etc, we MUST be very diligent in following these rules:
165 | * 1). Start with top-level page (PGD or in Xen language: L4). Fill out | ||
166 | * the entries. | ||
167 | * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD | ||
168 | * or L2). | ||
169 | * 3). Start filling out the PTE table (L1) with the PTE entries. Once | ||
170 | * done, make sure to set each of those entries to RO (so writeable bit | ||
171 | * is unset). Once that has been completed, set the PMD (L2) for this | ||
172 | * PTE table as RO. | ||
173 | * 4). When completed with all of the PMD (L2) entries, and all of them have | ||
174 | * been set to RO, make sure to set RO the PUD (L3). Do the same | ||
175 | * operation on PGD (L4) pagetable entries that have a PUD (L3) entry. | ||
176 | * 5). Now before you can use those pages (so setting the cr3), you MUST also | ||
177 | * pin them so that the hypervisor can verify the entries. This is done | ||
178 | * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame | ||
 * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
180 | * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be | ||
181 | * issued. | ||
 * For 32-bit guests, the L4 is not used (as there are fewer pagetables), so
183 | * instead use L3. | ||
184 | * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE | ||
185 | * hypercall. Also if so desired the OS can also try to write to the PTE | ||
186 | * and be trapped by the hypervisor (as the PTE entry is RO). | ||
187 | * | ||
188 | * To deallocate the pages, the operations are the reverse of the steps | ||
189 | * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the | ||
190 | * pagetable MUST not be in use (meaning that the cr3 is not set to it). | ||
191 | * | ||
192 | * ptr[1:0] == MMU_MACHPHYS_UPDATE: | ||
193 | * Updates an entry in the machine->pseudo-physical mapping table. | ||
194 | * ptr[:2] -- Machine address within the frame whose mapping to modify. | ||
195 | * The frame must belong to the FD, if one is specified. | ||
196 | * val -- Value to write into the mapping entry. | ||
197 | * | ||
198 | * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: | ||
199 | * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed | ||
200 | * with those in @val. | ||
201 | * | ||
202 | * @val is usually the machine frame number along with some attributes. | ||
203 | * The attributes by default follow the architecture defined bits. Meaning that | ||
204 | * if this is a X86_64 machine and four page table layout is used, the layout | ||
205 | * of val is: | ||
206 | * - 63 if set means No execute (NX) | ||
207 | * - 46-13 the machine frame number | ||
208 | * - 12 available for guest | ||
209 | * - 11 available for guest | ||
210 | * - 10 available for guest | ||
211 | * - 9 available for guest | ||
212 | * - 8 global | ||
213 | * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages) | ||
214 | * - 6 dirty | ||
215 | * - 5 accessed | ||
216 | * - 4 page cached disabled | ||
217 | * - 3 page write through | ||
218 | * - 2 userspace accessible | ||
219 | * - 1 writeable | ||
220 | * - 0 present | ||
221 | * | ||
 * The one bit that does not fit with the default layout is the PAGE_PSE
 * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
224 | * HYPERVISOR_mmuext_op serve as mechanism to set a pagetable to be 4MB | ||
225 | * (or 2MB) instead of using the PAGE_PSE bit. | ||
226 | * | ||
227 | * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen | ||
228 | * using it as the Page Attribute Table (PAT) bit - for details on it please | ||
229 | * refer to Intel SDM 10.12. The PAT allows to set the caching attributes of | ||
230 | * pages instead of using MTRRs. | ||
231 | * | ||
232 | * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits): | ||
233 | * PAT4 PAT0 | ||
234 | * +-----+-----+----+----+----+-----+----+----+ | ||
235 | * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux | ||
236 | * +-----+-----+----+----+----+-----+----+----+ | ||
237 | * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots) | ||
238 | * +-----+-----+----+----+----+-----+----+----+ | ||
239 | * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen | ||
240 | * +-----+-----+----+----+----+-----+----+----+ | ||
241 | * | ||
242 | * The lookup of this index table translates to looking up | ||
243 | * Bit 7, Bit 4, and Bit 3 of val entry: | ||
244 | * | ||
245 | * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3). | ||
246 | * | ||
247 | * If all bits are off, then we are using PAT0. If bit 3 turned on, | ||
248 | * then we are using PAT1, if bit 3 and bit 4, then PAT2.. | ||
249 | * | ||
250 | * As you can see, the Linux PAT1 translates to PAT4 under Xen. Which means | ||
251 | * that if a guest that follows Linux's PAT setup and would like to set Write | ||
252 | * Combined on pages it MUST use PAT4 entry. Meaning that Bit 7 (PAGE_PAT) is | ||
253 | * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the | ||
254 | * caching as: | ||
255 | * | ||
256 | * WB = none (so PAT0) | ||
257 | * WC = PWT (bit 3 on) | ||
258 | * UC = PWT | PCD (bit 3 and 4 are on). | ||
259 | * | ||
260 | * To make it work with Xen, it needs to translate the WC bit as so: | ||
261 | * | ||
262 | * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3 | ||
263 | * | ||
264 | * And to translate back it would: | ||
265 | * | ||
266 | * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7. | ||
267 | */ | ||
/*
 * Command codes for HYPERVISOR_mmu_update(), encoded in the low two bits
 * of mmu_update.ptr (see the 'ptr[1:0]' descriptions above); the remaining
 * bits hold the machine address. ABI values -- do not renumber.
 */
#define MMU_NORMAL_PT_UPDATE		0 /* checked '*ptr = val'. ptr is MA.	   */
#define MMU_MACHPHYS_UPDATE		1 /* ptr = MA of frame to modify entry for */
#define MMU_PT_UPDATE_PRESERVE_AD	2 /* atomically: *ptr = val | (*ptr&(A|D)) */
#define MMU_PT_UPDATE_NO_TRANSLATE	3 /* checked '*ptr = val'. ptr is MA.	   */
272 | |||
273 | /* | ||
274 | * MMU EXTENDED OPERATIONS | ||
275 | * | ||
276 | * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[], | ||
277 | * unsigned int count, | ||
278 | * unsigned int *pdone, | ||
279 | * unsigned int foreigndom) | ||
280 | */ | ||
281 | /* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. | ||
282 | * A foreigndom (FD) can be specified (or DOMID_SELF for none). | ||
283 | * Where the FD has some effect, it is described below. | ||
284 | * | ||
285 | * cmd: MMUEXT_(UN)PIN_*_TABLE | ||
286 | * mfn: Machine frame number to be (un)pinned as a p.t. page. | ||
287 | * The frame must belong to the FD, if one is specified. | ||
288 | * | ||
289 | * cmd: MMUEXT_NEW_BASEPTR | ||
290 | * mfn: Machine frame number of new page-table base to install in MMU. | ||
291 | * | ||
292 | * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] | ||
293 | * mfn: Machine frame number of new page-table base to install in MMU | ||
294 | * when in user space. | ||
295 | * | ||
296 | * cmd: MMUEXT_TLB_FLUSH_LOCAL | ||
297 | * No additional arguments. Flushes local TLB. | ||
298 | * | ||
299 | * cmd: MMUEXT_INVLPG_LOCAL | ||
300 | * linear_addr: Linear address to be flushed from the local TLB. | ||
301 | * | ||
302 | * cmd: MMUEXT_TLB_FLUSH_MULTI | ||
303 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. | ||
304 | * | ||
305 | * cmd: MMUEXT_INVLPG_MULTI | ||
306 | * linear_addr: Linear address to be flushed. | ||
307 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. | ||
308 | * | ||
309 | * cmd: MMUEXT_TLB_FLUSH_ALL | ||
310 | * No additional arguments. Flushes all VCPUs' TLBs. | ||
311 | * | ||
312 | * cmd: MMUEXT_INVLPG_ALL | ||
313 | * linear_addr: Linear address to be flushed from all VCPUs' TLBs. | ||
314 | * | ||
315 | * cmd: MMUEXT_FLUSH_CACHE | ||
316 | * No additional arguments. Writes back and flushes cache contents. | ||
317 | * | ||
318 | * cmd: MMUEXT_FLUSH_CACHE_GLOBAL | ||
319 | * No additional arguments. Writes back and flushes cache contents | ||
320 | * on all CPUs in the system. | ||
321 | * | ||
322 | * cmd: MMUEXT_SET_LDT | ||
323 | * linear_addr: Linear address of LDT base (NB. must be page-aligned). | ||
324 | * nr_ents: Number of entries in LDT. | ||
325 | * | ||
326 | * cmd: MMUEXT_CLEAR_PAGE | ||
327 | * mfn: Machine frame number to be cleared. | ||
328 | * | ||
329 | * cmd: MMUEXT_COPY_PAGE | ||
330 | * mfn: Machine frame number of the destination page. | ||
331 | * src_mfn: Machine frame number of the source page. | ||
332 | * | ||
333 | * cmd: MMUEXT_[UN]MARK_SUPER | ||
334 | * mfn: Machine frame number of head of superpage to be [un]marked. | ||
335 | */ | ||
/*
 * Command codes for HYPERVISOR_mmuext_op() (struct mmuext_op.cmd); the
 * per-command argument usage is described in the comment above.
 * ABI values -- do not renumber.
 * NOTE(review): value 14 is skipped here; presumably a retired operation
 * in the upstream Xen interface -- confirm against xen/include/public/xen.h.
 */
#define MMUEXT_PIN_L1_TABLE		0
#define MMUEXT_PIN_L2_TABLE		1
#define MMUEXT_PIN_L3_TABLE		2
#define MMUEXT_PIN_L4_TABLE		3
#define MMUEXT_UNPIN_TABLE		4
#define MMUEXT_NEW_BASEPTR		5
#define MMUEXT_TLB_FLUSH_LOCAL		6
#define MMUEXT_INVLPG_LOCAL		7
#define MMUEXT_TLB_FLUSH_MULTI		8
#define MMUEXT_INVLPG_MULTI		9
#define MMUEXT_TLB_FLUSH_ALL		10
#define MMUEXT_INVLPG_ALL		11
#define MMUEXT_FLUSH_CACHE		12
#define MMUEXT_SET_LDT			13
#define MMUEXT_NEW_USER_BASEPTR		15
#define MMUEXT_CLEAR_PAGE		16
#define MMUEXT_COPY_PAGE		17
#define MMUEXT_FLUSH_CACHE_GLOBAL	18
#define MMUEXT_MARK_SUPER		19
#define MMUEXT_UNMARK_SUPER		20
356 | |||
#ifndef __ASSEMBLY__
/*
 * One extended-MMU operation, passed (as an array) to HYPERVISOR_mmuext_op().
 * Which union member of arg1/arg2 is meaningful depends on 'cmd'; see the
 * MMUEXT_* command list and the per-command descriptions above.
 */
struct mmuext_op {
	unsigned int cmd;	/* MMUEXT_* command code */
	union {
		/* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
		 * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
		xen_pfn_t mfn;
		/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
		unsigned long linear_addr;
	} arg1;
	union {
		/* SET_LDT */
		unsigned int nr_ents;
		/* TLB_FLUSH_MULTI, INVLPG_MULTI */
		void *vcpumask;
		/* COPY_PAGE */
		xen_pfn_t src_mfn;
	} arg2;
};
DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
#endif
378 | |||
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
/* Bits [1:0] select the flush type; bit 2 selects the flush scope. */
#define UVMF_NONE		(0UL<<0) /* No flushing at all.   */
#define UVMF_TLB_FLUSH		(1UL<<0) /* Flush entire TLB(s).  */
#define UVMF_INVLPG		(2UL<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK	(3UL<<0)
#define UVMF_MULTI		(0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL		(0UL<<2) /* Flush local TLB.      */
#define UVMF_ALL		(1UL<<2) /* Flush all TLBs.       */
389 | |||
/*
 * Commands to HYPERVISOR_console_io() (the hypercall's 'cmd' argument).
 */
#define CONSOLEIO_write		0
#define CONSOLEIO_read		1
395 | |||
/*
 * Commands to HYPERVISOR_vm_assist(): each VMASST_TYPE_* assist below is
 * independently enabled or disabled with one of these commands.
 */
#define VMASST_CMD_enable		0
#define VMASST_CMD_disable		1

/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments	0

/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify	1

/*
 * x86 guests: support writes to bottom-level PTEs.
 * NB1. Page-directory entries cannot be written.
 * NB2. Guest must continue to remove all writable mappings of PTEs.
 */
#define VMASST_TYPE_writable_pagetables	2

/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3	3

/*
 * x86 guests: Sane behaviour for virtual iopl
 *  - virtual iopl updated from do_iret() hypercalls.
 *  - virtual iopl reported in bounce frames.
 *  - guest kernels assumed to be level 0 for the purpose of iopl checks.
 */
#define VMASST_TYPE_architectural_iopl	4

/*
 * All guests: activate update indicator in vcpu_runstate_info
 * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
 * vcpu_runstate_info during updates of the runstate information.
 */
#define VMASST_TYPE_runstate_update_flag	5

/* Highest VMASST_TYPE_* value defined above. */
#define MAX_VMASST_TYPE	5
434 | |||
435 | #ifndef __ASSEMBLY__ | ||
436 | |||
typedef uint16_t domid_t;	/* Domain identifier. */

/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)

/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)

/*
 * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
 * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
 * is useful to ensure that no mappings to the OS's own heap are accidentally
 * installed. (e.g., in Linux this could cause havoc as reference counts
 * aren't adjusted on the I/O-mapping code path).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
 * be specified by any calling domain.
 */
#define DOMID_IO (0x7FF1U)

/*
 * DOMID_XEN is used to allow privileged domains to map restricted parts of
 * Xen's heap space (e.g., the machine_to_phys table).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
 * the caller is privileged.
 */
#define DOMID_XEN (0x7FF2U)

/* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)

/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)

/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
472 | |||
/*
 * Send an array of these to HYPERVISOR_mmu_update().
 * NB. The fields are natural pointer/address size for this architecture.
 * The low two bits of 'ptr' encode the MMU_* command (see above); the
 * remaining bits are the machine address operated on.
 */
struct mmu_update {
    uint64_t ptr;       /* Machine address of PTE. */
    uint64_t val;       /* New contents of PTE. */
};
DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
482 | |||
/*
 * Send an array of these to HYPERVISOR_multicall().
 * NB. The fields are logically the natural register size for this
 * architecture. In cases where xen_ulong_t is larger than this then
 * any unused bits in the upper portion must be zero.
 */
struct multicall_entry {
	xen_ulong_t op;		/* hypercall to perform */
	xen_long_t result;	/* filled in with the hypercall's return value */
	xen_ulong_t args[6];	/* hypercall arguments */
};
DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
495 | |||
struct vcpu_time_info {
	/*
	 * Updates to the following values are preceded and followed
	 * by an increment of 'version'. The guest can therefore
	 * detect updates by looking for changes to 'version'. If the
	 * least-significant bit of the version number is set then an
	 * update is in progress and the guest must wait to read a
	 * consistent set of values. The correct way to interact with
	 * the version number is similar to Linux's seqlock: see the
	 * implementations of read_seqbegin/read_seqretry.
	 */
	uint32_t version;
	uint32_t pad0;		/* explicit padding; keeps ABI layout stable */
	uint64_t tsc_timestamp;	/* TSC at last update of time vals. */
	uint64_t system_time;	/* Time, in nanosecs, since boot. */
	/*
	 * Current system time:
	 *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
	 * CPU frequency (Hz):
	 *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
	 */
	uint32_t tsc_to_system_mul;
	int8_t tsc_shift;
	int8_t pad1[3];		/* explicit padding */
}; /* 32 bytes */
521 | |||
struct vcpu_info {
	/*
	 * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
	 * a pending notification for a particular VCPU. It is then cleared
	 * by the guest OS /before/ checking for pending work, thus avoiding
	 * a set-and-check race. Note that the mask is only accessed by Xen
	 * on the CPU that is currently hosting the VCPU. This means that the
	 * pending and mask flags can be updated by the guest without special
	 * synchronisation (i.e., no need for the x86 LOCK prefix).
	 * This may seem suboptimal because if the pending flag is set by
	 * a different CPU then an IPI may be scheduled even when the mask
	 * is set. However, note:
	 *  1. The task of 'interrupt holdoff' is covered by the per-event-
	 *     channel mask bits. A 'noisy' event that is continually being
	 *     triggered can be masked at source at this very precise
	 *     granularity.
	 *  2. The main purpose of the per-VCPU mask is therefore to restrict
	 *     reentrant execution: whether for concurrency control, or to
	 *     prevent unbounded stack usage. Whatever the purpose, we expect
	 *     that the mask will be asserted only for short periods at a time,
	 *     and so the likelihood of a 'spurious' IPI is suitably small.
	 * The mask is read before making an event upcall to the guest: a
	 * non-zero mask therefore guarantees that the VCPU will not receive
	 * an upcall activation. The mask is cleared when the VCPU requests
	 * to block: this avoids wakeup-waiting races.
	 */
	uint8_t evtchn_upcall_pending;	/* set by Xen, cleared by guest */
	uint8_t evtchn_upcall_mask;	/* written only by the guest */
	xen_ulong_t evtchn_pending_sel;	/* per-vcpu selector word (see shared_info) */
	struct arch_vcpu_info arch;	/* architecture-specific per-vcpu state */
	struct pvclock_vcpu_time_info time;	/* pv clock time info for this vcpu */
}; /* 64 bytes (x86) */
554 | |||
/*
 * Xen/kernel shared data -- pointer provided in start_info.
 * NB. We expect that this struct is smaller than a page.
 */
struct shared_info {
	struct vcpu_info vcpu_info[MAX_VIRT_CPUS];

	/*
	 * A domain can create "event channels" on which it can send and receive
	 * asynchronous event notifications. There are three classes of event that
	 * are delivered by this mechanism:
	 *  1. Bi-directional inter- and intra-domain connections. Domains must
	 *     arrange out-of-band to set up a connection (usually by allocating
	 *     an unbound 'listener' port and advertising that via a storage service
	 *     such as xenstore).
	 *  2. Physical interrupts. A domain with suitable hardware-access
	 *     privileges can bind an event-channel port to a physical interrupt
	 *     source.
	 *  3. Virtual interrupts ('events'). A domain can bind an event-channel
	 *     port to a virtual interrupt source, such as the virtual-timer
	 *     device or the emergency console.
	 *
	 * Event channels are addressed by a "port index". Each channel is
	 * associated with two bits of information:
	 *  1. PENDING -- notifies the domain that there is a pending notification
	 *     to be processed. This bit is cleared by the guest.
	 *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
	 *     will cause an asynchronous upcall to be scheduled. This bit is only
	 *     updated by the guest. It is read-only within Xen. If a channel
	 *     becomes pending while the channel is masked then the 'edge' is lost
	 *     (i.e., when the channel is unmasked, the guest must manually handle
	 *     pending notifications as no upcall will be scheduled by Xen).
	 *
	 * To expedite scanning of pending notifications, any 0->1 pending
	 * transition on an unmasked channel causes a corresponding bit in a
	 * per-vcpu selector word to be set. Each bit in the selector covers a
	 * 'C long' in the PENDING bitfield array.
	 */
	/* sizeof(xen_ulong_t)*8 words => one selector bit per pending word. */
	xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
	xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];

	/*
	 * Wallclock time: updated only by control software. Guests should base
	 * their gettimeofday() syscall on this wallclock-base value.
	 */
	struct pvclock_wall_clock wc;

	struct arch_shared_info arch;	/* architecture-specific shared state */

};
605 | |||
606 | /* | ||
607 | * Start-of-day memory layout | ||
608 | * | ||
609 | * 1. The domain is started within contiguous virtual-memory region. | ||
610 | * 2. The contiguous region begins and ends on an aligned 4MB boundary. | ||
 * 3. This is the order of bootstrap elements in the initial virtual region:
612 | * a. relocated kernel image | ||
613 | * b. initial ram disk [mod_start, mod_len] | ||
614 | * (may be omitted) | ||
615 | * c. list of allocated page frames [mfn_list, nr_pages] | ||
616 | * (unless relocated due to XEN_ELFNOTE_INIT_P2M) | ||
617 | * d. start_info_t structure [register ESI (x86)] | ||
618 | * in case of dom0 this page contains the console info, too | ||
619 | * e. unless dom0: xenstore ring page | ||
620 | * f. unless dom0: console ring page | ||
621 | * g. bootstrap page tables [pt_base, CR3 (x86)] | ||
622 | * h. bootstrap stack [register ESP (x86)] | ||
623 | * 4. Bootstrap elements are packed together, but each is 4kB-aligned. | ||
624 | * 5. The list of page frames forms a contiguous 'pseudo-physical' memory | ||
625 | * layout for the domain. In particular, the bootstrap virtual-memory | ||
626 | * region is a 1:1 mapping to the first section of the pseudo-physical map. | ||
627 | * 6. All bootstrap elements are mapped read-writable for the guest OS. The | ||
628 | * only exception is the bootstrap page table, which is mapped read-only. | ||
629 | * 7. There is guaranteed to be at least 512kB padding after the final | ||
630 | * bootstrap element. If necessary, the bootstrap virtual region is | ||
631 | * extended by an extra 4MB to ensure this. | ||
632 | */ | ||
633 | |||
#define MAX_GUEST_CMDLINE 1024
/*
 * Boot-time information handed to the guest; its placement is described in
 * the "Start-of-day memory layout" comment above.
 */
struct start_info {
	/* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
	char magic[32];             /* "xen-<version>-<platform>".            */
	unsigned long nr_pages;     /* Total pages allocated to this domain.  */
	unsigned long shared_info;  /* MACHINE address of shared info struct. */
	uint32_t flags;             /* SIF_xxx flags.                         */
	xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
	uint32_t store_evtchn;      /* Event channel for store communication. */
	union {
		struct {
			xen_pfn_t mfn;      /* MACHINE page number of console page.   */
			uint32_t  evtchn;   /* Event channel for console page.        */
		} domU;
		struct {
			uint32_t info_off;  /* Offset of console_info struct.         */
			uint32_t info_size; /* Size of console_info struct from start.*/
		} dom0;
	} console;
	/* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
	unsigned long pt_base;      /* VIRTUAL address of page directory.     */
	unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
	unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
	unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
	unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
	int8_t cmd_line[MAX_GUEST_CMDLINE];	/* kernel command line */
	/* The pfn range here covers both page table and p->m table frames.   */
	unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table.    */
	unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table.  */
};
664 | |||
/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED      (1<<0)  /* Is the domain privileged? */
#define SIF_INITDOMAIN      (1<<1)  /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD   (1<<2)  /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN   (1<<3)  /* Is mod_start a PFN? */
#define SIF_VIRT_P2M_4TOOLS (1<<4)  /* Do Xen tools understand a virt. mapped */
				    /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK       (0xFF<<8) /* reserve 1 byte for xen-pm options */
673 | |||
/*
 * A multiboot module is a package containing modules very similar to a
 * multiboot module array. The only differences are:
 * - the array of module descriptors is by convention simply at the beginning
 *   of the multiboot module,
 * - addresses in the module descriptors are based on the beginning of the
 *   multiboot module,
 * - the number of modules is determined by a termination descriptor that has
 *   mod_start == 0.
 *
 * This permits both building it statically and referencing it in a
 * configuration file, and lets the PV guest easily rebase the addresses to
 * virtual addresses while at the same time counting the number of modules.
 */
struct xen_multiboot_mod_list {
	/* Address of first byte of the module */
	uint32_t mod_start;
	/* Address of last byte of the module (inclusive) */
	uint32_t mod_end;
	/* Address of zero-terminated command line */
	uint32_t cmdline;
	/* Unused, must be zero */
	uint32_t pad;
};
/*
 * The console structure in start_info.console.dom0
 *
 * This structure includes a variety of information required to
 * have a working VGA/VESA console.
 */
struct dom0_vga_console_info {
	uint8_t video_type;	/* one of the XEN_VGATYPE_* values below */
#define XEN_VGATYPE_TEXT_MODE_3 0x03
#define XEN_VGATYPE_VESA_LFB    0x23
#define XEN_VGATYPE_EFI_LFB     0x70

	union {
		/* Valid when video_type == XEN_VGATYPE_TEXT_MODE_3. */
		struct {
			/* Font height, in pixels. */
			uint16_t font_height;
			/* Cursor location (column, row). */
			uint16_t cursor_x, cursor_y;
			/* Number of rows and columns (dimensions in characters). */
			uint16_t rows, columns;
		} text_mode_3;

		/* Linear-framebuffer modes (VESA_LFB / EFI_LFB). */
		struct {
			/* Width and height, in pixels. */
			uint16_t width, height;
			/* Bytes per scan line. */
			uint16_t bytes_per_line;
			/* Bits per pixel. */
			uint16_t bits_per_pixel;
			/* LFB physical address, and size (in units of 64kB). */
			uint32_t lfb_base;
			uint32_t lfb_size;
			/* RGB mask offsets and sizes, as defined by VBE 1.2+ */
			uint8_t  red_pos, red_size;
			uint8_t  green_pos, green_size;
			uint8_t  blue_pos, blue_size;
			uint8_t  rsvd_pos, rsvd_size;

			/* VESA capabilities (offset 0xa, VESA command 0x4f00). */
			uint32_t gbl_caps;
			/* Mode attributes (offset 0x0, VESA command 0x4f01). */
			uint16_t mode_attrs;
		} vesa_lfb;
	} u;
};
743 | |||
/* 64-bit CPU map value (presumably one bit per CPU -- confirm with users). */
typedef uint64_t cpumap_t;

/* 16-byte opaque handle identifying a domain. */
typedef uint8_t xen_domain_handle_t[16];

/* Turn a plain number into a C unsigned long constant. */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
751 | |||
/* Version of the transcendent-memory (tmem) interface described below. */
#define TMEM_SPEC_VERSION 1

/* Descriptor for one transcendent-memory operation. */
struct tmem_op {
	uint32_t cmd;		/* tmem command code */
	int32_t pool_id;	/* pool the operation targets */
	union {
		struct {  /* for cmd == TMEM_NEW_POOL */
			uint64_t uuid[2];
			uint32_t flags;
		} new;
		struct {  /* all other commands */
			uint64_t oid[3];
			uint32_t index;
			uint32_t tmem_offset;
			uint32_t pfn_offset;
			uint32_t len;
			GUEST_HANDLE(void) gmfn; /* guest machine page frame */
		} gen;
	} u;
};

DEFINE_GUEST_HANDLE(u64);
774 | |||
775 | #else /* __ASSEMBLY__ */ | ||
776 | |||
777 | /* In assembly code we cannot use C numeric constant suffixes. */ | ||
778 | #define mk_unsigned_long(x) x | ||
779 | |||
780 | #endif /* !__ASSEMBLY__ */ | ||
781 | |||
782 | #endif /* __XEN_PUBLIC_XEN_H__ */ | ||
783 |