Blame view
drivers/xen/events.c
44.1 KB
e46cdb66c xen: event channels |
1 2 3 4 5 6 7 |
/* * Xen event channels * * Xen models interrupts with abstract event channels. Because each * domain gets 1024 event channels, but NR_IRQ is not that large, we * must dynamically map irqs<->event channels. The event channels * interface with the rest of the kernel by defining a xen interrupt |
25985edce Fix common misspe... |
8 |
* chip. When an event is received, it is mapped to an irq and sent |
e46cdb66c xen: event channels |
9 10 11 12 13 14 15 16 17 18 |
* through the normal interrupt processing path. * * There are four kinds of events which can be mapped to an event * channel: * * 1. Inter-domain notifications. This includes all the virtual * device events, since they're driven by front-ends in another domain * (typically dom0). * 2. VIRQs, typically used for timers. These are per-cpu events. * 3. IPIs. |
d46a78b05 xen: implement pi... |
19 |
* 4. PIRQs - Hardware interrupts. |
e46cdb66c xen: event channels |
20 21 22 23 24 25 26 27 28 |
* * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/string.h> |
28e08861b xen: fix too earl... |
29 |
#include <linux/bootmem.h> |
5a0e3ad6a include cleanup: ... |
30 |
#include <linux/slab.h> |
b21ddbf50 xen: dynamically ... |
31 |
#include <linux/irqnr.h> |
f731e3ef0 xen: remap MSIs i... |
32 |
#include <linux/pci.h> |
e46cdb66c xen: event channels |
33 |
|
0ec53ecf3 xen/arm: receive ... |
34 |
#ifdef CONFIG_X86 |
38e20b07e x86/xen: event ch... |
35 |
#include <asm/desc.h> |
e46cdb66c xen: event channels |
36 37 |
#include <asm/ptrace.h> #include <asm/irq.h> |
792dc4f6c xen: use our own ... |
38 |
#include <asm/idle.h> |
0794bfc74 xen: identity map... |
39 |
#include <asm/io_apic.h> |
9846ff10a xen: support pirq... |
40 |
#include <asm/xen/page.h> |
42a1de56f xen: implement xe... |
41 |
#include <asm/xen/pci.h> |
0ec53ecf3 xen/arm: receive ... |
42 43 |
#endif #include <asm/sync_bitops.h> |
e46cdb66c xen: event channels |
44 |
#include <asm/xen/hypercall.h> |
8d1b87530 arch/i386/xen/eve... |
45 |
#include <asm/xen/hypervisor.h> |
e46cdb66c xen: event channels |
46 |
|
38e20b07e x86/xen: event ch... |
47 48 |
#include <xen/xen.h> #include <xen/hvm.h> |
e04d0d076 xen: move events.... |
49 |
#include <xen/xen-ops.h> |
e46cdb66c xen: event channels |
50 51 52 |
#include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> |
38e20b07e x86/xen: event ch... |
53 54 |
#include <xen/interface/hvm/hvm_op.h> #include <xen/interface/hvm/params.h> |
0ec53ecf3 xen/arm: receive ... |
55 56 57 |
#include <xen/interface/physdev.h> #include <xen/interface/sched.h> #include <asm/hw_irq.h> |
e46cdb66c xen: event channels |
58 |
|
e46cdb66c xen: event channels |
59 60 61 62 |
/* * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ |
773659483 xen/irq: Alter th... |
63 |
static DEFINE_MUTEX(irq_mapping_update_lock); |
e46cdb66c xen: event channels |
64 |
|
6cb6537d3 xen: events: main... |
65 |
static LIST_HEAD(xen_irq_list_head); |
e46cdb66c xen: event channels |
66 |
/* IRQ <-> VIRQ mapping. */ |
204fba4aa percpu: cleanup p... |
67 |
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; |
e46cdb66c xen: event channels |
68 |
|
f87e4cac4 xen: SMP guest su... |
69 |
/* IRQ <-> IPI mapping */ |
204fba4aa percpu: cleanup p... |
70 |
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; |
f87e4cac4 xen: SMP guest su... |
71 |
|
ced40d0f3 xen: pack all irq... |
72 73 |
/* Interrupt types. */ enum xen_irq_type { |
d77bbd4db xen: remove irq b... |
74 |
IRQT_UNBOUND = 0, |
f87e4cac4 xen: SMP guest su... |
75 76 77 78 79 |
IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN }; |
e46cdb66c xen: event channels |
80 |
|
ced40d0f3 xen: pack all irq... |
81 82 83 84 85 86 |
/* * Packed IRQ information: * type - enum xen_irq_type * event channel - irq->event channel mapping * cpu - cpu this event channel is bound to * index - type-specific information: |
dec02dea1 xen: drop trackin... |
87 |
* PIRQ - physical IRQ, GSI, flags, and owner domain |
ced40d0f3 xen: pack all irq... |
88 89 90 91 |
* VIRQ - virq number * IPI - IPI vector * EVTCHN - */ |
088c05a84 Xen: fix whitespa... |
92 |
struct irq_info { |
6cb6537d3 xen: events: main... |
93 |
struct list_head list; |
420eb554d xen/event: Add re... |
94 |
int refcnt; |
ced40d0f3 xen: pack all irq... |
95 |
enum xen_irq_type type; /* type */ |
6cb6537d3 xen: events: main... |
96 |
unsigned irq; |
ced40d0f3 xen: pack all irq... |
97 98 99 100 101 102 103 |
unsigned short evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ union { unsigned short virq; enum ipi_vector ipi; struct { |
7a043f119 xen: support pirq... |
104 |
unsigned short pirq; |
ced40d0f3 xen: pack all irq... |
105 |
unsigned short gsi; |
d46a78b05 xen: implement pi... |
106 |
unsigned char flags; |
beafbdc1d xen/irq: Check if... |
107 |
uint16_t domid; |
ced40d0f3 xen: pack all irq... |
108 109 110 |
} pirq; } u; }; |
d46a78b05 xen: implement pi... |
111 |
#define PIRQ_NEEDS_EOI (1 << 0) |
15ebbb82b xen: fix shared i... |
112 |
#define PIRQ_SHAREABLE (1 << 1) |
ced40d0f3 xen: pack all irq... |
113 |
|
b21ddbf50 xen: dynamically ... |
114 |
static int *evtchn_to_irq; |
bf86ad809 xen: events: pirq... |
115 |
#ifdef CONFIG_X86 |
9846ff10a xen: support pirq... |
116 |
static unsigned long *pirq_eoi_map; |
bf86ad809 xen: events: pirq... |
117 |
#endif |
9846ff10a xen: support pirq... |
118 |
static bool (*pirq_needs_eoi)(unsigned irq); |
3b32f574a xen: statically i... |
119 |
|
c81611c4e xen: event channe... |
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 |
/* * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be * careful to only use bitops which allow for this (e.g * test_bit/find_first_bit and friends but not __ffs) and to pass * BITS_PER_EVTCHN_WORD as the bitmask length. */ #define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8) /* * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t * array. Primarily to avoid long lines (hence the terse name). */ #define BM(x) (unsigned long *)(x) /* Find the first set bit in a evtchn mask */ #define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD) static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD], |
cb60d1142 xen: events: use ... |
136 |
cpu_evtchn_mask); |
e46cdb66c xen: event channels |
137 |
|
e46cdb66c xen: event channels |
138 139 |
/* Xen will never allocate port zero for any purpose. */ #define VALID_EVTCHN(chn) ((chn) != 0) |
e46cdb66c xen: event channels |
140 |
static struct irq_chip xen_dynamic_chip; |
aaca49642 xen: use percpu i... |
141 |
static struct irq_chip xen_percpu_chip; |
d46a78b05 xen: implement pi... |
142 |
static struct irq_chip xen_pirq_chip; |
7e186bdd0 xen: do not clear... |
143 144 |
static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); |
e46cdb66c xen: event channels |
145 |
|
9158c3588 xen: events: turn... |
146 147 |
/* Return the irq_info record hung off an IRQ's handler data. */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}
9158c3588 xen: events: turn... |
151 152 |
/* Constructors for packed IRQ information. */ static void xen_irq_info_common_init(struct irq_info *info, |
3d4cfa373 xen: events: push... |
153 |
unsigned irq, |
9158c3588 xen: events: turn... |
154 155 156 |
enum xen_irq_type type, unsigned short evtchn, unsigned short cpu) |
ced40d0f3 xen: pack all irq... |
157 |
{ |
9158c3588 xen: events: turn... |
158 159 160 161 |
BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; |
6cb6537d3 xen: events: main... |
162 |
info->irq = irq; |
9158c3588 xen: events: turn... |
163 164 |
info->evtchn = evtchn; info->cpu = cpu; |
3d4cfa373 xen: events: push... |
165 166 |
evtchn_to_irq[evtchn] = irq; |
934f585e9 xen: clear IRQ_NO... |
167 168 |
irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); |
ced40d0f3 xen: pack all irq... |
169 |
} |
9158c3588 xen: events: turn... |
170 171 |
static void xen_irq_info_evtchn_init(unsigned irq, unsigned short evtchn) |
ced40d0f3 xen: pack all irq... |
172 |
{ |
9158c3588 xen: events: turn... |
173 |
struct irq_info *info = info_for_irq(irq); |
3d4cfa373 xen: events: push... |
174 |
xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0); |
ced40d0f3 xen: pack all irq... |
175 |
} |
3d4cfa373 xen: events: push... |
176 177 |
static void xen_irq_info_ipi_init(unsigned cpu, unsigned irq, |
9158c3588 xen: events: turn... |
178 179 |
unsigned short evtchn, enum ipi_vector ipi) |
e46cdb66c xen: event channels |
180 |
{ |
9158c3588 xen: events: turn... |
181 |
struct irq_info *info = info_for_irq(irq); |
3d4cfa373 xen: events: push... |
182 |
xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0); |
9158c3588 xen: events: turn... |
183 184 |
info->u.ipi = ipi; |
3d4cfa373 xen: events: push... |
185 186 |
per_cpu(ipi_to_irq, cpu)[ipi] = irq; |
ced40d0f3 xen: pack all irq... |
187 |
} |
3d4cfa373 xen: events: push... |
188 189 |
static void xen_irq_info_virq_init(unsigned cpu, unsigned irq, |
9158c3588 xen: events: turn... |
190 191 |
unsigned short evtchn, unsigned short virq) |
ced40d0f3 xen: pack all irq... |
192 |
{ |
9158c3588 xen: events: turn... |
193 |
struct irq_info *info = info_for_irq(irq); |
3d4cfa373 xen: events: push... |
194 |
xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0); |
9158c3588 xen: events: turn... |
195 196 |
info->u.virq = virq; |
3d4cfa373 xen: events: push... |
197 198 |
per_cpu(virq_to_irq, cpu)[virq] = irq; |
ced40d0f3 xen: pack all irq... |
199 |
} |
9158c3588 xen: events: turn... |
200 201 202 203 |
static void xen_irq_info_pirq_init(unsigned irq, unsigned short evtchn, unsigned short pirq, unsigned short gsi, |
beafbdc1d xen/irq: Check if... |
204 |
uint16_t domid, |
9158c3588 xen: events: turn... |
205 |
unsigned char flags) |
ced40d0f3 xen: pack all irq... |
206 |
{ |
9158c3588 xen: events: turn... |
207 |
struct irq_info *info = info_for_irq(irq); |
3d4cfa373 xen: events: push... |
208 |
xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0); |
9158c3588 xen: events: turn... |
209 210 211 |
info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; |
beafbdc1d xen/irq: Check if... |
212 |
info->u.pirq.domid = domid; |
9158c3588 xen: events: turn... |
213 |
info->u.pirq.flags = flags; |
e46cdb66c xen: event channels |
214 215 216 217 218 |
} /* * Accessors for packed IRQ information. */ |
ced40d0f3 xen: pack all irq... |
219 |
static unsigned int evtchn_from_irq(unsigned irq) |
e46cdb66c xen: event channels |
220 |
{ |
110e7c7e4 xen/event: valida... |
221 222 223 |
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d! ", irq))) return 0; |
ced40d0f3 xen: pack all irq... |
224 |
return info_for_irq(irq)->evtchn; |
e46cdb66c xen: event channels |
225 |
} |
d4c045364 xen: add irq_from... |
226 227 228 229 230 |
/* Map an event-channel port back to its Linux irq (-1 if unbound). */
unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
ced40d0f3 xen: pack all irq... |
231 |
static enum ipi_vector ipi_from_irq(unsigned irq) |
e46cdb66c xen: event channels |
232 |
{ |
ced40d0f3 xen: pack all irq... |
233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 |
struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } static unsigned virq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } |
7a043f119 xen: support pirq... |
250 251 252 253 254 255 256 257 258 |
/* Physical IRQ bound to @irq; BUGs if the irq is not a PIRQ. */
static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}
ced40d0f3 xen: pack all irq... |
259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 |
static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

/* CPU an event channel is bound to; 0 if the port has no irq. */
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];

	return (irq != -1) ? cpu_from_irq(irq) : 0;
}
bf86ad809 xen: events: pirq... |
279 |
#ifdef CONFIG_X86
/* Consult the shared pirq_eoi_map to decide whether @irq needs an EOI. */
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif
d46a78b05 xen: implement pi... |
285 |
|
9846ff10a xen: support pirq... |
286 287 288 |
static bool pirq_needs_eoi_flag(unsigned irq) { struct irq_info *info = info_for_irq(irq); |
d46a78b05 xen: implement pi... |
289 290 291 292 |
BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } |
c81611c4e xen: event channe... |
293 294 295 |
static inline xen_ulong_t active_evtchns(unsigned int cpu, struct shared_info *sh, unsigned int idx) |
e46cdb66c xen: event channels |
296 |
{ |
088c05a84 Xen: fix whitespa... |
297 |
return sh->evtchn_pending[idx] & |
cb60d1142 xen: events: use ... |
298 |
per_cpu(cpu_evtchn_mask, cpu)[idx] & |
088c05a84 Xen: fix whitespa... |
299 |
~sh->evtchn_mask[idx]; |
e46cdb66c xen: event channels |
300 301 302 303 304 305 306 307 |
} static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) { int irq = evtchn_to_irq[chn]; BUG_ON(irq == -1); #ifdef CONFIG_SMP |
c9e265e03 xen: Switch to ne... |
308 |
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); |
e46cdb66c xen: event channels |
309 |
#endif |
c81611c4e xen: event channe... |
310 311 |
clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)))); set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu))); |
e46cdb66c xen: event channels |
312 |
|
ca62ce8cd xen: events: dyna... |
313 |
info_for_irq(irq)->cpu = cpu; |
e46cdb66c xen: event channels |
314 315 316 317 |
} static void init_evtchn_cpu_bindings(void) { |
1c6969ec8 xen/evtchn: clear... |
318 |
int i; |
e46cdb66c xen: event channels |
319 |
#ifdef CONFIG_SMP |
6cb6537d3 xen: events: main... |
320 |
struct irq_info *info; |
10e580842 genirq: use itera... |
321 |
|
e46cdb66c xen: event channels |
322 |
/* By default all event channels notify CPU#0. */ |
6cb6537d3 xen: events: main... |
323 324 |
list_for_each_entry(info, &xen_irq_list_head, list) { struct irq_desc *desc = irq_to_desc(info->irq); |
c9e265e03 xen: Switch to ne... |
325 |
cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); |
0b8f1efad sparse irq_desc[]... |
326 |
} |
e46cdb66c xen: event channels |
327 |
#endif |
1c6969ec8 xen/evtchn: clear... |
328 |
for_each_possible_cpu(i) |
cb60d1142 xen: events: use ... |
329 |
memset(per_cpu(cpu_evtchn_mask, i), |
d8251a942 xen/events: initi... |
330 |
(i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); |
e46cdb66c xen: event channels |
331 |
} |
e46cdb66c xen: event channels |
332 333 334 |
static inline void clear_evtchn(int port) { struct shared_info *s = HYPERVISOR_shared_info; |
c81611c4e xen: event channe... |
335 |
sync_clear_bit(port, BM(&s->evtchn_pending[0])); |
e46cdb66c xen: event channels |
336 337 338 339 340 |
} static inline void set_evtchn(int port) { struct shared_info *s = HYPERVISOR_shared_info; |
c81611c4e xen: event channe... |
341 |
sync_set_bit(port, BM(&s->evtchn_pending[0])); |
e46cdb66c xen: event channels |
342 |
} |
168d2f464 xen: save previou... |
343 344 345 |
static inline int test_evtchn(int port) { struct shared_info *s = HYPERVISOR_shared_info; |
c81611c4e xen: event channe... |
346 |
return sync_test_bit(port, BM(&s->evtchn_pending[0])); |
168d2f464 xen: save previou... |
347 |
} |
e46cdb66c xen: event channels |
348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 |
/** * notify_remote_via_irq - send event to remote end of event channel via irq * @irq: irq of event channel to send event to * * Unlike notify_remote_via_evtchn(), this is safe to use across * save/restore. Notifications on a broken connection are silently * dropped. */ void notify_remote_via_irq(int irq) { int evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL_GPL(notify_remote_via_irq); static void mask_evtchn(int port) { struct shared_info *s = HYPERVISOR_shared_info; |
c81611c4e xen: event channe... |
369 |
sync_set_bit(port, BM(&s->evtchn_mask[0])); |
e46cdb66c xen: event channels |
370 371 372 373 374 375 |
} static void unmask_evtchn(int port) { struct shared_info *s = HYPERVISOR_shared_info; unsigned int cpu = get_cpu(); |
b5e579232 xen/events: fix u... |
376 |
int do_hypercall = 0, evtchn_pending = 0; |
e46cdb66c xen: event channels |
377 378 |
BUG_ON(!irqs_disabled()); |
b5e579232 xen/events: fix u... |
379 380 |
if (unlikely((cpu != cpu_from_evtchn(port)))) do_hypercall = 1; |
c26377e62 xen/events: avoid... |
381 382 383 384 385 386 387 388 389 390 |
else { /* * Need to clear the mask before checking pending to * avoid a race with an event becoming pending. * * EVTCHNOP_unmask will only trigger an upcall if the * mask bit was set, so if a hypercall is needed * remask the event. */ sync_clear_bit(port, BM(&s->evtchn_mask[0])); |
c81611c4e xen: event channe... |
391 |
evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); |
b5e579232 xen/events: fix u... |
392 |
|
c26377e62 xen/events: avoid... |
393 394 395 396 397 |
if (unlikely(evtchn_pending && xen_hvm_domain())) { sync_set_bit(port, BM(&s->evtchn_mask[0])); do_hypercall = 1; } } |
b5e579232 xen/events: fix u... |
398 399 400 401 402 |
/* Slow path (hypercall) if this is a non-local port or if this is * an hvm domain and an event is pending (hvm domains don't have * their own implementation of irq_enable). */ if (do_hypercall) { |
e46cdb66c xen: event channels |
403 404 405 |
struct evtchn_unmask unmask = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); } else { |
780f36d8b xen: Use this_cpu... |
406 |
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
e46cdb66c xen: event channels |
407 |
|
e46cdb66c xen: event channels |
408 409 410 411 412 |
/* * The following is basically the equivalent of * 'hw_resend_irq'. Just like a real IO-APIC we 'lose * the interrupt edge' if the channel is masked. */ |
b5e579232 xen/events: fix u... |
413 |
if (evtchn_pending && |
c81611c4e xen: event channe... |
414 415 |
!sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD, BM(&vcpu_info->evtchn_pending_sel))) |
e46cdb66c xen: event channels |
416 417 418 419 420 |
vcpu_info->evtchn_upcall_pending = 1; } put_cpu(); } |
6cb6537d3 xen: events: main... |
421 422 423 |
static void xen_irq_init(unsigned irq) { struct irq_info *info; |
b5328cd14 xen: Fix compile ... |
424 |
#ifdef CONFIG_SMP |
6cb6537d3 xen: events: main... |
425 426 427 428 |
struct irq_desc *desc = irq_to_desc(irq); /* By default all event channels notify CPU#0. */ cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); |
44626e4a3 xen: events: Fix ... |
429 |
#endif |
6cb6537d3 xen: events: main... |
430 |
|
ca62ce8cd xen: events: dyna... |
431 432 433 434 |
info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) panic("Unable to allocate metadata for IRQ%d ", irq); |
6cb6537d3 xen: events: main... |
435 436 |
info->type = IRQT_UNBOUND; |
420eb554d xen/event: Add re... |
437 |
info->refcnt = -1; |
6cb6537d3 xen: events: main... |
438 |
|
c442b8068 xen: Cleanup geni... |
439 |
irq_set_handler_data(irq, info); |
ca62ce8cd xen: events: dyna... |
440 |
|
6cb6537d3 xen: events: main... |
441 442 |
list_add_tail(&info->list, &xen_irq_list_head); } |
7bee97682 xen: events: prop... |
443 |
/*
 * Allocate a Linux irq with no fixed GSI. Returns the irq number or a
 * negative errno from irq_alloc_desc_from().
 */
static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);
	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}
7bee97682 xen: events: prop... |
466 |
static int __must_check xen_allocate_irq_gsi(unsigned gsi) |
c9df1ce58 xen: events: add ... |
467 468 |
{ int irq; |
89911501f xen: events: allo... |
469 470 471 472 473 474 475 |
/* * A PV guest has no concept of a GSI (since it has no ACPI * nor access to/knowledge of the physical APICs). Therefore * all IRQs are dynamically allocated from the entire IRQ * space. */ if (xen_pv_domain() && !xen_initial_domain()) |
c9df1ce58 xen: events: add ... |
476 477 478 479 |
return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ if (gsi < NR_IRQS_LEGACY) |
6cb6537d3 xen: events: main... |
480 481 482 |
irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); |
c9df1ce58 xen: events: add ... |
483 |
|
6cb6537d3 xen: events: main... |
484 |
xen_irq_init(irq); |
c9df1ce58 xen: events: add ... |
485 486 487 488 489 490 |
return irq; } static void xen_free_irq(unsigned irq) { |
c442b8068 xen: Cleanup geni... |
491 |
struct irq_info *info = irq_get_handler_data(irq); |
6cb6537d3 xen: events: main... |
492 |
|
94032c506 xen/events: Check... |
493 494 |
if (WARN_ON(!info)) return; |
6cb6537d3 xen: events: main... |
495 |
list_del(&info->list); |
9158c3588 xen: events: turn... |
496 |
|
c442b8068 xen: Cleanup geni... |
497 |
irq_set_handler_data(irq, NULL); |
ca62ce8cd xen: events: dyna... |
498 |
|
420eb554d xen/event: Add re... |
499 |
WARN_ON(info->refcnt > 0); |
ca62ce8cd xen: events: dyna... |
500 |
kfree(info); |
721461047 xen: events: do n... |
501 502 503 |
/* Legacy IRQ descriptors are managed by the arch. */ if (irq < NR_IRQS_LEGACY) return; |
c9df1ce58 xen: events: add ... |
504 505 |
irq_free_desc(irq); } |
d46a78b05 xen: implement pi... |
506 507 508 509 510 511 |
static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); |
7a043f119 xen: support pirq... |
512 |
irq_status.irq = pirq_from_irq(irq); |
d46a78b05 xen: implement pi... |
513 514 515 516 517 518 519 520 521 522 523 524 525 526 |
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; if (irq_status.flags & XENIRQSTAT_needs_eoi) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } static bool probing_irq(int irq) { struct irq_desc *desc = irq_to_desc(irq); return desc && desc->action == NULL; } |
7e186bdd0 xen: do not clear... |
527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 |
static void eoi_pirq(struct irq_data *data) { int evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; irq_move_irq(data); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } static void mask_ack_pirq(struct irq_data *data) { disable_dynirq(data); eoi_pirq(data); } |
c9e265e03 xen: Switch to ne... |
549 |
static unsigned int __startup_pirq(unsigned int irq) |
d46a78b05 xen: implement pi... |
550 551 552 553 |
{ struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); int evtchn = evtchn_from_irq(irq); |
15ebbb82b xen: fix shared i... |
554 |
int rc; |
d46a78b05 xen: implement pi... |
555 556 557 558 559 |
BUG_ON(info->type != IRQT_PIRQ); if (VALID_EVTCHN(evtchn)) goto out; |
7a043f119 xen: support pirq... |
560 |
bind_pirq.pirq = pirq_from_irq(irq); |
d46a78b05 xen: implement pi... |
561 |
/* NB. We are happy to share unless we are probing. */ |
15ebbb82b xen: fix shared i... |
562 563 564 565 |
bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { |
d46a78b05 xen: implement pi... |
566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 |
if (!probing_irq(irq)) printk(KERN_INFO "Failed to obtain physical IRQ %d ", irq); return 0; } evtchn = bind_pirq.port; pirq_query_unmask(irq); evtchn_to_irq[evtchn] = irq; bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; out: unmask_evtchn(evtchn); |
7e186bdd0 xen: do not clear... |
582 |
eoi_pirq(irq_get_irq_data(irq)); |
d46a78b05 xen: implement pi... |
583 584 585 |
return 0; } |
c9e265e03 xen: Switch to ne... |
586 587 588 589 590 591 |
static unsigned int startup_pirq(struct irq_data *data) { return __startup_pirq(data->irq); } static void shutdown_pirq(struct irq_data *data) |
d46a78b05 xen: implement pi... |
592 593 |
{ struct evtchn_close close; |
c9e265e03 xen: Switch to ne... |
594 |
unsigned int irq = data->irq; |
d46a78b05 xen: implement pi... |
595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 |
struct irq_info *info = info_for_irq(irq); int evtchn = evtchn_from_irq(irq); BUG_ON(info->type != IRQT_PIRQ); if (!VALID_EVTCHN(evtchn)) return; mask_evtchn(evtchn); close.port = evtchn; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); bind_evtchn_to_cpu(evtchn, 0); evtchn_to_irq[evtchn] = -1; info->evtchn = 0; } |
c9e265e03 xen: Switch to ne... |
613 |
/* irq_chip enable hook: (re)start the PIRQ. */
static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}
c9e265e03 xen: Switch to ne... |
617 |
/* irq_chip disable hook: mask the underlying event channel. */
static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}
68c2c39a7 xen: do not map t... |
621 |
int xen_irq_from_gsi(unsigned gsi) |
d46a78b05 xen: implement pi... |
622 |
{ |
6cb6537d3 xen: events: main... |
623 |
struct irq_info *info; |
d46a78b05 xen: implement pi... |
624 |
|
6cb6537d3 xen: events: main... |
625 626 |
list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) |
d46a78b05 xen: implement pi... |
627 |
continue; |
6cb6537d3 xen: events: main... |
628 629 |
if (info->u.pirq.gsi == gsi) return info->irq; |
d46a78b05 xen: implement pi... |
630 631 632 633 |
} return -1; } |
68c2c39a7 xen: do not map t... |
634 |
EXPORT_SYMBOL_GPL(xen_irq_from_gsi); |
d46a78b05 xen: implement pi... |
635 |
|
653378acd xen: events: simp... |
636 637 638 |
/* * Do not make any assumptions regarding the relationship between the * IRQ number returned here and the Xen pirq argument. |
7a043f119 xen: support pirq... |
639 640 641 |
* * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. |
e5ac0bda9 xen: use the trig... |
642 643 644 |
* * Shareable implies level triggered, not shareable implies edge * triggered here. |
d46a78b05 xen: implement pi... |
645 |
*/ |
f4d0635bf xen: events: refa... |
646 647 |
int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) |
d46a78b05 xen: implement pi... |
648 |
{ |
a0e181167 xen: events: fix ... |
649 |
int irq = -1; |
d46a78b05 xen: implement pi... |
650 |
struct physdev_irq irq_op; |
773659483 xen/irq: Alter th... |
651 |
mutex_lock(&irq_mapping_update_lock); |
d46a78b05 xen: implement pi... |
652 |
|
68c2c39a7 xen: do not map t... |
653 |
irq = xen_irq_from_gsi(gsi); |
d46a78b05 xen: implement pi... |
654 |
if (irq != -1) { |
7a043f119 xen: support pirq... |
655 656 |
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u ", |
d46a78b05 xen: implement pi... |
657 |
irq, gsi); |
420eb554d xen/event: Add re... |
658 |
goto out; |
d46a78b05 xen: implement pi... |
659 |
} |
c9df1ce58 xen: events: add ... |
660 |
irq = xen_allocate_irq_gsi(gsi); |
7bee97682 xen: events: prop... |
661 662 |
if (irq < 0) goto out; |
d46a78b05 xen: implement pi... |
663 |
|
d46a78b05 xen: implement pi... |
664 |
irq_op.irq = irq; |
b5401a96b xen/x86/PCI: Add ... |
665 666 667 668 669 670 671 |
irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront * driver provides a PCI bus that does the call to do exactly * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { |
c9df1ce58 xen: events: add ... |
672 |
xen_free_irq(irq); |
d46a78b05 xen: implement pi... |
673 674 675 |
irq = -ENOSPC; goto out; } |
dec02dea1 xen: drop trackin... |
676 |
xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF, |
9158c3588 xen: events: turn... |
677 |
shareable ? PIRQ_SHAREABLE : 0); |
d46a78b05 xen: implement pi... |
678 |
|
7e186bdd0 xen: do not clear... |
679 680 |
pirq_query_unmask(irq); /* We try to use the handler with the appropriate semantic for the |
e5ac0bda9 xen: use the trig... |
681 682 |
* type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. |
7e186bdd0 xen: do not clear... |
683 |
* |
e5ac0bda9 xen: use the trig... |
684 685 |
* On the other hand if the interrupt is level triggered we use * handle_fasteoi_irq like the native code does for this kind of |
7e186bdd0 xen: do not clear... |
686 |
* interrupts. |
e5ac0bda9 xen: use the trig... |
687 |
* |
7e186bdd0 xen: do not clear... |
688 689 690 691 692 693 694 |
* Depending on the Xen version, pirq_needs_eoi might return true * not only for level triggered interrupts but for edge triggered * interrupts too. In any case Xen always honors the eoi mechanism, * not injecting any more pirqs of the same kind if the first one * hasn't received an eoi yet. Therefore using the fasteoi handler * is the right choice either way. */ |
e5ac0bda9 xen: use the trig... |
695 |
if (shareable) |
7e186bdd0 xen: do not clear... |
696 697 698 699 700 |
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_fasteoi_irq, name); else irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); |
d46a78b05 xen: implement pi... |
701 |
out: |
773659483 xen/irq: Alter th... |
702 |
mutex_unlock(&irq_mapping_update_lock); |
d46a78b05 xen: implement pi... |
703 704 705 |
return irq; } |
f731e3ef0 xen: remap MSIs i... |
706 |
#ifdef CONFIG_PCI_MSI
/* Ask the hypervisor for a free pirq to back an MSI; -1 on failure. */
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}
bf480d952 xen: events: sepa... |
721 |
/*
 * Bind an MSI described by @msidesc to an existing Xen @pirq owned by
 * @domid, allocating a fresh dynamic Linux irq for it.
 *
 * Returns the new irq number on success, or a negative errno if irq
 * allocation or MSI descriptor attachment fails.  On the latter the
 * freshly allocated irq is released again.
 */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, const char *name, domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	/* MSIs are edge triggered; no EOI handshake with Xen is needed. */
	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	/* Drop the mutex before freeing: xen_free_irq() is called unlocked. */
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
f731e3ef0 xen: remap MSIs i... |
747 |
#endif |
b5401a96b xen/x86/PCI: Add ... |
748 749 750 |
/*
 * Tear down a PIRQ-backed irq.  In the initial domain the PIRQ is also
 * unmapped in the hypervisor via PHYSDEVOP_unmap_pirq before the Linux
 * irq is freed.
 *
 * Returns 0 on success, -ENOENT if @irq has no descriptor, or the
 * error from the unmap hypercall.
 */
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore ",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d ", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}
af42b8d12 xen: fix MSI setu... |
784 |
int xen_irq_from_pirq(unsigned pirq) |
d46a78b05 xen: implement pi... |
785 |
{ |
69c358ce3 xen: events: remo... |
786 |
int irq; |
d46a78b05 xen: implement pi... |
787 |
|
69c358ce3 xen: events: remo... |
788 |
struct irq_info *info; |
e46cdb66c xen: event channels |
789 |
|
773659483 xen/irq: Alter th... |
790 |
mutex_lock(&irq_mapping_update_lock); |
69c358ce3 xen: events: remo... |
791 792 |
list_for_each_entry(info, &xen_irq_list_head, list) { |
9bb9efe4b xen/events: Don't... |
793 |
if (info->type != IRQT_PIRQ) |
69c358ce3 xen: events: remo... |
794 795 796 797 798 799 800 |
continue; irq = info->irq; if (info->u.pirq.pirq == pirq) goto out; } irq = -1; out: |
773659483 xen/irq: Alter th... |
801 |
mutex_unlock(&irq_mapping_update_lock); |
69c358ce3 xen: events: remo... |
802 803 |
return irq; |
af42b8d12 xen: fix MSI setu... |
804 |
} |
e6197acc7 xen/irq: Export '... |
805 806 807 808 809 810 |
/* Return the Xen PIRQ number associated with Linux irq @irq. */
int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
b536b4b96 xen: use the hvc ... |
811 |
/*
 * Bind an event channel to a Linux irq, allocating a new dynamic irq
 * if @evtchn is not already mapped.  If a mapping exists it is reused,
 * but only after checking it really is of type IRQT_EVTCHN.
 *
 * Returns the irq number, or a negative errno if allocation fails.
 */
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	} else {
		/* Existing binding must already be a plain event channel. */
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
e46cdb66c xen: event channels |
837 |
|
f87e4cac4 xen: SMP guest su... |
838 839 840 841 |
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; int evtchn, irq; |
773659483 xen/irq: Alter th... |
842 |
mutex_lock(&irq_mapping_update_lock); |
f87e4cac4 xen: SMP guest su... |
843 844 |
irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
90af9514a xen: explicitly i... |
845 |
|
f87e4cac4 xen: SMP guest su... |
846 |
if (irq == -1) { |
c9df1ce58 xen: events: add ... |
847 |
irq = xen_allocate_irq_dynamic(); |
f87e4cac4 xen: SMP guest su... |
848 849 |
if (irq < 0) goto out; |
c442b8068 xen: Cleanup geni... |
850 |
irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
aaca49642 xen: use percpu i... |
851 |
handle_percpu_irq, "ipi"); |
f87e4cac4 xen: SMP guest su... |
852 853 854 855 856 857 |
bind_ipi.vcpu = cpu; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; |
3d4cfa373 xen: events: push... |
858 |
xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); |
f87e4cac4 xen: SMP guest su... |
859 860 |
bind_evtchn_to_cpu(evtchn, cpu); |
5e152e6c4 xen/events: Add W... |
861 862 863 |
} else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_IPI); |
f87e4cac4 xen: SMP guest su... |
864 |
} |
f87e4cac4 xen: SMP guest su... |
865 |
out: |
773659483 xen/irq: Alter th... |
866 |
mutex_unlock(&irq_mapping_update_lock); |
f87e4cac4 xen: SMP guest su... |
867 868 |
return irq; } |
2e820f58f xen/irq: implemen... |
869 870 871 872 873 874 875 876 877 878 879 880 881 882 |
static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, unsigned int remote_port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = remote_domain; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); } |
62cc5fc7b xen/pv-on-hvm kex... |
883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 |
/*
 * Scan all event channel ports for one already bound to (@virq, @cpu),
 * using EVTCHNOP_status.  Used when EVTCHNOP_bind_virq returns -EEXIST
 * (e.g. after a kexec the binding survives in the hypervisor).
 *
 * Returns the port number, or -ENOENT if no matching port exists.
 */
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	/*
	 * Valid ports are 0 .. NR_EVENT_CHANNELS - 1.  The previous
	 * "port <= NR_EVENT_CHANNELS" probed one port past the end; a
	 * match there would make the caller index the per-port arrays
	 * (sized NR_EVENT_CHANNELS) out of bounds.
	 */
	for (port = 0; port < NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}
f87e4cac4 xen: SMP guest su... |
904 |
|
4fe7d5a70 xen: make hvc_xen... |
905 |
/*
 * Bind VIRQ @virq on @cpu to an irq, creating the event channel via
 * EVTCHNOP_bind_virq on first use.  If the hypervisor reports -EEXIST
 * (binding survived e.g. a kexec), the existing port is located with
 * find_virq() and reused.  Returns the irq or a negative errno.
 */
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		/* VIRQs are per-cpu events, hence the percpu handler. */
		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			/* Already bound in the hypervisor: recover the port. */
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		/* Reusing an existing binding: it must be a VIRQ. */
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

/*
 * Release an irq's event channel binding.  Honors the refcount set up
 * by evtchn_make_refcounted()/evtchn_get(): the channel is only closed
 * when the count drops to zero (refcnt <= 0 means not refcounted).
 * Closing clears the per-cpu virq/ipi reverse maps and the global
 * evtchn_to_irq entry before freeing the irq itself.
 */
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	mutex_lock(&irq_mapping_update_lock);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			goto done;
	}

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

 done:
	mutex_unlock(&irq_mapping_update_lock);
}

/*
 * Convenience wrapper: bind @evtchn to an irq and install @handler on
 * it.  On request_irq() failure the binding is rolled back.  Returns
 * the irq or a negative errno.
 */
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
2e820f58f xen/irq: implemen... |
1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 |
/*
 * Bind a remote domain's event channel port to a local irq and install
 * @handler on it.  On request_irq() failure the binding is undone.
 * Returns the irq or a negative errno.
 */
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int rc;
	int irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);

	if (irq < 0)
		return irq;

	rc = request_irq(irq, handler, irqflags, devname, dev_id);
	if (rc) {
		unbind_from_irq(irq);
		return rc;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
e46cdb66c xen: event channels |
1034 |
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, |
7c2399756 [SPARC, XEN, NET/... |
1035 |
irq_handler_t handler, |
e46cdb66c xen: event channels |
1036 1037 |
unsigned long irqflags, const char *devname, void *dev_id) { |
361ae8cb5 xen: events: fix ... |
1038 |
int irq, retval; |
e46cdb66c xen: event channels |
1039 1040 |
irq = bind_virq_to_irq(virq, cpu); |
7bee97682 xen: events: prop... |
1041 1042 |
if (irq < 0) return irq; |
e46cdb66c xen: event channels |
1043 1044 1045 1046 1047 1048 1049 1050 1051 |
retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); |
f87e4cac4 xen: SMP guest su... |
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 |
/*
 * Bind IPI vector @ipi on @cpu to an irq and install @handler on it.
 * IPIs must stay operational across suspend/resume, so the no-suspend
 * and resume-related flags are forced on.  Returns the irq or a
 * negative errno; on request_irq() failure the binding is undone.
 */
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
e46cdb66c xen: event channels |
1073 1074 |
/*
 * Counterpart to the bind_*_to_irqhandler() helpers: remove @dev_id's
 * handler and release the irq's event channel binding.  A missing
 * irq_info (never bound) is warned about and ignored.
 */
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
420eb554d xen/event: Add re... |
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 |
/*
 * Switch @evtchn's irq_info from the default "not refcounted" state
 * (refcnt == -1) to refcounted with an initial count of one.  From
 * then on, evtchn_get()/evtchn_put() pair up and unbind_from_irq()
 * only closes the channel when the count reaches zero.
 */
int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	/* Only a channel that is not yet refcounted may be converted. */
	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

/*
 * Take an extra reference on a refcounted event channel.
 *
 * Returns 0 on success, -EINVAL for an out-of-range port or a channel
 * that is not refcounted (refcnt <= 0), -ENOENT if the port is unbound.
 */
int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= NR_EVENT_CHANNELS)
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

/*
 * Drop a reference taken with evtchn_get(); unbind_from_irq() performs
 * the actual decrement and closes the channel at zero.
 */
void evtchn_put(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
f87e4cac4 xen: SMP guest su... |
1143 1144 1145 1146 1147 1148 |
/*
 * Raise IPI @vector on @cpu by notifying the bound event channel.
 * The (cpu, vector) pair must have been bound beforehand (BUG if not).
 */
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
ee523ca1e xen: implement a ... |
1149 1150 1151 1152 |
/*
 * Debug interrupt handler: dump the complete event channel state —
 * per-vcpu upcall masks and selectors, the shared pending and mask
 * bitmaps, this cpu's binding mask, and a decoded list of pending
 * ports — under a local spinlock so concurrent dumps do not interleave.
 * NOTE(review): the printk format strings here appear to have lost
 * their "\n" escapes in this extraction; verify against the upstream
 * file before relying on the exact output layout.
 */
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk(" vcpu %d ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		/* For the current cpu, read the real flag state from the
		 * interrupted registers rather than the vcpu_info copy. */
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong" ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk(" pending: ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? " " : " ");
	printk(" global mask: ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? " " : " ");

	printk(" globally unmasked: ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? " " : " ");

	printk(" local cpu%d mask: ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? " " : " ");

	printk(" locally unmasked: ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		/* Pending, globally unmasked, and bound to this cpu. */
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? " " : " ");
	}

	printk(" pending list: ");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk(" %d: event %d -> irq %d%s%s%s ",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
245b2e70e percpu: clean up ... |
1252 |
/* Re-entrancy counter for __xen_evtchn_do_upcall() on this cpu. */
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
/* Resume position of the event scan, for fairness across upcalls. */
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
38e20b07e x86/xen: event ch... |
1270 |
/*
 * Core upcall dispatcher.  Scans the two-level pending bitmap and
 * feeds each pending port's irq to generic_handle_irq_desc().
 *
 * Re-entrancy is handled with a per-cpu nesting counter: a nested
 * invocation only bumps the counter and returns; the outer loop then
 * repeats while the counter shows nesting or more events are pending.
 * The scan resumes from the per-cpu (current_word_idx, current_bit_idx)
 * position so low-numbered ports cannot starve higher ones.
 */
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i, irq;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		xen_ulong_t pending_words;
		xen_ulong_t pending_bits;
		struct irq_desc *desc;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		/*
		 * Master flag must be cleared /before/ clearing
		 * selector flag. xchg_xen_ulong must contain an
		 * appropriate barrier.
		 */
		/* Handle the timer VIRQ first, ahead of the generic scan. */
		if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
			int evtchn = evtchn_from_irq(irq);
			word_idx = evtchn / BITS_PER_LONG;
			pending_bits = evtchn % BITS_PER_LONG;
			if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
				desc = irq_to_desc(irq);
				if (desc)
					generic_handle_irq_desc(irq, desc);
			}
		}

		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			xen_ulong_t words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = EVTCHN_FIRST_BIT(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				xen_ulong_t bits;
				int port;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = EVTCHN_FIRST_BIT(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_EVTCHN_WORD);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_l1i twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~(1UL << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

/*
 * PV entry point, called from the hypervisor upcall with the
 * interrupted register state: wraps the core dispatcher in
 * irq_enter()/irq_exit() and saves/restores the irq regs pointer.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * HVM entry point: the platform already performed the irq entry
 * bookkeeping, so only the core dispatcher runs here.
 */
void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
e46cdb66c xen: event channels |
1403 |
|
eb1e305f4 xen: add rebind_e... |
1404 1405 1406 |
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
e46cdb66c xen: event channels |
1431 |
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	/* Only unmask if the channel was not already masked on entry. */
	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}
e46cdb66c xen: event channels |
1471 |
|
c9e265e03 xen: Switch to ne... |
1472 1473 |
/*
 * irq_chip .irq_set_affinity callback: deliver this irq's event
 * channel to the first cpu of the requested mask.
 */
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	return rebind_irq_to_cpu(data->irq, cpumask_first(dest));
}
642e0c882 xen: add resend_i... |
1479 1480 1481 1482 1483 1484 1485 |
int resend_irq_on_evtchn(unsigned int irq) { int masked, evtchn = evtchn_from_irq(irq); struct shared_info *s = HYPERVISOR_shared_info; if (!VALID_EVTCHN(evtchn)) return 1; |
c81611c4e xen: event channe... |
1486 1487 |
masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); sync_set_bit(evtchn, BM(s->evtchn_pending)); |
642e0c882 xen: add resend_i... |
1488 1489 1490 1491 1492 |
if (!masked) unmask_evtchn(evtchn); return 1; } |
c9e265e03 xen: Switch to ne... |
1493 |
static void enable_dynirq(struct irq_data *data) |
e46cdb66c xen: event channels |
1494 |
{ |
c9e265e03 xen: Switch to ne... |
1495 |
int evtchn = evtchn_from_irq(data->irq); |
e46cdb66c xen: event channels |
1496 1497 1498 1499 |
if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } |
c9e265e03 xen: Switch to ne... |
1500 |
static void disable_dynirq(struct irq_data *data) |
e46cdb66c xen: event channels |
1501 |
{ |
c9e265e03 xen: Switch to ne... |
1502 |
int evtchn = evtchn_from_irq(data->irq); |
e46cdb66c xen: event channels |
1503 1504 1505 1506 |
if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } |
c9e265e03 xen: Switch to ne... |
1507 |
static void ack_dynirq(struct irq_data *data) |
e46cdb66c xen: event channels |
1508 |
{ |
c9e265e03 xen: Switch to ne... |
1509 |
int evtchn = evtchn_from_irq(data->irq); |
e46cdb66c xen: event channels |
1510 |
|
7e186bdd0 xen: do not clear... |
1511 |
irq_move_irq(data); |
e46cdb66c xen: event channels |
1512 1513 |
if (VALID_EVTCHN(evtchn)) |
7e186bdd0 xen: do not clear... |
1514 1515 1516 1517 1518 1519 1520 |
clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) { disable_dynirq(data); ack_dynirq(data); |
e46cdb66c xen: event channels |
1521 |
} |
c9e265e03 xen: Switch to ne... |
1522 |
static int retrigger_dynirq(struct irq_data *data) |
e46cdb66c xen: event channels |
1523 |
{ |
c9e265e03 xen: Switch to ne... |
1524 |
int evtchn = evtchn_from_irq(data->irq); |
ee8fa1c67 xen: make sure re... |
1525 |
struct shared_info *sh = HYPERVISOR_shared_info; |
e46cdb66c xen: event channels |
1526 1527 1528 |
int ret = 0; if (VALID_EVTCHN(evtchn)) { |
ee8fa1c67 xen: make sure re... |
1529 |
int masked; |
c81611c4e xen: event channe... |
1530 1531 |
masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask)); sync_set_bit(evtchn, BM(sh->evtchn_pending)); |
ee8fa1c67 xen: make sure re... |
1532 1533 |
if (!masked) unmask_evtchn(evtchn); |
e46cdb66c xen: event channels |
1534 1535 1536 1537 1538 |
ret = 1; } return ret; } |
0a85226ff xen: events: rena... |
1539 |
static void restore_pirqs(void) |
9a069c33c xen: fix save/res... |
1540 1541 1542 |
{ int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; |
69c358ce3 xen: events: remo... |
1543 |
struct irq_info *info; |
9a069c33c xen: fix save/res... |
1544 |
|
69c358ce3 xen: events: remo... |
1545 1546 |
list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) |
9a069c33c xen: fix save/res... |
1547 |
continue; |
69c358ce3 xen: events: remo... |
1548 1549 1550 |
pirq = info->u.pirq.pirq; gsi = info->u.pirq.gsi; irq = info->irq; |
9a069c33c xen: fix save/res... |
1551 1552 |
/* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ |
9a069c33c xen: fix save/res... |
1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 |
if (!gsi) continue; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d ", gsi, irq, pirq, rc); |
9158c3588 xen: events: turn... |
1566 |
xen_free_irq(irq); |
9a069c33c xen: fix save/res... |
1567 1568 1569 1570 1571 |
continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d ", irq, map_irq.pirq); |
c9e265e03 xen: Switch to ne... |
1572 |
__startup_pirq(irq); |
9a069c33c xen: fix save/res... |
1573 1574 |
} } |
0e91398f2 xen: implement sa... |
1575 1576 1577 1578 1579 1580 1581 1582 |
static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; int virq, irq, evtchn; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; |
ced40d0f3 xen: pack all irq... |
1583 |
BUG_ON(virq_from_irq(irq) != virq); |
0e91398f2 xen: implement sa... |
1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 |
/* Get a new binding from Xen. */ bind_virq.virq = virq; bind_virq.vcpu = cpu; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0) BUG(); evtchn = bind_virq.port; /* Record the new mapping. */ |
3d4cfa373 xen: events: push... |
1594 |
xen_irq_info_virq_init(cpu, irq, evtchn, virq); |
0e91398f2 xen: implement sa... |
1595 |
bind_evtchn_to_cpu(evtchn, cpu); |
0e91398f2 xen: implement sa... |
1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 |
} } static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; int ipi, irq, evtchn; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; |
ced40d0f3 xen: pack all irq... |
1607 |
BUG_ON(ipi_from_irq(irq) != ipi); |
0e91398f2 xen: implement sa... |
1608 1609 1610 1611 1612 1613 1614 1615 1616 |
/* Get a new binding from Xen. */ bind_ipi.vcpu = cpu; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; /* Record the new mapping. */ |
3d4cfa373 xen: events: push... |
1617 |
xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); |
0e91398f2 xen: implement sa... |
1618 |
bind_evtchn_to_cpu(evtchn, cpu); |
0e91398f2 xen: implement sa... |
1619 1620 |
} } |
2d9e1e2f5 xen: implement Xe... |
1621 1622 1623 1624 1625 1626 1627 1628 |
/* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { int evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); } |
d9a8814f2 xen: Provide a va... |
1629 |
EXPORT_SYMBOL(xen_clear_irq_pending); |
168d2f464 xen: save previou... |
1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 |
void xen_set_irq_pending(int irq) { int evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) set_evtchn(evtchn); } bool xen_test_irq_pending(int irq) { int evtchn = evtchn_from_irq(irq); bool ret = false; if (VALID_EVTCHN(evtchn)) ret = test_evtchn(evtchn); return ret; } |
d9a8814f2 xen: Provide a va... |
1648 1649 1650 |
/* Poll waiting for an irq to become pending with timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout) |
2d9e1e2f5 xen: implement Xe... |
1651 1652 1653 1654 1655 1656 1657 |
{ evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) { struct sched_poll poll; poll.nr_ports = 1; |
d9a8814f2 xen: Provide a va... |
1658 |
poll.timeout = timeout; |
ff3c53629 xen: compilation ... |
1659 |
set_xen_guest_handle(poll.ports, &evtchn); |
2d9e1e2f5 xen: implement Xe... |
1660 1661 1662 1663 1664 |
if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); } } |
d9a8814f2 xen: Provide a va... |
1665 1666 1667 1668 1669 1670 1671 |
EXPORT_SYMBOL(xen_poll_irq_timeout); /* Poll waiting for an irq to become pending. In the usual case, the * irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq) { xen_poll_irq_timeout(irq, 0 /* no timeout */); } |
2d9e1e2f5 xen: implement Xe... |
1672 |
|
c7c2c3a28 xen/irq: Add supp... |
1673 1674 1675 1676 |
/* Check whether the IRQ line is shared with other guests. */ int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); |
94032c506 xen/events: Check... |
1677 1678 1679 1680 1681 1682 |
struct physdev_irq_status_query irq_status; if (WARN_ON(!info)) return -ENOENT; irq_status.irq = info->u.pirq.pirq; |
c7c2c3a28 xen/irq: Add supp... |
1683 1684 1685 1686 1687 1688 |
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; return !(irq_status.flags & XENIRQSTAT_shared); } EXPORT_SYMBOL_GPL(xen_test_irq_shared); |
0e91398f2 xen: implement sa... |
1689 1690 |
void xen_irq_resume(void) { |
6cb6537d3 xen: events: main... |
1691 1692 |
unsigned int cpu, evtchn; struct irq_info *info; |
0e91398f2 xen: implement sa... |
1693 1694 1695 1696 1697 1698 1699 1700 |
init_evtchn_cpu_bindings(); /* New event-channel space is not 'live' yet. */ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) mask_evtchn(evtchn); /* No IRQ <-> event-channel mappings. */ |
6cb6537d3 xen: events: main... |
1701 1702 |
list_for_each_entry(info, &xen_irq_list_head, list) info->evtchn = 0; /* zap event-channel binding */ |
0e91398f2 xen: implement sa... |
1703 1704 1705 1706 1707 1708 1709 1710 |
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) evtchn_to_irq[evtchn] = -1; for_each_possible_cpu(cpu) { restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } |
6903591f3 xen: events: do n... |
1711 |
|
0a85226ff xen: events: rena... |
1712 |
restore_pirqs(); |
0e91398f2 xen: implement sa... |
1713 |
} |
e46cdb66c xen: event channels |
1714 |
static struct irq_chip xen_dynamic_chip __read_mostly = { |
c9e265e03 xen: Switch to ne... |
1715 |
.name = "xen-dyn", |
54a353a0f xen: set irq_chip... |
1716 |
|
c9e265e03 xen: Switch to ne... |
1717 1718 1719 |
.irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, |
54a353a0f xen: set irq_chip... |
1720 |
|
7e186bdd0 xen: do not clear... |
1721 1722 |
.irq_ack = ack_dynirq, .irq_mask_ack = mask_ack_dynirq, |
c9e265e03 xen: Switch to ne... |
1723 1724 |
.irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, |
e46cdb66c xen: event channels |
1725 |
}; |
d46a78b05 xen: implement pi... |
1726 |
static struct irq_chip xen_pirq_chip __read_mostly = { |
c9e265e03 xen: Switch to ne... |
1727 |
.name = "xen-pirq", |
d46a78b05 xen: implement pi... |
1728 |
|
c9e265e03 xen: Switch to ne... |
1729 1730 |
.irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, |
c9e265e03 xen: Switch to ne... |
1731 |
.irq_enable = enable_pirq, |
c9e265e03 xen: Switch to ne... |
1732 |
.irq_disable = disable_pirq, |
d46a78b05 xen: implement pi... |
1733 |
|
7e186bdd0 xen: do not clear... |
1734 1735 1736 1737 1738 1739 |
.irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = eoi_pirq, .irq_eoi = eoi_pirq, .irq_mask_ack = mask_ack_pirq, |
d46a78b05 xen: implement pi... |
1740 |
|
c9e265e03 xen: Switch to ne... |
1741 |
.irq_set_affinity = set_affinity_irq, |
d46a78b05 xen: implement pi... |
1742 |
|
c9e265e03 xen: Switch to ne... |
1743 |
.irq_retrigger = retrigger_dynirq, |
d46a78b05 xen: implement pi... |
1744 |
}; |
aaca49642 xen: use percpu i... |
1745 |
static struct irq_chip xen_percpu_chip __read_mostly = { |
c9e265e03 xen: Switch to ne... |
1746 |
.name = "xen-percpu", |
aaca49642 xen: use percpu i... |
1747 |
|
c9e265e03 xen: Switch to ne... |
1748 1749 1750 |
.irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, |
aaca49642 xen: use percpu i... |
1751 |
|
c9e265e03 xen: Switch to ne... |
1752 |
.irq_ack = ack_dynirq, |
aaca49642 xen: use percpu i... |
1753 |
}; |
38e20b07e x86/xen: event ch... |
1754 1755 1756 1757 1758 1759 1760 1761 1762 |
int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } EXPORT_SYMBOL_GPL(xen_set_callback_via); |
ca65f9fc0 Introduce CONFIG_... |
1763 |
#ifdef CONFIG_XEN_PVHVM |
38e20b07e x86/xen: event ch... |
1764 1765 1766 1767 1768 1769 1770 1771 |
/* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any * vcpu and we don't need PCI support or APIC interactions. */ void xen_callback_vector(void) { int rc; uint64_t callback_via; if (xen_have_vector_callback) { |
bc2b0331e X86: Handle Hyper... |
1772 |
callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); |
38e20b07e x86/xen: event ch... |
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 |
rc = xen_set_callback_via(callback_via); if (rc) { printk(KERN_ERR "Request for Xen HVM callback vector" " failed. "); xen_have_vector_callback = 0; return; } printk(KERN_INFO "Xen HVM callback vector for event delivery is " "enabled "); /* in the restore case the vector has already been allocated */ |
bc2b0331e X86: Handle Hyper... |
1785 1786 1787 |
if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, xen_hvm_callback_vector); |
38e20b07e x86/xen: event ch... |
1788 1789 |
} } |
ca65f9fc0 Introduce CONFIG_... |
1790 1791 1792 |
#else void xen_callback_vector(void) {} #endif |
38e20b07e x86/xen: event ch... |
1793 |
|
2e3d88602 xen: mark xen_ini... |
1794 |
void __init xen_init_IRQ(void) |
e46cdb66c xen: event channels |
1795 |
{ |
0ec53ecf3 xen/arm: receive ... |
1796 |
int i; |
c7a3589e7 Xen: reduce memor... |
1797 |
|
b21ddbf50 xen: dynamically ... |
1798 1799 |
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), GFP_KERNEL); |
9d093e295 xen/events: BUG()... |
1800 |
BUG_ON(!evtchn_to_irq); |
b21ddbf50 xen: dynamically ... |
1801 1802 |
for (i = 0; i < NR_EVENT_CHANNELS; i++) evtchn_to_irq[i] = -1; |
e46cdb66c xen: event channels |
1803 1804 1805 1806 1807 1808 |
init_evtchn_cpu_bindings(); /* No event channels are 'live' right now. */ for (i = 0; i < NR_EVENT_CHANNELS; i++) mask_evtchn(i); |
9846ff10a xen: support pirq... |
1809 |
pirq_needs_eoi = pirq_needs_eoi_flag; |
0ec53ecf3 xen/arm: receive ... |
1810 |
#ifdef CONFIG_X86 |
38e20b07e x86/xen: event ch... |
1811 1812 1813 |
if (xen_hvm_domain()) { xen_callback_vector(); native_init_IRQ(); |
3942b740e xen: support GSI ... |
1814 1815 1816 |
/* pci_xen_hvm_init must be called after native_init_IRQ so that * __acpi_register_gsi can point at the right function */ pci_xen_hvm_init(); |
38e20b07e x86/xen: event ch... |
1817 |
} else { |
0ec53ecf3 xen/arm: receive ... |
1818 |
int rc; |
9846ff10a xen: support pirq... |
1819 |
struct physdev_pirq_eoi_gmfn eoi_gmfn; |
38e20b07e x86/xen: event ch... |
1820 |
irq_ctx_init(smp_processor_id()); |
38aa66fcb xen: remap GSIs a... |
1821 |
if (xen_initial_domain()) |
a0ee05670 xen/pci: Squash p... |
1822 |
pci_xen_initial_domain(); |
9846ff10a xen: support pirq... |
1823 1824 1825 1826 1827 1828 1829 1830 1831 |
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); if (rc != 0) { free_page((unsigned long) pirq_eoi_map); pirq_eoi_map = NULL; } else pirq_needs_eoi = pirq_check_eoi_map; |
38e20b07e x86/xen: event ch... |
1832 |
} |
0ec53ecf3 xen/arm: receive ... |
1833 |
#endif |
e46cdb66c xen: event channels |
1834 |
} |