arch/x86/xen/irq.c

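/*
 * Xen irq-flag and halt paravirt operations.  Interrupt state is
 * virtualised through the per-vCPU evtchn_upcall_mask in the shared
 * vcpu_info structure rather than the real EFLAGS.IF bit.
 */
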
  #include <linux/hardirq.h>
  #include <asm/x86_init.h>
  #include <xen/interface/xen.h>
  #include <xen/interface/sched.h>
  #include <xen/interface/vcpu.h>
  
  #include <asm/xen/hypercall.h>
  #include <asm/xen/hypervisor.h>
  
  #include "xen-ops.h"
  
  /*
   * Force a proper event-channel callback from Xen after clearing the
   * callback mask. We do this in a very simple manner, by making a call
   * down into Xen. The pending flag will be checked by Xen on return.
   */
  void xen_force_evtchn_callback(void)
  {
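	/*
	 * xen_version is used as a cheap no-op hypercall: the result is
	 * discarded; the point is simply to trap into Xen, which checks
	 * for pending events on its way back into the guest.
	 */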
  	(void)HYPERVISOR_xen_version(0, NULL);
  }
  static unsigned long xen_save_fl(void)
  {
  	struct vcpu_info *vcpu;
  	unsigned long flags;
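
	/* evtchn_upcall_mask is this vCPU's virtual interrupt mask. */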
  	vcpu = percpu_read(xen_vcpu);
  
  	/* flag has opposite sense of mask */
  	flags = !vcpu->evtchn_upcall_mask;
  
  	/* convert to IF type flag
  	   -0 -> 0x00000000
  	   -1 -> 0xffffffff
  	*/
  	return (-flags) & X86_EFLAGS_IF;
  }
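
/*
 * PV_CALLEE_SAVE_REGS_THUNK emits a wrapper that preserves all
 * registers around the call, so these hot irq-flag ops can be
 * invoked with the lighter callee-save paravirt calling convention.
 */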
  PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
  
  static void xen_restore_fl(unsigned long flags)
  {
  	struct vcpu_info *vcpu;
  
	/* convert from IF type flag: IF set -> 0 (unmask), clear -> 1 (mask) */
	flags = !(flags & X86_EFLAGS_IF);
  
	/* There's a one-instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
  	preempt_disable();
  	vcpu = percpu_read(xen_vcpu);
  	vcpu->evtchn_upcall_mask = flags;
  	preempt_enable_no_resched();
  
  	/* Doesn't matter if we get preempted here, because any
  	   pending event will get dealt with anyway. */
  
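	/*
	 * If events were just unmasked, check for anything that arrived
	 * while they were masked and force an upcall to deliver it.
	 */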
  	if (flags == 0) {
  		preempt_check_resched();
  		barrier(); /* unmask then check (avoid races) */
  		if (unlikely(vcpu->evtchn_upcall_pending))
  			xen_force_evtchn_callback();
  	}
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
  
  static void xen_irq_disable(void)
  {
	/* There's a one-instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
  	preempt_disable();
  	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
  	preempt_enable_no_resched();
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
  
  static void xen_irq_enable(void)
  {
  	struct vcpu_info *vcpu;
  
  	/* We don't need to worry about being preempted here, since
  	   either a) interrupts are disabled, so no preemption, or b)
  	   the caller is confused and is trying to re-enable interrupts
  	   on an indeterminate processor. */
  	vcpu = percpu_read(xen_vcpu);
  	vcpu->evtchn_upcall_mask = 0;
  
  	/* Doesn't matter if we get preempted here, because any
  	   pending event will get dealt with anyway. */
  
  	barrier(); /* unmask then check (avoid races) */
  	if (unlikely(vcpu->evtchn_upcall_pending))
  		xen_force_evtchn_callback();
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
  
  static void xen_safe_halt(void)
  {
  	/* Blocking includes an implicit local_irq_enable(). */
  	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
  		BUG();
  }
  
  static void xen_halt(void)
  {
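	/*
	 * With interrupts disabled nothing can wake this vCPU, so take
	 * it offline entirely; otherwise fall back to the blocking halt.
	 */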
  	if (irqs_disabled())
  		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
  	else
  		xen_safe_halt();
  }
  static const struct pv_irq_ops xen_irq_ops __initconst = {
  	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
  	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
  	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
  	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
  	.safe_halt = xen_safe_halt,
  	.halt = xen_halt,
  #ifdef CONFIG_X86_64
  	.adjust_exception_frame = xen_adjust_exception_frame,
  #endif
  };
  void __init xen_init_irq_ops(void)
{
  {
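	/* Install the Xen pv irq ops and route IRQ setup to xen_init_IRQ. */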
  	pv_irq_ops = xen_irq_ops;
  	x86_init.irqs.intr_init = xen_init_IRQ;
  }