Commit a60d4b9874dc62cf0fd0d42b247baaaef75d30f8

Authored by Linus Torvalds

Merge tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
 "This pull I usually do after rc1 is out but because we have a nice
  amount of fixes, some bootup related fixes for ARM, and it is early in
  the cycle we figured to do it now to help with tracking of potential
  regressions.

  The simple ones are the ARM ones - one of the patches fell through the
  cracks, the other fixes a bootup issue (unconditionally using Xen
  functions).  Then a fix for a regression causing the preempt count to be
  off (the patch causing this went in v3.12).

  Lastly are the fixes to make Xen PVHVM guests use PV ticketlocks (Xen
  PV already does).

  The enablement of that was supposed to be part of the x86 spinlock
  merge in commit 816434ec4a67 ("The biggest change here are
  paravirtualized ticket spinlocks (PV spinlocks), which bring a nice
  speedup on various benchmarks...") but unfortunatly it would cause
  hang when booting Xen PVHVM guests.  Yours truly got all of the bugs
  fixed last week and they (six of them) are included in this pull.

  Bug-fixes:
   - Boot on ARM without using Xen unconditionally
   - On Xen ARM don't run cpuidle/cpufreq
   - Fix regression in balloon driver, preempt count warnings
   - Fixes to make PVHVM able to use pv ticketlock.
   - Revert Xen PVHVM disabling pv ticketlock (aka, re-enable pv ticketlocks)"

* tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/spinlock: Don't use __initdate for xen_pv_spin
  Revert "xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM"
  xen/spinlock: Don't setup xen spinlock IPI kicker if disabled.
  xen/smp: Update pv_lock_ops functions before alternative code starts under PVHVM
  xen/spinlock: We don't need the old structure anymore
  xen/spinlock: Fix locking path engaging too soon under PVHVM.
  xen/arm: disable cpuidle and cpufreq when linux is running as dom0
  xen/p2m: Don't call get_balloon_scratch_page() twice, keep interrupts disabled for multicalls
  ARM: xen: only set pm function ptrs for Xen guests

Showing 5 changed files Side-by-side Diff

arch/arm/xen/enlighten.c
... ... @@ -21,6 +21,8 @@
21 21 #include <linux/of.h>
22 22 #include <linux/of_irq.h>
23 23 #include <linux/of_address.h>
  24 +#include <linux/cpuidle.h>
  25 +#include <linux/cpufreq.h>
24 26  
25 27 #include <linux/mm.h>
26 28  
27 29  
28 30  
... ... @@ -267,18 +269,28 @@
267 269 if (!xen_initial_domain())
268 270 xenbus_probe(NULL);
269 271  
  272 + /*
  273 + * Making sure board specific code will not set up ops for
  274 + * cpu idle and cpu freq.
  275 + */
  276 + disable_cpuidle();
  277 + disable_cpufreq();
  278 +
270 279 return 0;
271 280 }
272 281 core_initcall(xen_guest_init);
273 282  
274 283 static int __init xen_pm_init(void)
275 284 {
  285 + if (!xen_domain())
  286 + return -ENODEV;
  287 +
276 288 pm_power_off = xen_power_off;
277 289 arm_pm_restart = xen_restart;
278 290  
279 291 return 0;
280 292 }
281   -subsys_initcall(xen_pm_init);
  293 +late_initcall(xen_pm_init);
282 294  
283 295 static irqreturn_t xen_arm_callback(int irq, void *arg)
284 296 {
arch/x86/xen/enlighten.c
... ... @@ -1692,7 +1692,6 @@
1692 1692 case CPU_UP_PREPARE:
1693 1693 xen_vcpu_setup(cpu);
1694 1694 if (xen_have_vector_callback) {
1695   - xen_init_lock_cpu(cpu);
1696 1695 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1697 1696 xen_setup_timer(cpu);
1698 1697 }
... ... @@ -990,10 +990,13 @@
990 990 printk(KERN_WARNING "m2p_remove_override: "
991 991 "pfn %lx mfn %lx, failed to modify kernel mappings",
992 992 pfn, mfn);
  993 + put_balloon_scratch_page();
993 994 return -1;
994 995 }
995 996  
996   - mcs = xen_mc_entry(
  997 + xen_mc_batch();
  998 +
  999 + mcs = __xen_mc_entry(
997 1000 sizeof(struct gnttab_unmap_and_replace));
998 1001 unmap_op = mcs.args;
999 1002 unmap_op->host_addr = kmap_op->host_addr;
1000 1003  
1001 1004  
... ... @@ -1003,12 +1006,11 @@
1003 1006 MULTI_grant_table_op(mcs.mc,
1004 1007 GNTTABOP_unmap_and_replace, unmap_op, 1);
1005 1008  
1006   - xen_mc_issue(PARAVIRT_LAZY_MMU);
1007   -
1008 1009 mcs = __xen_mc_entry(0);
1009 1010 MULTI_update_va_mapping(mcs.mc, scratch_page_address,
1010   - pfn_pte(page_to_pfn(get_balloon_scratch_page()),
  1011 + pfn_pte(page_to_pfn(scratch_page),
1011 1012 PAGE_KERNEL_RO), 0);
  1013 +
1012 1014 xen_mc_issue(PARAVIRT_LAZY_MMU);
1013 1015  
1014 1016 kmap_op->host_addr = 0;
... ... @@ -273,12 +273,20 @@
273 273 BUG_ON(smp_processor_id() != 0);
274 274 native_smp_prepare_boot_cpu();
275 275  
276   - /* We've switched to the "real" per-cpu gdt, so make sure the
277   - old memory can be recycled */
278   - make_lowmem_page_readwrite(xen_initial_gdt);
  276 + if (xen_pv_domain()) {
  277 + /* We've switched to the "real" per-cpu gdt, so make sure the
  278 + old memory can be recycled */
  279 + make_lowmem_page_readwrite(xen_initial_gdt);
279 280  
280   - xen_filter_cpu_maps();
281   - xen_setup_vcpu_info_placement();
  281 + xen_filter_cpu_maps();
  282 + xen_setup_vcpu_info_placement();
  283 + }
  284 + /*
  285 + * The alternative logic (which patches the unlock/lock) runs before
  286 + * the smp bootup up code is activated. Hence we need to set this up
  287 + * the core kernel is being patched. Otherwise we will have only
  288 + * modules patched but not core code.
  289 + */
282 290 xen_init_spinlocks();
283 291 }
284 292  
... ... @@ -709,6 +717,15 @@
709 717 WARN_ON(rc);
710 718 if (!rc)
711 719 rc = native_cpu_up(cpu, tidle);
  720 +
  721 + /*
  722 + * We must initialize the slowpath CPU kicker _after_ the native
  723 + * path has executed. If we initialized it before none of the
  724 + * unlocker IPI kicks would reach the booting CPU as the booting
  725 + * CPU had not set itself 'online' in cpu_online_mask. That mask
  726 + * is checked when IPIs are sent (on HVM at least).
  727 + */
  728 + xen_init_lock_cpu(cpu);
712 729 return rc;
713 730 }
714 731  
... ... @@ -728,5 +745,6 @@
728 745 smp_ops.cpu_die = xen_hvm_cpu_die;
729 746 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
730 747 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
  748 + smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
731 749 }
arch/x86/xen/spinlock.c
... ... @@ -81,7 +81,6 @@
81 81 spinlock_stats.time_blocked += delta;
82 82 }
83 83 #else /* !CONFIG_XEN_DEBUG_FS */
84   -#define TIMEOUT (1 << 10)
85 84 static inline void add_stats(enum xen_contention_stat var, u32 val)
86 85 {
87 86 }
... ... @@ -96,23 +95,6 @@
96 95 }
97 96 #endif /* CONFIG_XEN_DEBUG_FS */
98 97  
99   -/*
100   - * Size struct xen_spinlock so it's the same as arch_spinlock_t.
101   - */
102   -#if NR_CPUS < 256
103   -typedef u8 xen_spinners_t;
104   -# define inc_spinners(xl) \
105   - asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
106   -# define dec_spinners(xl) \
107   - asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
108   -#else
109   -typedef u16 xen_spinners_t;
110   -# define inc_spinners(xl) \
111   - asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
112   -# define dec_spinners(xl) \
113   - asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
114   -#endif
115   -
116 98 struct xen_lock_waiting {
117 99 struct arch_spinlock *lock;
118 100 __ticket_t want;
... ... @@ -123,6 +105,7 @@
123 105 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
124 106 static cpumask_t waiting_cpus;
125 107  
  108 +static bool xen_pvspin = true;
126 109 static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
127 110 {
128 111 int irq = __this_cpu_read(lock_kicker_irq);
129 112  
... ... @@ -241,16 +224,12 @@
241 224 int irq;
242 225 char *name;
243 226  
  227 + if (!xen_pvspin)
  228 + return;
  229 +
244 230 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
245 231 cpu, per_cpu(lock_kicker_irq, cpu));
246 232  
247   - /*
248   - * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
249   - * (xen: disable PV spinlocks on HVM)
250   - */
251   - if (xen_hvm_domain())
252   - return;
253   -
254 233 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
255 234 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
256 235 cpu,
... ... @@ -270,11 +249,7 @@
270 249  
271 250 void xen_uninit_lock_cpu(int cpu)
272 251 {
273   - /*
274   - * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
275   - * (xen: disable PV spinlocks on HVM)
276   - */
277   - if (xen_hvm_domain())
  252 + if (!xen_pvspin)
278 253 return;
279 254  
280 255 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
281 256  
... ... @@ -283,16 +258,9 @@
283 258 per_cpu(irq_name, cpu) = NULL;
284 259 }
285 260  
286   -static bool xen_pvspin __initdata = true;
287 261  
288 262 void __init xen_init_spinlocks(void)
289 263 {
290   - /*
291   - * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
292   - * (xen: disable PV spinlocks on HVM)
293   - */
294   - if (xen_hvm_domain())
295   - return;
296 264  
297 265 if (!xen_pvspin) {
298 266 printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
... ... @@ -322,6 +290,9 @@
322 290  
323 291 if (d_xen == NULL)
324 292 return -ENOMEM;
  293 +
  294 + if (!xen_pvspin)
  295 + return 0;
325 296  
326 297 d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
327 298