kernel/up.c

/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>
  
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
  
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
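
For context, a minimal caller-side sketch follows. It is illustrative only and not part of up.c: the callback bump_counter, the predicate cpu_needs_flush, and example_callers() are hypothetical names, while on_each_cpu(), on_each_cpu_mask(), and on_each_cpu_cond() are the interfaces defined above. On SMP these calls are serviced by kernel/smp.c and may involve cross-CPU interrupts; on a UP build they reduce to the direct, interrupts-disabled invocations shown in this file.

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Hypothetical callback: matches smp_call_func_t, i.e. void (*)(void *). */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);
}

/* Hypothetical predicate for on_each_cpu_cond(). */
static bool cpu_needs_flush(int cpu, void *info)
{
	return cpu_online(cpu);
}

static void example_callers(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* Run the callback on every CPU and wait for completion. */
	on_each_cpu(bump_counter, &hits, 1);

	/*
	 * Run it only on the CPUs in the mask; on_each_cpu_mask() still
	 * tests the mask because a caller may pass one that does not
	 * include the local CPU.
	 */
	on_each_cpu_mask(cpu_online_mask, bump_counter, &hits, true);

	/*
	 * Run it only where the predicate returns true; the predicate is
	 * evaluated with preemption disabled on both UP and SMP.
	 */
	on_each_cpu_cond(cpu_needs_flush, bump_counter, &hits, true, GFP_KERNEL);
}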