Blame view
kernel/up.c
2.33 KB
457c89965 treewide: Add SPD... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
53ce3d956 smp_call_function... |
2 3 4 |
/* * Uniprocessor-only support functions. The counterpart to kernel/smp.c */ |
6e9628141 smp_call_function... |
5 |
#include <linux/interrupt.h> |
53ce3d956 smp_call_function... |
6 |
#include <linux/kernel.h> |
9984de1a5 kernel: Map most ... |
7 |
#include <linux/export.h> |
53ce3d956 smp_call_function... |
8 |
#include <linux/smp.h> |
47ae4b05d virt, sched: Add ... |
9 |
#include <linux/hypervisor.h> |
53ce3d956 smp_call_function... |
10 11 12 13 |
/*
 * UP stub of smp_call_function_single(): with only CPU 0 present the
 * "cross call" degenerates to invoking @func locally.  Interrupts are
 * disabled around the call to mirror the SMP IPI-handler context.
 * @wait is irrelevant here since the call is always synchronous.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* Any CPU other than 0 cannot exist on UP — flag the caller's bug. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
fa688207c smp: quit uncondi... |
23 |
|
966a96711 smp: Avoid using ... |
24 |
/*
 * UP stub of smp_call_function_single_async(): the "asynchronous" call
 * simply runs @csd->func(@csd->info) immediately on the local CPU with
 * interrupts disabled, emulating IPI-handler context.  Always returns 0.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	csd->func(csd->info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
40c01e8bd kernel: provide a... |
34 |
|
caa759323 smp: Remove smp_c... |
35 |
/*
 * UP stub of on_each_cpu(): "each CPU" is just CPU 0, so run @func once
 * locally, with interrupts off to match the SMP IPI-handler environment.
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(on_each_cpu);
fa688207c smp: quit uncondi... |
44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 |
/* * Note we still need to test the mask even for UP * because we actually can get an empty mask from * code that on SMP might call us without the local * CPU in the mask. */ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { unsigned long flags; if (cpumask_test_cpu(0, mask)) { local_irq_save(flags); func(info); local_irq_restore(flags); } } EXPORT_SYMBOL(on_each_cpu_mask); /* * Preemption is disabled here to make sure the cond_func is called under the * same condtions in UP and SMP. */ |
7d49b28a8 smp,cpumask: intr... |
67 68 69 |
/*
 * UP stub of on_each_cpu_cond_mask(): only CPU 0 exists, so consult
 * @cond_func for CPU 0 and, if it approves, run @func locally with
 * interrupts disabled.  @mask and @gfp_flags are unused on UP but kept
 * for interface parity with the SMP implementation.
 */
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
			   smp_call_func_t func, void *info, bool wait,
			   gfp_t gfp_flags, const struct cpumask *mask)
{
	unsigned long irqflags;

	/* Match SMP: evaluate cond_func with preemption disabled. */
	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(irqflags);
		func(info);
		local_irq_restore(irqflags);
	}
	preempt_enable();
}
7d49b28a8 smp,cpumask: intr... |
81 82 83 84 85 86 87 88 |
EXPORT_SYMBOL(on_each_cpu_cond_mask); void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags) { on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL); } |
fa688207c smp: quit uncondi... |
89 |
EXPORT_SYMBOL(on_each_cpu_cond); |
df8ce9d78 smp: Add function... |
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 |
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) { int ret; if (cpu != 0) return -ENXIO; if (phys) hypervisor_pin_vcpu(0); ret = func(par); if (phys) hypervisor_pin_vcpu(-1); return ret; } EXPORT_SYMBOL_GPL(smp_call_on_cpu); |