kernel/smp.c

/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

  static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
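
/*
 * The two per-cpu objects above serve the two call paths: cfd_data carries
 * the state of a multi-CPU call issued via smp_call_function_many() (the
 * callback, the target cpumask and a reference count), while
 * call_single_queue is each CPU's list of pending single-CPU requests,
 * drained by generic_smp_call_function_single_interrupt().
 */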
  
  static int
  hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
  	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
  
  	switch (action) {
  	case CPU_UP_PREPARE:
  	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
  	case CPU_UP_CANCELED:
  	case CPU_UP_CANCELED_FROZEN:
  
  	case CPU_DEAD:
  	case CPU_DEAD_FROZEN:
  		free_cpumask_var(cfd->cpumask);
  		break;
  #endif
  	};
  
  	return NOTIFY_OK;
  }
  
  static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
  {
  	while (data->flags & CSD_FLAG_LOCK)
  		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}
  
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, because of the normal
	 * cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
  }
  
  /*
   * Invoked by arch to handle an IPI for call function. Must be called with
   * interrupts disabled.
   */
  void generic_smp_call_function_interrupt(void)
  {
  	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (!refs) {
			raw_spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			raw_spin_unlock(&call_function.lock);
		}

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}
  }
  
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
  }
  EXPORT_SYMBOL(smp_call_function_single);
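
/*
 * Illustrative usage sketch; the helpers below are hypothetical examples,
 * not part of this API. They show running a fast, non-blocking callback on
 * one specific CPU and waiting for it to finish.
 */
#if 0
static void note_cpu(void *info)
{
	/* Runs on the target CPU, in IPI context, with interrupts disabled. */
	*(int *)info = smp_processor_id();
}

static int example_poke_cpu(int cpu)
{
	int ran_on = -1;
	int err;

	/* wait == 1: do not return until note_cpu() has run on @cpu. */
	err = smp_call_function_single(cpu, note_cpu, &ran_on, 1);
	if (!err)
		pr_info("callback ran on cpu %d\n", ran_on);

	return err;
}
#endif
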
  /*
   * smp_call_function_any - Run a function on any of the given cpus
   * @mask: The mask of cpus it can run on.
   * @func: The function to run. This must be fast and non-blocking.
   * @info: An arbitrary pointer to pass to the function.
   * @wait: If true, wait until function has completed.
   *
   * Returns 0 on success, else a negative status code (if no cpus were online).
   * Note that @wait will be implicitly turned on in case of allocation failures,
   * since we fall back to on-stack allocation.
   *
   * Selection preference:
   *	1) current cpu if in @mask
   *	2) any cpu of current node if in @mask
   *	3) any other online cpu in @mask
   */
  int smp_call_function_any(const struct cpumask *mask,
  			  void (*func)(void *info), void *info, int wait)
  {
  	unsigned int cpu;
  	const struct cpumask *nodemask;
  	int ret;
  
  	/* Try for same CPU (cheapest) */
  	cpu = get_cpu();
  	if (cpumask_test_cpu(cpu, mask))
  		goto call;
  
  	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
  	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
  	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
  		if (cpu_online(cpu))
  			goto call;
  	}
  
  	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
  	cpu = cpumask_any_and(mask, cpu_online_mask);
  call:
  	ret = smp_call_function_single(cpu, func, info, wait);
  	put_cpu();
  	return ret;
  }
  EXPORT_SYMBOL_GPL(smp_call_function_any);
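
/*
 * Illustrative sketch; the names below are hypothetical examples. It shows
 * the case where any CPU out of a set will do, letting
 * smp_call_function_any() pick the cheapest candidate per the selection
 * preference documented above.
 */
#if 0
static void read_package_sensor(void *info)
{
	*(unsigned int *)info = smp_processor_id();	/* stand-in payload */
}

static int example_read_from_any(const struct cpumask *candidates)
{
	unsigned int ran_on = 0;

	/* wait == 1: the value in ran_on is stable once this returns. */
	return smp_call_function_any(candidates, read_package_sensor,
				     &ran_on, 1);
}
#endif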

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until the function has completed on @cpu.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	generic_exec_single(cpu, data, wait);
  }
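
/*
 * Illustrative sketch; the structure and helpers below are hypothetical
 * examples. It shows embedding the call_single_data in a longer-lived,
 * zero-initialised object so the signalling path needs no allocation; the
 * csd must not be resubmitted until the previous call has finished, which
 * csd_lock() above enforces by spinning on CSD_FLAG_LOCK.
 */
#if 0
struct remote_kick {
	struct call_single_data	csd;	/* zero-initialised before first use */
	atomic_t		pending;
};

static void remote_kick_fn(void *info)
{
	struct remote_kick *rk = info;

	atomic_set(&rk->pending, 0);
}

static void example_kick(int cpu, struct remote_kick *rk)
{
	rk->csd.func = remote_kick_fn;
	rk->csd.info = rk;
	atomic_set(&rk->pending, 1);

	/* wait == 0: return once the request is queued and the IPI sent. */
	__smp_call_function_single(cpu, &rk->csd, 0);
}
#endif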
  
  /**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
  EXPORT_SYMBOL(smp_call_function_many);
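
/*
 * Illustrative sketch; the names below are hypothetical examples. It shows
 * running a callback on a set of CPUs: smp_call_function_many() always skips
 * the calling CPU, so the caller handles it locally if it is in the set.
 */
#if 0
static void drain_local_queue(void *info)
{
	/* Runs on every targeted online CPU except the calling one. */
}

static void example_drain(const struct cpumask *cpus)
{
	preempt_disable();

	/* wait == true: returns only after all targeted CPUs ran the callback. */
	smp_call_function_many(cpus, drain_local_queue, NULL, true);

	if (cpumask_test_cpu(smp_processor_id(), cpus))
		drain_local_queue(NULL);

	preempt_enable();
}
#endif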

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
  }
  EXPORT_SYMBOL(smp_call_function);
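
/*
 * Illustrative sketch; the names below are hypothetical examples.
 * smp_call_function() targets every other online CPU, so a caller that also
 * needs the callback locally runs it itself, mirroring what on_each_cpu()
 * does.
 */
#if 0
static void sync_state(void *info)
{
}

static void example_sync_all_cpus(void)
{
	preempt_disable();
	smp_call_function(sync_state, NULL, 1);
	local_irq_disable();
	sync_state(NULL);
	local_irq_enable();
	preempt_enable();
}
#endif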
  
  void ipi_call_lock(void)
  {
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}