Commit 31a6b11fed6ceec07ec4bdfefae56b8252d450cf

Authored by Xiantao Zhang
Committed by Tony Luck
1 parent 96651896b8

[IA64] Implement smp_call_function_mask for ia64

This interface gives the SMP infrastructure more flexibility than the
existing smp_call_function()/smp_call_function_single() calls: e.g. KVM
frequently needs to run a function on an arbitrary subset of cpus.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
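
As a usage sketch (not part of this commit): a caller such as KVM could
invoke the new interface as below. The names drain_local_state() and
drain_cpus() are hypothetical. The callback runs from the IPI handler on
each target cpu, so it must be fast and non-blocking, and the mask passed
in must not include the calling cpu.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical callback: runs in interrupt context on each target
 * cpu, so it must be fast and must not sleep. */
static void drain_local_state(void *info)
{
	/* ... per-cpu work using *info ... */
}

/* Hypothetical caller: run drain_local_state() on the online cpus in
 * 'targets', excluding ourselves, and wait until all have finished. */
static int drain_cpus(cpumask_t targets, void *info)
{
	int cpu = get_cpu();	/* pin ourselves; mask must not include us */
	int ret;

	cpu_clear(cpu, targets);
	ret = smp_call_function_mask(targets, drain_local_state, info, 1);
	put_cpu();
	return ret;
}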

Showing 2 changed files with 85 additions and 0 deletions

arch/ia64/kernel/smp.c

@@ -213,6 +213,19 @@
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
 	int i;
@@ -400,6 +413,75 @@
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask:	The set of cpus to run on.  Must not include the current cpu.
+ * @func:	The function to run.  This must be fast and non-blocking.
+ * @info:	An arbitrary pointer to pass to the function.
+ * @wait:	If true, wait (atomically) until function has completed
+ *		on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
 
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
include/asm-ia64/smp.h

@@ -38,6 +38,9 @@
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id()	ia64_get_lid()
 
 #ifdef CONFIG_SMP
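
For context (not shown in this diff): the sender/receiver handshake above
relies on the existing IPI_CALL_FUNC case of handle_IPI() in
arch/ia64/kernel/smp.c. A simplified sketch of what each target cpu does,
not the verbatim handler, illustrates why the sender publishes call_data
with mb() and can then spin on data.started and data.finished:

	/* Sketch of the receiving side (see the IPI_CALL_FUNC case in
	 * handle_IPI()); simplified for illustration. */
	struct call_data_struct *data =
		(struct call_data_struct *) call_data;
	void (*func)(void *) = data->func;
	void *info = data->info;
	int wait = data->wait;

	mb();				/* read fields before bumping started */
	atomic_inc(&data->started);	/* ends sender's spin on 'started';
					 * 'data' may vanish unless wait */
	(*func)(info);
	mb();				/* make func's stores visible first */
	if (wait)
		atomic_inc(&data->finished);	/* lets sender pop 'data' */

Because 'data' lives on the sender's stack, the unconditional spin on
'started' is what keeps the structure alive long enough for every target
to read func/info, and the 'finished' counter extends that guarantee when
@wait is set.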