Commit d9e9e8e2fe832180f5c8f659a63def2e8fcaea4a

Authored by Linus Torvalds

Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "A slighlty large fix for a subtle issue in the CPU hotplug code of
  certain ARM SoCs, where the not yet online cpu needs to setup the cpu
  local timer and needs to set the interrupt affinity to itself.
  Setting interrupt affinity to a not online cpu is prohibited and
  therefor the timer interrupt ends up on the wrong cpu, which leads to
  nasty complications.

  The SoC folks tried to hack around that in the SoC code in some more
  than nasty ways.  The proper solution is to have a way to enforce the
  affinity setting to a not online cpu.  The core patch to the genirq
  code provides that facility and the follow up patches make use of it
  in the GIC interrupt controller and the exynos timer driver.

  The change to the core code has no implications to existing users,
  except for the rename of the locked function and therefor the
  necessary fixup in mips/cavium.  Aside of that, no runtime impact is
  possible, as none of the existing interrupt chips implements anything
  which depends on the force argument of the irq_set_affinity()
  callback"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Exynos_mct: Register clock event after request_irq()
  clocksource: Exynos_mct: Use irq_force_affinity() in cpu bringup
  irqchip: Gic: Support forced affinity setting
  genirq: Allow forcing cpu affinity of interrupts
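
What the shortlog amounts to for a driver author, as a rough sketch: the
function name below is hypothetical, not from this merge, but the calls are
the ones the series adds. Code running on a new cpu in its CPU_STARTING
notifier, before the cpu appears in cpu_online_mask, can now bind a per-cpu
interrupt to itself:

    /*
     * Hedged sketch, not taken from this merge: a hypothetical per-cpu
     * timer bringup path, running on the new cpu in its CPU_STARTING
     * notifier, i.e. before the cpu shows up in cpu_online_mask.
     */
    static void hypothetical_percpu_timer_bringup(unsigned int irq)
    {
            unsigned int cpu = smp_processor_id();

            /*
             * irq_set_affinity(irq, cpumask_of(cpu)) would check the
             * mask against cpu_online_mask and could not target this
             * cpu yet; the new helper skips the online check.
             */
            if (irq_force_affinity(irq, cpumask_of(cpu)))
                    pr_warn("cannot bind IRQ %u to cpu %u\n", irq, cpu);
    }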

6 changed files:

arch/mips/cavium-octeon/octeon-irq.c
@@ -635,7 +635,7 @@
                cpumask_clear(&new_affinity);
                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
        }
-       __irq_set_affinity_locked(data, &new_affinity);
+       irq_set_affinity_locked(data, &new_affinity, false);
 }
 
 static int octeon_irq_ciu_set_affinity(struct irq_data *data,
drivers/clocksource/exynos_mct.c
@@ -416,8 +416,6 @@
        evt->set_mode = exynos4_tick_set_mode;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 450;
-       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
-                                       0xf, 0x7fffffff);
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
@@ -430,9 +428,12 @@
                                evt->irq);
                        return -EIO;
                }
+               irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
+       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+                                       0xf, 0x7fffffff);
 
        return 0;
 }
@@ -450,7 +451,6 @@
                                unsigned long action, void *hcpu)
 {
        struct mct_clock_event_device *mevt;
-       unsigned int cpu;
 
        /*
         * Grab cpu pointer in each case to avoid spurious
@@ -460,12 +460,6 @@
        case CPU_STARTING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_setup(&mevt->evt);
-               break;
-       case CPU_ONLINE:
-               cpu = (unsigned long)hcpu;
-               if (mct_int_type == MCT_INT_SPI)
-                       irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
-                                        cpumask_of(cpu));
                break;
        case CPU_DYING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
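
The exynos_mct hunks above cooperate: the interrupt is requested and forcibly
bound before the clock event device is registered, so the first event cannot
land on another cpu, and the old CPU_ONLINE re-affinity step becomes dead
code. A condensed sketch of the resulting order (function name hypothetical,
error paths and the SPI/PPI split trimmed):

    /*
     * Sketch of the ordering the two patches establish in
     * exynos4_local_timer_setup(); not the literal driver code.
     */
    static int sketch_local_timer_setup(struct clock_event_device *evt,
                                        unsigned int cpu, void *dev_id,
                                        unsigned long clk_rate)
    {
            /* 1. Install the handler first. */
            if (request_irq(evt->irq, exynos4_mct_tick_isr,
                            IRQF_TIMER | IRQF_NOBALANCING, evt->name, dev_id))
                    return -EIO;

            /* 2. Force it onto the (not yet online) target cpu. */
            irq_force_affinity(evt->irq, cpumask_of(cpu));

            /* 3. Only now expose the clock event device, so the first
             *    event cannot fire on the wrong cpu. */
            clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
                                            0xf, 0x7fffffff);
            return 0;
    }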
drivers/irqchip/irq-gic.c
@@ -246,9 +246,13 @@
                            bool force)
 {
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int shift = (gic_irq(d) % 4) * 8;
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
+
+       if (!force)
+               cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       else
+               cpu = cpumask_first(mask_val);
 
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
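
With force set, the GIC now takes the first cpu of the caller's mask verbatim
instead of intersecting it with cpu_online_mask; an empty or out-of-range
result is still caught by the NR_GIC_CPU_IF / nr_cpu_ids check. The selection
logic, restated as a standalone helper (hypothetical name, same logic as the
hunk above):

    /* Restatement of the cpu selection added above. */
    static unsigned int gic_pick_target_cpu(const struct cpumask *mask_val,
                                            bool force)
    {
            if (!force)
                    /* Normal path: a cpu both requested and online. */
                    return cpumask_any_and(mask_val, cpu_online_mask);
            /* Forced path: trust the caller, online or not. */
            return cpumask_first(mask_val);
    }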
include/linux/interrupt.h
@@ -203,7 +203,40 @@
 
 extern cpumask_var_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+                              bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:        Interrupt to set affinity
+ * @mask:       cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:        Interrupt to set affinity
+ * @mask:       cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, true);
+}
+
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
include/linux/irq.h
@@ -394,7 +394,8 @@
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
-extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+                                   const struct cpumask *cpumask, bool force);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
kernel/irq/manage.c
@@ -180,7 +180,7 @@
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;
 
-       ret = chip->irq_set_affinity(data, mask, false);
+       ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@
        return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = irq_do_set_affinity(data, mask, false);
+               ret = irq_do_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@
        return ret;
 }
 
-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq:        Interrupt to set affinity
- * @mask:       cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -233,7 +228,7 @@
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+       ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
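
Taken together, these manage.c hunks thread the force flag end to end, and
the kernel-doc removed from irq_set_affinity() here reappears on the new
inline wrappers in interrupt.h. A condensed map of the resulting call chain,
as a summary comment rather than literal kernel code:

    /*
     * Condensed call chain after this merge (summary, not literal code):
     *
     *   irq_set_affinity(irq, mask)   -> __irq_set_affinity(irq, mask, false)
     *   irq_force_affinity(irq, mask) -> __irq_set_affinity(irq, mask, true)
     *
     * __irq_set_affinity() takes desc->lock and calls
     * irq_set_affinity_locked(data, mask, force), which either applies
     * the mask right away through irq_do_set_affinity(data, mask, force),
     * now handing force through to chip->irq_set_affinity(), or, for
     * irqs that cannot be moved in process context, records the mask as
     * pending.  Before this merge both inner calls hard-coded false.
     */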