Commit 9ec4fa271faf2db3b8e1419c998da1ca6b094eb6
Committed by
Ingo Molnar
1 parent
e25c2c873f
Exists in
master
and in
4 other branches
irq, cpumask: correct CPUMASKS_OFFSTACK typo and fix fallout
CPUMASKS_OFFSTACK is not defined anywhere (it is CPUMASK_OFFSTACK). It is a typo, and init_alloc_desc_masks() is called before it sets affinity to all cpus... Split init_alloc_desc_masks() into alloc_desc_masks() and init_desc_masks(). Also use CPUMASK_OFFSTACK in alloc_desc_masks(). [ Impact: fix smp_affinity copying/setup when moving irq_desc between CPUs ] Signed-off-by: Yinghai Lu <yinghai@kernel.org> Acked-by: Rusty Russell <rusty@rustcorp.com.au> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Suresh Siddha <suresh.b.siddha@intel.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> LKML-Reference: <49F6546E.3040406@kernel.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 25 additions and 13 deletions Side-by-side Diff
include/linux/irq.h
... | ... | @@ -424,27 +424,25 @@ |
424 | 424 | |
425 | 425 | #ifdef CONFIG_SMP |
426 | 426 | /** |
427 | - * init_alloc_desc_masks - allocate cpumasks for irq_desc | |
427 | + * alloc_desc_masks - allocate cpumasks for irq_desc | |
428 | 428 | * @desc: pointer to irq_desc struct |
429 | 429 | * @cpu: cpu which will be handling the cpumasks |
430 | 430 | * @boot: true if need bootmem |
431 | 431 | * |
432 | 432 | * Allocates affinity and pending_mask cpumask if required. |
433 | 433 | * Returns true if successful (or not required). |
434 | - * Side effect: affinity has all bits set, pending_mask has all bits clear. | |
435 | 434 | */ |
436 | -static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |
435 | +static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu, | |
437 | 436 | bool boot) |
438 | 437 | { |
438 | +#ifdef CONFIG_CPUMASK_OFFSTACK | |
439 | 439 | int node; |
440 | 440 | |
441 | 441 | if (boot) { |
442 | 442 | alloc_bootmem_cpumask_var(&desc->affinity); |
443 | - cpumask_setall(desc->affinity); | |
444 | 443 | |
445 | 444 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
446 | 445 | alloc_bootmem_cpumask_var(&desc->pending_mask); |
447 | - cpumask_clear(desc->pending_mask); | |
448 | 446 | #endif |
449 | 447 | return true; |
450 | 448 | } |
451 | 449 | |
452 | 450 | |
453 | 451 | |
... | ... | @@ -453,18 +451,25 @@ |
453 | 451 | |
454 | 452 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) |
455 | 453 | return false; |
456 | - cpumask_setall(desc->affinity); | |
457 | 454 | |
458 | 455 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
459 | 456 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { |
460 | 457 | free_cpumask_var(desc->affinity); |
461 | 458 | return false; |
462 | 459 | } |
463 | - cpumask_clear(desc->pending_mask); | |
464 | 460 | #endif |
461 | +#endif | |
465 | 462 | return true; |
466 | 463 | } |
467 | 464 | |
465 | +static inline void init_desc_masks(struct irq_desc *desc) | |
466 | +{ | |
467 | + cpumask_setall(desc->affinity); | |
468 | +#ifdef CONFIG_GENERIC_PENDING_IRQ | |
469 | + cpumask_clear(desc->pending_mask); | |
470 | +#endif | |
471 | +} | |
472 | + | |
468 | 473 | /** |
469 | 474 | * init_copy_desc_masks - copy cpumasks for irq_desc |
470 | 475 | * @old_desc: pointer to old irq_desc struct |
... | ... | @@ -478,7 +483,7 @@ |
478 | 483 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
479 | 484 | struct irq_desc *new_desc) |
480 | 485 | { |
481 | -#ifdef CONFIG_CPUMASKS_OFFSTACK | |
486 | +#ifdef CONFIG_CPUMASK_OFFSTACK | |
482 | 487 | cpumask_copy(new_desc->affinity, old_desc->affinity); |
483 | 488 | |
484 | 489 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
485 | 490 | |
... | ... | @@ -499,10 +504,14 @@ |
499 | 504 | |
500 | 505 | #else /* !CONFIG_SMP */ |
501 | 506 | |
502 | -static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |
507 | +static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu, | |
503 | 508 | bool boot) |
504 | 509 | { |
505 | 510 | return true; |
511 | +} | |
512 | + | |
513 | +static inline void init_desc_masks(struct irq_desc *desc) | |
514 | +{ | |
506 | 515 | } |
507 | 516 | |
508 | 517 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
kernel/irq/handle.c
... | ... | @@ -115,10 +115,11 @@ |
115 | 115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); |
116 | 116 | BUG_ON(1); |
117 | 117 | } |
118 | - if (!init_alloc_desc_masks(desc, cpu, false)) { | |
118 | + if (!alloc_desc_masks(desc, cpu, false)) { | |
119 | 119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); |
120 | 120 | BUG_ON(1); |
121 | 121 | } |
122 | + init_desc_masks(desc); | |
122 | 123 | arch_init_chip_data(desc, cpu); |
123 | 124 | } |
124 | 125 | |
... | ... | @@ -169,7 +170,8 @@ |
169 | 170 | desc[i].irq = i; |
170 | 171 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
171 | 172 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
172 | - init_alloc_desc_masks(&desc[i], 0, true); | |
173 | + alloc_desc_masks(&desc[i], 0, true); | |
174 | + init_desc_masks(&desc[i]); | |
173 | 175 | irq_desc_ptrs[i] = desc + i; |
174 | 176 | } |
175 | 177 | |
... | ... | @@ -256,7 +258,8 @@ |
256 | 258 | |
257 | 259 | for (i = 0; i < count; i++) { |
258 | 260 | desc[i].irq = i; |
259 | - init_alloc_desc_masks(&desc[i], 0, true); | |
261 | + alloc_desc_masks(&desc[i], 0, true); | |
262 | + init_desc_masks(&desc[i]); | |
260 | 263 | desc[i].kstat_irqs = kstat_irqs_all[i]; |
261 | 264 | } |
262 | 265 | return arch_early_irq_init(); |
kernel/irq/numa_migrate.c
... | ... | @@ -37,7 +37,7 @@ |
37 | 37 | struct irq_desc *desc, int cpu) |
38 | 38 | { |
39 | 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
40 | - if (!init_alloc_desc_masks(desc, cpu, false)) { | |
40 | + if (!alloc_desc_masks(desc, cpu, false)) { | |
41 | 41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " |
42 | 42 | "for migration.\n", irq); |
43 | 43 | return false; |