Commit 9f5a5621e78cf48d86682a71ceb3fcdbde38b222
Parent: d209d74d52
Exists in master and in 4 other branches.
smp: Convert smplocks to raw_spinlocks
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
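For background (not part of the commit): under PREEMPT_RT, spinlock_t is substituted with a sleeping lock, while raw_spinlock_t retains true busy-wait semantics, so paths that must run with interrupts hard-disabled (such as the IPI code below) need the raw_ variants. The following is a minimal illustrative sketch of that API split; the lock names and function are hypothetical, not from kernel/smp.c.

#include <linux/spinlock.h>

/* Hypothetical example locks, mirroring the two types this patch deals with. */
static DEFINE_SPINLOCK(example_lock);          /* may sleep under PREEMPT_RT   */
static DEFINE_RAW_SPINLOCK(example_raw_lock);  /* always spins, never sleeps   */

static void example_critical_sections(void)
{
        unsigned long flags;

        /*
         * Fine in ordinary process context; on PREEMPT_RT this can
         * block, which is why it is unusable from hard-irq/IPI paths.
         */
        spin_lock(&example_lock);
        spin_unlock(&example_lock);

        /*
         * Safe in hard-irq context even on PREEMPT_RT: the raw variant
         * disables interrupts and busy-waits unconditionally.
         */
        raw_spin_lock_irqsave(&example_raw_lock, flags);
        raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}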
Showing 1 changed file with 16 additions and 16 deletions.
kernel/smp.c
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@
 
 static struct {
         struct list_head        queue;
-        spinlock_t              lock;
+        raw_spinlock_t          lock;
 } call_function __cacheline_aligned_in_smp =
         {
                 .queue = LIST_HEAD_INIT(call_function.queue),
-                .lock  = __SPIN_LOCK_UNLOCKED(call_function.lock),
+                .lock  = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
         };
 
 enum {
@@ -35,7 +35,7 @@
 
 struct call_single_queue {
         struct list_head        list;
-        spinlock_t              lock;
+        raw_spinlock_t          lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@
         for_each_possible_cpu(i) {
                 struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-                spin_lock_init(&q->lock);
+                raw_spin_lock_init(&q->lock);
                 INIT_LIST_HEAD(&q->list);
         }
 
@@ -141,10 +141,10 @@
         unsigned long flags;
         int ipi;
 
-        spin_lock_irqsave(&dst->lock, flags);
+        raw_spin_lock_irqsave(&dst->lock, flags);
         ipi = list_empty(&dst->list);
         list_add_tail(&data->list, &dst->list);
-        spin_unlock_irqrestore(&dst->lock, flags);
+        raw_spin_unlock_irqrestore(&dst->lock, flags);
 
         /*
          * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@
                 refs = atomic_dec_return(&data->refs);
                 WARN_ON(refs < 0);
                 if (!refs) {
-                        spin_lock(&call_function.lock);
+                        raw_spin_lock(&call_function.lock);
                         list_del_rcu(&data->csd.list);
-                        spin_unlock(&call_function.lock);
+                        raw_spin_unlock(&call_function.lock);
                 }
 
                 if (refs)
@@ -230,9 +230,9 @@
          */
         WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-        spin_lock(&q->lock);
+        raw_spin_lock(&q->lock);
         list_replace_init(&q->list, &list);
-        spin_unlock(&q->lock);
+        raw_spin_unlock(&q->lock);
 
         while (!list_empty(&list)) {
                 struct call_single_data *data;
@@ -449,14 +449,14 @@
         cpumask_clear_cpu(this_cpu, data->cpumask);
         atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-        spin_lock_irqsave(&call_function.lock, flags);
+        raw_spin_lock_irqsave(&call_function.lock, flags);
         /*
          * Place entry at the _HEAD_ of the list, so that any cpu still
          * observing the entry in generic_smp_call_function_interrupt()
          * will not miss any other list entries:
          */
         list_add_rcu(&data->csd.list, &call_function.queue);
-        spin_unlock_irqrestore(&call_function.lock, flags);
+        raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
         /*
          * Make the list addition visible before sending the ipi.
@@ -501,21 +501,21 @@
 
 void ipi_call_lock(void)
 {
-        spin_lock(&call_function.lock);
+        raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-        spin_unlock(&call_function.lock);
+        raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-        spin_lock_irq(&call_function.lock);
+        raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-        spin_unlock_irq(&call_function.lock);
+        raw_spin_unlock_irq(&call_function.lock);
 }