Commit 961ec6daa7b14f376c30d447a830fa4783a2112c
Committed by: Russell King
Parent: 4d6b7a779b
Exists in: master and 7 other branches
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected by standard
spinlocks are preemptible. This is not acceptable on perf as (a) we may be
scheduled onto a different CPU whilst reading/writing banked PMU registers
and (b) the latency when reading the PMU registers becomes unpredictable.

This patch upgrades the pmu_lock spinlock to a raw_spinlock instead.

Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Showing 4 changed files with 35 additions and 35 deletions
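The pattern applied in every hunk below is the same: the lock definition and each lock/unlock call move from the spinlock_t API (which becomes a preemptible, sleeping lock under PREEMPT_RT) to the raw_spinlock_t API, while the protected read-modify-write sequence itself is untouched. A minimal sketch of that pattern follows; the read_pmu_reg()/write_pmu_reg() accessors are hypothetical placeholders for the per-architecture helpers (armv6_pmcr_read(), xscale1pmu_write_pmnc(), and so on) used in the real code.

#include <linux/types.h>
#include <linux/spinlock.h>

/*
 * A raw_spinlock_t stays a true busy-waiting lock even on PREEMPT_RT,
 * so the critical section below can neither be preempted nor migrated
 * to another CPU while it holds the lock.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/* Hypothetical PMU register accessors, standing in for the real helpers. */
static inline u32 read_pmu_reg(void) { return 0; }
static inline void write_pmu_reg(u32 val) { }

static void example_enable_event(u32 mask, u32 evt)
{
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&pmu_lock, flags);        /* was spin_lock_irqsave() */
        val = read_pmu_reg();
        val &= ~mask;
        val |= evt;
        write_pmu_reg(val);
        raw_spin_unlock_irqrestore(&pmu_lock, flags);   /* was spin_unlock_irqrestore() */
}

Note that raw_spin_lock_irqsave() still disables local interrupts, so on kernels built without PREEMPT_RT the behaviour is unchanged; the conversion only matters once PREEMPT_RT turns spinlock_t into a sleeping lock.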
arch/arm/kernel/perf_event.c
@@ -32,7 +32,7 @@
  * Hardware lock to serialize accesses to PMU registers. Needed for the
  * read/modify/write sequences.
  */
-static DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);

 /*
  * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
arch/arm/kernel/perf_event_v6.c
@@ -426,12 +426,12 @@
          * Mask out the current event and set the counter to count the event
          * that we're interested in.
          */
-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = armv6_pmcr_read();
         val &= ~mask;
         val |= evt;
         armv6_pmcr_write(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static irqreturn_t
@@ -500,11 +500,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = armv6_pmcr_read();
         val |= ARMV6_PMCR_ENABLE;
         armv6_pmcr_write(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -512,11 +512,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = armv6_pmcr_read();
         val &= ~ARMV6_PMCR_ENABLE;
         armv6_pmcr_write(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -570,12 +570,12 @@
          * of ETM bus signal assertion cycles. The external reporting should
          * be disabled and so this should never increment.
          */
-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = armv6_pmcr_read();
         val &= ~mask;
         val |= evt;
         armv6_pmcr_write(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -599,12 +599,12 @@
          * Unlike UP ARMv6, we don't have a way of stopping the counters. We
          * simply disable the interrupt reporting.
          */
-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = armv6_pmcr_read();
         val &= ~mask;
         val |= evt;
         armv6_pmcr_write(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static const struct arm_pmu armv6pmu = {
arch/arm/kernel/perf_event_v7.c
@@ -689,7 +689,7 @@
          * Enable counter and interrupt, and set the counter to count
          * the event that we're interested in.
          */
-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);

         /*
          * Disable counter
@@ -713,7 +713,7 @@
          */
         armv7_pmnc_enable_counter(idx);

-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@
         /*
          * Disable counter and interrupt
          */
-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);

         /*
          * Disable counter
@@ -735,7 +735,7 @@
          */
         armv7_pmnc_disable_intens(idx);

-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@
 {
         unsigned long flags;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         /* Enable all counters */
         armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_stop(void)
 {
         unsigned long flags;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         /* Disable all counters */
         armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
arch/arm/kernel/perf_event_xscale.c
@@ -291,12 +291,12 @@
                 return;
         }

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
         val &= ~mask;
         val |= evt;
         xscale1pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -322,12 +322,12 @@
                 return;
         }

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
         val &= ~mask;
         val |= evt;
         xscale1pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -355,11 +355,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
         val |= XSCALE_PMU_ENABLE;
         xscale1pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -367,11 +367,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale1pmu_read_pmnc();
         val &= ~XSCALE_PMU_ENABLE;
         xscale1pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static inline u32
@@ -635,10 +635,10 @@
                 return;
         }

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         xscale2pmu_write_event_select(evtsel);
         xscale2pmu_write_int_enable(ien);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -678,10 +678,10 @@
                 return;
         }

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         xscale2pmu_write_event_select(evtsel);
         xscale2pmu_write_int_enable(ien);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -705,11 +705,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
         val |= XSCALE_PMU_ENABLE;
         xscale2pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -717,11 +717,11 @@
 {
         unsigned long flags, val;

-        spin_lock_irqsave(&pmu_lock, flags);
+        raw_spin_lock_irqsave(&pmu_lock, flags);
         val = xscale2pmu_read_pmnc();
         val &= ~XSCALE_PMU_ENABLE;
         xscale2pmu_write_pmnc(val);
-        spin_unlock_irqrestore(&pmu_lock, flags);
+        raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static inline u32