Commit 7279adbd9bb8ef8ff669da50f0e84c65a14022b5

Authored by Sudeep KarkadaNagesha
Committed by Will Deacon
1 parent ed6f2a5223

ARM: perf: check ARMv7 counter validity on a per-pmu basis

Multi-cluster ARMv7 systems may have CPU PMUs with different numbers of
counters.

This patch updates armv7_pmnc_counter_valid so that it takes a pmu
argument and checks the counter validity against that. We also remove a
number of redundant counter checks where the current PMU is not easily
retrievable.

Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
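
For context, a minimal sketch of the reworked validity check, assembled from the hunks below (armv7pmu_enable_event is abbreviated to show only the new up-front validation; the full function also sets the event type and enables the counter and its interrupt, as in the diff):

#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/* The index is now validated against the PMU that owns the event. */
static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static void armv7pmu_enable_event(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	int idx = event->hw.idx;

	/* Validate once here instead of in every low-level helper. */
	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
		       smp_processor_id(), idx);
		return;
	}

	/* ... set event type, enable interrupt and counter as before ... */
}

With the counter range derived from the arm_pmu that owns the event rather than from a single global cpu_pmu, each cluster's PMU is checked against its own num_events.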

Showing 1 changed file with 30 additions and 64 deletions

arch/arm/kernel/perf_event_v7.c
... ... @@ -736,7 +736,8 @@
736 736 */
737 737 #define ARMV7_IDX_CYCLE_COUNTER 0
738 738 #define ARMV7_IDX_COUNTER0 1
739   -#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
  739 +#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
  740 + (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
740 741  
741 742 #define ARMV7_MAX_COUNTERS 32
742 743 #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
743 744  
744 745  
745 746  
... ... @@ -802,38 +803,20 @@
802 803 return pmnc & ARMV7_OVERFLOWED_MASK;
803 804 }
804 805  
805   -static inline int armv7_pmnc_counter_valid(int idx)
  806 +static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
806 807 {
807   - return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
  808 + return idx >= ARMV7_IDX_CYCLE_COUNTER &&
  809 + idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
808 810 }
809 811  
810 812 static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
811 813 {
812   - int ret = 0;
813   - u32 counter;
814   -
815   - if (!armv7_pmnc_counter_valid(idx)) {
816   - pr_err("CPU%u checking wrong counter %d overflow status\n",
817   - smp_processor_id(), idx);
818   - } else {
819   - counter = ARMV7_IDX_TO_COUNTER(idx);
820   - ret = pmnc & BIT(counter);
821   - }
822   -
823   - return ret;
  814 + return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
824 815 }
825 816  
826 817 static inline int armv7_pmnc_select_counter(int idx)
827 818 {
828   - u32 counter;
829   -
830   - if (!armv7_pmnc_counter_valid(idx)) {
831   - pr_err("CPU%u selecting wrong PMNC counter %d\n",
832   - smp_processor_id(), idx);
833   - return -EINVAL;
834   - }
835   -
836   - counter = ARMV7_IDX_TO_COUNTER(idx);
  819 + u32 counter = ARMV7_IDX_TO_COUNTER(idx);
837 820 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
838 821 isb();
839 822  
840 823  
... ... @@ -842,11 +825,12 @@
842 825  
843 826 static inline u32 armv7pmu_read_counter(struct perf_event *event)
844 827 {
  828 + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
845 829 struct hw_perf_event *hwc = &event->hw;
846 830 int idx = hwc->idx;
847 831 u32 value = 0;
848 832  
849   - if (!armv7_pmnc_counter_valid(idx))
  833 + if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
850 834 pr_err("CPU%u reading wrong counter %d\n",
851 835 smp_processor_id(), idx);
852 836 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
853 837  
... ... @@ -859,10 +843,11 @@
859 843  
860 844 static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
861 845 {
  846 + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
862 847 struct hw_perf_event *hwc = &event->hw;
863 848 int idx = hwc->idx;
864 849  
865   - if (!armv7_pmnc_counter_valid(idx))
  850 + if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
866 851 pr_err("CPU%u writing wrong counter %d\n",
867 852 smp_processor_id(), idx);
868 853 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
869 854  
870 855  
871 856  
... ... @@ -881,60 +866,28 @@
881 866  
882 867 static inline int armv7_pmnc_enable_counter(int idx)
883 868 {
884   - u32 counter;
885   -
886   - if (!armv7_pmnc_counter_valid(idx)) {
887   - pr_err("CPU%u enabling wrong PMNC counter %d\n",
888   - smp_processor_id(), idx);
889   - return -EINVAL;
890   - }
891   -
892   - counter = ARMV7_IDX_TO_COUNTER(idx);
  869 + u32 counter = ARMV7_IDX_TO_COUNTER(idx);
893 870 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
894 871 return idx;
895 872 }
896 873  
897 874 static inline int armv7_pmnc_disable_counter(int idx)
898 875 {
899   - u32 counter;
900   -
901   - if (!armv7_pmnc_counter_valid(idx)) {
902   - pr_err("CPU%u disabling wrong PMNC counter %d\n",
903   - smp_processor_id(), idx);
904   - return -EINVAL;
905   - }
906   -
907   - counter = ARMV7_IDX_TO_COUNTER(idx);
  876 + u32 counter = ARMV7_IDX_TO_COUNTER(idx);
908 877 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
909 878 return idx;
910 879 }
911 880  
912 881 static inline int armv7_pmnc_enable_intens(int idx)
913 882 {
914   - u32 counter;
915   -
916   - if (!armv7_pmnc_counter_valid(idx)) {
917   - pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
918   - smp_processor_id(), idx);
919   - return -EINVAL;
920   - }
921   -
922   - counter = ARMV7_IDX_TO_COUNTER(idx);
  883 + u32 counter = ARMV7_IDX_TO_COUNTER(idx);
923 884 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
924 885 return idx;
925 886 }
926 887  
927 888 static inline int armv7_pmnc_disable_intens(int idx)
928 889 {
929   - u32 counter;
930   -
931   - if (!armv7_pmnc_counter_valid(idx)) {
932   - pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
933   - smp_processor_id(), idx);
934   - return -EINVAL;
935   - }
936   -
937   - counter = ARMV7_IDX_TO_COUNTER(idx);
  890 + u32 counter = ARMV7_IDX_TO_COUNTER(idx);
938 891 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
939 892 isb();
940 893 /* Clear the overflow flag in case an interrupt is pending. */
... ... @@ -959,7 +912,7 @@
959 912 }
960 913  
961 914 #ifdef DEBUG
962   -static void armv7_pmnc_dump_regs(void)
  915 +static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
963 916 {
964 917 u32 val;
965 918 unsigned int cnt;
... ... @@ -984,7 +937,8 @@
984 937 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
985 938 printk(KERN_INFO "CCNT =0x%08x\n", val);
986 939  
987   - for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
  940 + for (cnt = ARMV7_IDX_COUNTER0;
  941 + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
988 942 armv7_pmnc_select_counter(cnt);
989 943 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
990 944 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
... ... @@ -1004,6 +958,12 @@
1004 958 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1005 959 int idx = hwc->idx;
1006 960  
  961 + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
  962 + pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
  963 + smp_processor_id(), idx);
  964 + return;
  965 + }
  966 +
1007 967 /*
1008 968 * Enable counter and interrupt, and set the counter to count
1009 969 * the event that we're interested in.
... ... @@ -1043,6 +1003,12 @@
1043 1003 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1044 1004 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1045 1005 int idx = hwc->idx;
  1006 +
  1007 + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
  1008 + pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
  1009 + smp_processor_id(), idx);
  1010 + return;
  1011 + }
1046 1012  
1047 1013 /*
1048 1014 * Disable counter and interrupt