Commit cde8e88498c8de69271fcb6d4dd974979368fa67

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent b0b2072df3

perf: Sanitize the RCU logic

Simplify things by simply synchronizing against both RCU variants for
PMU unregister -- we don't care about performance here; it's module
unload, if anything.

Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 1 changed file with 9 additions and 8 deletions Side-by-side Diff

... ... @@ -3810,7 +3810,7 @@
3810 3810 struct pmu *pmu;
3811 3811 int ctxn;
3812 3812  
3813   - rcu_read_lock_sched();
  3813 + rcu_read_lock();
3814 3814 list_for_each_entry_rcu(pmu, &pmus, entry) {
3815 3815 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3816 3816 perf_event_task_ctx(&cpuctx->ctx, task_event);
... ... @@ -3825,7 +3825,7 @@
3825 3825 if (ctx)
3826 3826 perf_event_task_ctx(ctx, task_event);
3827 3827 }
3828   - rcu_read_unlock_sched();
  3828 + rcu_read_unlock();
3829 3829 }
3830 3830  
3831 3831 static void perf_event_task(struct task_struct *task,
... ... @@ -3943,7 +3943,7 @@
3943 3943  
3944 3944 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3945 3945  
3946   - rcu_read_lock_sched();
  3946 + rcu_read_lock();
3947 3947 list_for_each_entry_rcu(pmu, &pmus, entry) {
3948 3948 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3949 3949 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
... ... @@ -3956,7 +3956,7 @@
3956 3956 if (ctx)
3957 3957 perf_event_comm_ctx(ctx, comm_event);
3958 3958 }
3959   - rcu_read_unlock_sched();
  3959 + rcu_read_unlock();
3960 3960 }
3961 3961  
3962 3962 void perf_event_comm(struct task_struct *task)
... ... @@ -4126,7 +4126,7 @@
4126 4126  
4127 4127 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4128 4128  
4129   - rcu_read_lock_sched();
  4129 + rcu_read_lock();
4130 4130 list_for_each_entry_rcu(pmu, &pmus, entry) {
4131 4131 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
4132 4132 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
... ... @@ -4142,7 +4142,7 @@
4142 4142 vma->vm_flags & VM_EXEC);
4143 4143 }
4144 4144 }
4145   - rcu_read_unlock_sched();
  4145 + rcu_read_unlock();
4146 4146  
4147 4147 kfree(buf);
4148 4148 }
4149 4149  
... ... @@ -5218,10 +5218,11 @@
5218 5218 mutex_unlock(&pmus_lock);
5219 5219  
5220 5220 /*
5221   - * We use the pmu list either under SRCU or preempt_disable,
5222   - * synchronize_srcu() implies synchronize_sched() so we're good.
  5221 + * We dereference the pmu list under both SRCU and regular RCU, so
  5222 + * synchronize against both of those.
5223 5223 */
5224 5224 synchronize_srcu(&pmus_srcu);
  5225 + synchronize_rcu();
5225 5226  
5226 5227 free_percpu(pmu->pmu_disable_count);
5227 5228 free_pmu_context(pmu->pmu_cpu_context);