Commit 404ff638403e9286691b9b1f86d514c1d7737e8f

Authored by Deng-Cheng Zhu
Committed by Ralf Baechle
1 parent 91f017372a

MIPS, Perf-events: Work with the new PMU interface

This is the MIPS part of the following commits by Peter Zijlstra:

- a4eaf7f14675cb512d69f0c928055e73d0c6d252
    perf: Rework the PMU methods

    Replace pmu::{enable,disable,start,stop,unthrottle} with
    pmu::{add,del,start,stop}, all of which take a flags argument.

    The new interface extends the capability to stop a counter while
    keeping it scheduled on the PMU. We replace the throttled state with
    the generic stopped state.

    This also allows us to efficiently stop/start counters over certain
    code paths (like IRQ handlers).

    It also allows scheduling a counter without it starting, allowing for
    a generic frozen state (useful for rotating stopped counters).

    The stopped state is implemented in two different ways, depending on
    how the architecture implemented the throttled state:

     1) We disable the counter:
        a) the pmu has per-counter enable bits, we flip that
        b) we program a NOP event, preserving the counter state

     2) We store the counter state and ignore all read/overflow events

For MIPSXX, the stopped state is implemented as in 1.b above.
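
To illustrate the reworked interface, here is a rough sketch of how the
core now drives an event through the new callbacks. The callback, flag
and state names are the real ones used in this patch; the call sequence
itself is a simplified example, not code from the kernel:

    /*
     * Illustrative lifecycle of an event under the new PMU methods
     * (simplified; the core interleaves these with scheduling).
     */
    struct pmu *p = event->pmu;

    p->add(event, PERF_EF_START);    /* schedule on the PMU and start */
    p->stop(event, PERF_EF_UPDATE);  /* stop and fold the hardware count
                                        into the event; sets
                                        PERF_HES_STOPPED | PERF_HES_UPTODATE */
    p->start(event, PERF_EF_RELOAD); /* reprogram the period, restart */
    p->del(event, 0);                /* unschedule from the PMU */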

- 33696fc0d141bbbcb12f75b69608ea83282e3117
    perf: Per PMU disable

    Changes perf_disable() into perf_pmu_disable().
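
    In the MIPS code this shows up as a per-PMU bracket around counter
    rescheduling; mipspmu_add() in the diff below does exactly this
    (sketch, error handling elided):

        perf_pmu_disable(event->pmu);   /* disable only this PMU */
        /* ... allocate a counter and program the event ... */
        perf_pmu_enable(event->pmu);    /* re-enable it */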

- 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7
    perf: Reduce perf_disable() usage

    Since the current perf_disable() usage is only an optimization,
    remove it for now. This eases the removal of the __weak
    hw_perf_enable() interface.

- b0a873ebbf87bf38bf70b5e39a7cadc96099fa13
    perf: Register PMU implementations

    Simple registration interface for struct pmu; this provides the
    infrastructure for removing all the weak functions.
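
    For MIPS this means filling in a struct pmu with the methods above
    and registering it once at init time (taken from the diff below;
    the return value of perf_pmu_register() is not checked there):

        static struct pmu pmu = {
        	.pmu_enable	= mipspmu_enable,
        	.pmu_disable	= mipspmu_disable,
        	.event_init	= mipspmu_event_init,
        	.add		= mipspmu_add,
        	.del		= mipspmu_del,
        	.start		= mipspmu_start,
        	.stop		= mipspmu_stop,
        	.read		= mipspmu_read,
        };

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);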

- 51b0fe39549a04858001922919ab355dee9bdfcf
    perf: Deconstify struct pmu

    sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`

Reported-by: Wu Zhangjin <wuzhangjin@gmail.com>
Acked-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
To: a.p.zijlstra@chello.nl
To: fweisbec@gmail.com
To: will.deacon@arm.com
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: wuzhangjin@gmail.com
Cc: paulus@samba.org
Cc: mingo@elte.hu
Cc: acme@redhat.com
Cc: dengcheng.zhu@gmail.com
Cc: matt@console-pimps.org
Cc: sshtylyov@mvista.com
Cc: ddaney@caviumnetworks.com
Patchwork: http://patchwork.linux-mips.org/patch/2012/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Showing 2 changed files with 158 additions and 119 deletions

arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 				 struct hw_perf_event *hwc,
 				 int idx)
@@ -231,32 +196,90 @@
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
 {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int idx;
+	int err = 0;
 
+	perf_pmu_disable(event->pmu);
 
-	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
-	/* We are working on a local event. */
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
 	mipspmu->disable_event(idx);
+	cpuc->events[idx] = event;
 
-	barrier();
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
 
-	mipspmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
-
+	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	mipspmu->enable_event(hwc, hwc->idx);
+	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+	mipspmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,13 +293,18 @@
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
 
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
+
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
 static int (*save_perf_irq)(void);
@@ -318,6 +346,82 @@
 	perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -407,73 +511,6 @@
 		return -ENOSPC;
 
 	return 0;
-}
-
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
 }
 
 /* This is needed by specific irq handlers in perf_event_*.c */
arch/mips/kernel/perf_event_mipsxx.c
@@ -1045,6 +1045,8 @@
 		"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 		irq < 0 ? " (share with timer interrupt)" : "");
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);