Commit 97d5a22005f38057b4bc0d95f81cd26510268794

Authored by Frederic Weisbecker
1 parent c530665c31

perf: Drop the obsolete profile naming for trace events

Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple event counting, so generalize
the API naming.
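
In short, the API moves from the "profile" vocabulary to a perf one:
ftrace_profile_enable()/ftrace_profile_disable() become
perf_trace_enable()/perf_trace_disable(),
ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() become
perf_trace_buf_prepare()/perf_trace_buf_submit(), the
profile_enable/profile_disable/profile_count members of struct
ftrace_event_call become perf_event_enable/perf_event_disable/
perf_refcount, FTRACE_MAX_PROFILE_SIZE becomes PERF_MAX_TRACE_SIZE, and
trace_event_profile.c is renamed to trace_event_perf.c.

A minimal sketch of what a perf handler for a trace event looks like
after the rename (the "sample" event, its entry struct and its id field
are made up for illustration; only the prepare/submit helper names and
signatures come from the ftrace_event.h hunk below):

	/* Hypothetical event; only the helper names/signatures are real. */
	static notrace void perf_trace_sample(struct pt_regs *regs, long id)
	{
		struct ftrace_event_call *call = &event_sample;
		struct sample_trace_entry *entry;
		unsigned long irq_flags;
		int size, rctx;

		/* round the record up to a u64 boundary, as the real handlers do */
		size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64));
		size -= sizeof(u32);

		entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
		if (!entry)
			return;

		entry->id = id;		/* fill in the event fields */

		perf_trace_buf_submit(entry, size, rctx, 0, 1, irq_flags, regs);
	}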

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>

Showing 11 changed files with 262 additions and 262 deletions

include/linux/ftrace_event.h
... ... @@ -131,12 +131,12 @@
131 131 void *mod;
132 132 void *data;
133 133  
134   - int profile_count;
135   - int (*profile_enable)(struct ftrace_event_call *);
136   - void (*profile_disable)(struct ftrace_event_call *);
  134 + int perf_refcount;
  135 + int (*perf_event_enable)(struct ftrace_event_call *);
  136 + void (*perf_event_disable)(struct ftrace_event_call *);
137 137 };
138 138  
139   -#define FTRACE_MAX_PROFILE_SIZE 2048
  139 +#define PERF_MAX_TRACE_SIZE 2048
140 140  
141 141 #define MAX_FILTER_PRED 32
142 142 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
143 143  
144 144  
... ... @@ -190,17 +190,17 @@
190 190  
191 191 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
192 192  
193   -extern int ftrace_profile_enable(int event_id);
194   -extern void ftrace_profile_disable(int event_id);
  193 +extern int perf_trace_enable(int event_id);
  194 +extern void perf_trace_disable(int event_id);
195 195 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
196 196 char *filter_str);
197 197 extern void ftrace_profile_free_filter(struct perf_event *event);
198 198 extern void *
199   -ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
  199 +perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
200 200 unsigned long *irq_flags);
201 201  
202 202 static inline void
203   -ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
  203 +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
204 204 u64 count, unsigned long irq_flags, struct pt_regs *regs)
205 205 {
206 206 struct trace_entry *entry = raw_data;
include/linux/syscalls.h
... ... @@ -101,18 +101,18 @@
101 101  
102 102 #ifdef CONFIG_PERF_EVENTS
103 103  
104   -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
105   - .profile_enable = prof_sysenter_enable, \
106   - .profile_disable = prof_sysenter_disable,
  104 +#define TRACE_SYS_ENTER_PERF_INIT(sname) \
  105 + .perf_event_enable = perf_sysenter_enable, \
  106 + .perf_event_disable = perf_sysenter_disable,
107 107  
108   -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
109   - .profile_enable = prof_sysexit_enable, \
110   - .profile_disable = prof_sysexit_disable,
  108 +#define TRACE_SYS_EXIT_PERF_INIT(sname) \
  109 + .perf_event_enable = perf_sysexit_enable, \
  110 + .perf_event_disable = perf_sysexit_disable,
111 111 #else
112   -#define TRACE_SYS_ENTER_PROFILE(sname)
113   -#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
114   -#define TRACE_SYS_EXIT_PROFILE(sname)
115   -#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
  112 +#define TRACE_SYS_ENTER_PERF(sname)
  113 +#define TRACE_SYS_ENTER_PERF_INIT(sname)
  114 +#define TRACE_SYS_EXIT_PERF(sname)
  115 +#define TRACE_SYS_EXIT_PERF_INIT(sname)
116 116 #endif /* CONFIG_PERF_EVENTS */
117 117  
118 118 #ifdef CONFIG_FTRACE_SYSCALLS
... ... @@ -149,7 +149,7 @@
149 149 .regfunc = reg_event_syscall_enter, \
150 150 .unregfunc = unreg_event_syscall_enter, \
151 151 .data = (void *)&__syscall_meta_##sname,\
152   - TRACE_SYS_ENTER_PROFILE_INIT(sname) \
  152 + TRACE_SYS_ENTER_PERF_INIT(sname) \
153 153 }
154 154  
155 155 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
... ... @@ -171,7 +171,7 @@
171 171 .regfunc = reg_event_syscall_exit, \
172 172 .unregfunc = unreg_event_syscall_exit, \
173 173 .data = (void *)&__syscall_meta_##sname,\
174   - TRACE_SYS_EXIT_PROFILE_INIT(sname) \
  174 + TRACE_SYS_EXIT_PERF_INIT(sname) \
175 175 }
176 176  
177 177 #define SYSCALL_METADATA(sname, nb) \
include/trace/ftrace.h
... ... @@ -401,18 +401,18 @@
401 401 #undef DEFINE_EVENT
402 402 #define DEFINE_EVENT(template, name, proto, args) \
403 403 \
404   -static void ftrace_profile_##name(proto); \
  404 +static void perf_trace_##name(proto); \
405 405 \
406 406 static notrace int \
407   -ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
  407 +perf_trace_enable_##name(struct ftrace_event_call *unused) \
408 408 { \
409   - return register_trace_##name(ftrace_profile_##name); \
  409 + return register_trace_##name(perf_trace_##name); \
410 410 } \
411 411 \
412 412 static notrace void \
413   -ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
  413 +perf_trace_disable_##name(struct ftrace_event_call *unused) \
414 414 { \
415   - unregister_trace_##name(ftrace_profile_##name); \
  415 + unregister_trace_##name(perf_trace_##name); \
416 416 }
417 417  
418 418 #undef DEFINE_EVENT_PRINT
419 419  
... ... @@ -507,12 +507,12 @@
507 507  
508 508 #ifdef CONFIG_PERF_EVENTS
509 509  
510   -#define _TRACE_PROFILE_INIT(call) \
511   - .profile_enable = ftrace_profile_enable_##call, \
512   - .profile_disable = ftrace_profile_disable_##call,
  510 +#define _TRACE_PERF_INIT(call) \
  511 + .perf_event_enable = perf_trace_enable_##call, \
  512 + .perf_event_disable = perf_trace_disable_##call,
513 513  
514 514 #else
515   -#define _TRACE_PROFILE_INIT(call)
  515 +#define _TRACE_PERF_INIT(call)
516 516 #endif /* CONFIG_PERF_EVENTS */
517 517  
518 518 #undef __entry
... ... @@ -638,7 +638,7 @@
638 638 .unregfunc = ftrace_raw_unreg_event_##call, \
639 639 .print_fmt = print_fmt_##template, \
640 640 .define_fields = ftrace_define_fields_##template, \
641   - _TRACE_PROFILE_INIT(call) \
  641 + _TRACE_PERF_INIT(call) \
642 642 }
643 643  
644 644 #undef DEFINE_EVENT_PRINT
645 645  
646 646  
... ... @@ -657,18 +657,18 @@
657 657 .unregfunc = ftrace_raw_unreg_event_##call, \
658 658 .print_fmt = print_fmt_##call, \
659 659 .define_fields = ftrace_define_fields_##template, \
660   - _TRACE_PROFILE_INIT(call) \
  660 + _TRACE_PERF_INIT(call) \
661 661 }
662 662  
663 663 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
664 664  
665 665 /*
666   - * Define the insertion callback to profile events
  666 + * Define the insertion callback to perf events
667 667 *
668 668 * The job is very similar to ftrace_raw_event_<call> except that we don't
669 669 * insert in the ring buffer but in a perf counter.
670 670 *
671   - * static void ftrace_profile_<call>(proto)
  671 + * static void ftrace_perf_<call>(proto)
672 672 * {
673 673 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
674 674 * struct ftrace_event_call *event_call = &event_<call>;
... ... @@ -757,7 +757,7 @@
757 757 #undef DECLARE_EVENT_CLASS
758 758 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
759 759 static notrace void \
760   -ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
  760 +perf_trace_templ_##call(struct ftrace_event_call *event_call, \
761 761 proto) \
762 762 { \
763 763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
764 764  
... ... @@ -774,10 +774,10 @@
774 774 sizeof(u64)); \
775 775 __entry_size -= sizeof(u32); \
776 776 \
777   - if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
  777 + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
778 778 "profile buffer not large enough")) \
779 779 return; \
780   - entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \
  780 + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
781 781 __entry_size, event_call->id, &rctx, &irq_flags); \
782 782 if (!entry) \
783 783 return; \
784 784  
785 785  
... ... @@ -788,17 +788,17 @@
788 788 __regs = &__get_cpu_var(perf_trace_regs); \
789 789 perf_fetch_caller_regs(__regs, 2); \
790 790 \
791   - ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \
  791 + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
792 792 __count, irq_flags, __regs); \
793 793 }
794 794  
795 795 #undef DEFINE_EVENT
796 796 #define DEFINE_EVENT(template, call, proto, args) \
797   -static notrace void ftrace_profile_##call(proto) \
  797 +static notrace void perf_trace_##call(proto) \
798 798 { \
799 799 struct ftrace_event_call *event_call = &event_##call; \
800 800 \
801   - ftrace_profile_templ_##template(event_call, args); \
  801 + perf_trace_templ_##template(event_call, args); \
802 802 }
803 803  
804 804 #undef DEFINE_EVENT_PRINT
include/trace/syscall.h
... ... @@ -47,10 +47,10 @@
47 47 #endif
48 48  
49 49 #ifdef CONFIG_PERF_EVENTS
50   -int prof_sysenter_enable(struct ftrace_event_call *call);
51   -void prof_sysenter_disable(struct ftrace_event_call *call);
52   -int prof_sysexit_enable(struct ftrace_event_call *call);
53   -void prof_sysexit_disable(struct ftrace_event_call *call);
  50 +int perf_sysenter_enable(struct ftrace_event_call *call);
  51 +void perf_sysenter_disable(struct ftrace_event_call *call);
  52 +int perf_sysexit_enable(struct ftrace_event_call *call);
  53 +void perf_sysexit_disable(struct ftrace_event_call *call);
54 54 #endif
55 55  
56 56 #endif /* _TRACE_SYSCALL_H */
kernel/perf_event.c
... ... @@ -4347,7 +4347,7 @@
4347 4347  
4348 4348 static void tp_perf_event_destroy(struct perf_event *event)
4349 4349 {
4350   - ftrace_profile_disable(event->attr.config);
  4350 + perf_trace_disable(event->attr.config);
4351 4351 }
4352 4352  
4353 4353 static const struct pmu *tp_perf_event_init(struct perf_event *event)
... ... @@ -4361,7 +4361,7 @@
4361 4361 !capable(CAP_SYS_ADMIN))
4362 4362 return ERR_PTR(-EPERM);
4363 4363  
4364   - if (ftrace_profile_enable(event->attr.config))
  4364 + if (perf_trace_enable(event->attr.config))
4365 4365 return NULL;
4366 4366  
4367 4367 event->destroy = tp_perf_event_destroy;
kernel/trace/Makefile
... ... @@ -52,7 +52,7 @@
52 52 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
53 53 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
54 54 ifeq ($(CONFIG_PERF_EVENTS),y)
55   -obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
  55 +obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
56 56 endif
57 57 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
58 58 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
kernel/trace/trace_event_perf.c
  1 +/*
  2 + * trace event based perf event profiling/tracing
  3 + *
  4 + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
  5 + * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  6 + */
  7 +
  8 +#include <linux/module.h>
  9 +#include <linux/kprobes.h>
  10 +#include "trace.h"
  11 +
  12 +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
  13 +
  14 +static char *perf_trace_buf;
  15 +static char *perf_trace_buf_nmi;
  16 +
  17 +typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
  18 +
  19 +/* Count the events in use (per event id, not per instance) */
  20 +static int total_ref_count;
  21 +
  22 +static int perf_trace_event_enable(struct ftrace_event_call *event)
  23 +{
  24 + char *buf;
  25 + int ret = -ENOMEM;
  26 +
  27 + if (event->perf_refcount++ > 0)
  28 + return 0;
  29 +
  30 + if (!total_ref_count) {
  31 + buf = (char *)alloc_percpu(perf_trace_t);
  32 + if (!buf)
  33 + goto fail_buf;
  34 +
  35 + rcu_assign_pointer(perf_trace_buf, buf);
  36 +
  37 + buf = (char *)alloc_percpu(perf_trace_t);
  38 + if (!buf)
  39 + goto fail_buf_nmi;
  40 +
  41 + rcu_assign_pointer(perf_trace_buf_nmi, buf);
  42 + }
  43 +
  44 + ret = event->perf_event_enable(event);
  45 + if (!ret) {
  46 + total_ref_count++;
  47 + return 0;
  48 + }
  49 +
  50 +fail_buf_nmi:
  51 + if (!total_ref_count) {
  52 + free_percpu(perf_trace_buf_nmi);
  53 + free_percpu(perf_trace_buf);
  54 + perf_trace_buf_nmi = NULL;
  55 + perf_trace_buf = NULL;
  56 + }
  57 +fail_buf:
  58 + event->perf_refcount--;
  59 +
  60 + return ret;
  61 +}
  62 +
  63 +int perf_trace_enable(int event_id)
  64 +{
  65 + struct ftrace_event_call *event;
  66 + int ret = -EINVAL;
  67 +
  68 + mutex_lock(&event_mutex);
  69 + list_for_each_entry(event, &ftrace_events, list) {
  70 + if (event->id == event_id && event->perf_event_enable &&
  71 + try_module_get(event->mod)) {
  72 + ret = perf_trace_event_enable(event);
  73 + break;
  74 + }
  75 + }
  76 + mutex_unlock(&event_mutex);
  77 +
  78 + return ret;
  79 +}
  80 +
  81 +static void perf_trace_event_disable(struct ftrace_event_call *event)
  82 +{
  83 + char *buf, *nmi_buf;
  84 +
  85 + if (--event->perf_refcount > 0)
  86 + return;
  87 +
  88 + event->perf_event_disable(event);
  89 +
  90 + if (!--total_ref_count) {
  91 + buf = perf_trace_buf;
  92 + rcu_assign_pointer(perf_trace_buf, NULL);
  93 +
  94 + nmi_buf = perf_trace_buf_nmi;
  95 + rcu_assign_pointer(perf_trace_buf_nmi, NULL);
  96 +
  97 + /*
  98 + * Ensure every events in profiling have finished before
  99 + * releasing the buffers
  100 + */
  101 + synchronize_sched();
  102 +
  103 + free_percpu(buf);
  104 + free_percpu(nmi_buf);
  105 + }
  106 +}
  107 +
  108 +void perf_trace_disable(int event_id)
  109 +{
  110 + struct ftrace_event_call *event;
  111 +
  112 + mutex_lock(&event_mutex);
  113 + list_for_each_entry(event, &ftrace_events, list) {
  114 + if (event->id == event_id) {
  115 + perf_trace_event_disable(event);
  116 + module_put(event->mod);
  117 + break;
  118 + }
  119 + }
  120 + mutex_unlock(&event_mutex);
  121 +}
  122 +
  123 +__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
  124 + int *rctxp, unsigned long *irq_flags)
  125 +{
  126 + struct trace_entry *entry;
  127 + char *trace_buf, *raw_data;
  128 + int pc, cpu;
  129 +
  130 + pc = preempt_count();
  131 +
  132 + /* Protect the per cpu buffer, begin the rcu read side */
  133 + local_irq_save(*irq_flags);
  134 +
  135 + *rctxp = perf_swevent_get_recursion_context();
  136 + if (*rctxp < 0)
  137 + goto err_recursion;
  138 +
  139 + cpu = smp_processor_id();
  140 +
  141 + if (in_nmi())
  142 + trace_buf = rcu_dereference(perf_trace_buf_nmi);
  143 + else
  144 + trace_buf = rcu_dereference(perf_trace_buf);
  145 +
  146 + if (!trace_buf)
  147 + goto err;
  148 +
  149 + raw_data = per_cpu_ptr(trace_buf, cpu);
  150 +
  151 + /* zero the dead bytes from align to not leak stack to user */
  152 + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
  153 +
  154 + entry = (struct trace_entry *)raw_data;
  155 + tracing_generic_entry_update(entry, *irq_flags, pc);
  156 + entry->type = type;
  157 +
  158 + return raw_data;
  159 +err:
  160 + perf_swevent_put_recursion_context(*rctxp);
  161 +err_recursion:
  162 + local_irq_restore(*irq_flags);
  163 + return NULL;
  164 +}
  165 +EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
kernel/trace/trace_event_profile.c
1   -/*
2   - * trace event based perf counter profiling
3   - *
4   - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
5   - * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
6   - */
7   -
8   -#include <linux/module.h>
9   -#include <linux/kprobes.h>
10   -#include "trace.h"
11   -
12   -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
13   -
14   -static char *perf_trace_buf;
15   -static char *perf_trace_buf_nmi;
16   -
17   -typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
18   -
19   -/* Count the events in use (per event id, not per instance) */
20   -static int total_profile_count;
21   -
22   -static int ftrace_profile_enable_event(struct ftrace_event_call *event)
23   -{
24   - char *buf;
25   - int ret = -ENOMEM;
26   -
27   - if (event->profile_count++ > 0)
28   - return 0;
29   -
30   - if (!total_profile_count) {
31   - buf = (char *)alloc_percpu(perf_trace_t);
32   - if (!buf)
33   - goto fail_buf;
34   -
35   - rcu_assign_pointer(perf_trace_buf, buf);
36   -
37   - buf = (char *)alloc_percpu(perf_trace_t);
38   - if (!buf)
39   - goto fail_buf_nmi;
40   -
41   - rcu_assign_pointer(perf_trace_buf_nmi, buf);
42   - }
43   -
44   - ret = event->profile_enable(event);
45   - if (!ret) {
46   - total_profile_count++;
47   - return 0;
48   - }
49   -
50   -fail_buf_nmi:
51   - if (!total_profile_count) {
52   - free_percpu(perf_trace_buf_nmi);
53   - free_percpu(perf_trace_buf);
54   - perf_trace_buf_nmi = NULL;
55   - perf_trace_buf = NULL;
56   - }
57   -fail_buf:
58   - event->profile_count--;
59   -
60   - return ret;
61   -}
62   -
63   -int ftrace_profile_enable(int event_id)
64   -{
65   - struct ftrace_event_call *event;
66   - int ret = -EINVAL;
67   -
68   - mutex_lock(&event_mutex);
69   - list_for_each_entry(event, &ftrace_events, list) {
70   - if (event->id == event_id && event->profile_enable &&
71   - try_module_get(event->mod)) {
72   - ret = ftrace_profile_enable_event(event);
73   - break;
74   - }
75   - }
76   - mutex_unlock(&event_mutex);
77   -
78   - return ret;
79   -}
80   -
81   -static void ftrace_profile_disable_event(struct ftrace_event_call *event)
82   -{
83   - char *buf, *nmi_buf;
84   -
85   - if (--event->profile_count > 0)
86   - return;
87   -
88   - event->profile_disable(event);
89   -
90   - if (!--total_profile_count) {
91   - buf = perf_trace_buf;
92   - rcu_assign_pointer(perf_trace_buf, NULL);
93   -
94   - nmi_buf = perf_trace_buf_nmi;
95   - rcu_assign_pointer(perf_trace_buf_nmi, NULL);
96   -
97   - /*
98   - * Ensure every events in profiling have finished before
99   - * releasing the buffers
100   - */
101   - synchronize_sched();
102   -
103   - free_percpu(buf);
104   - free_percpu(nmi_buf);
105   - }
106   -}
107   -
108   -void ftrace_profile_disable(int event_id)
109   -{
110   - struct ftrace_event_call *event;
111   -
112   - mutex_lock(&event_mutex);
113   - list_for_each_entry(event, &ftrace_events, list) {
114   - if (event->id == event_id) {
115   - ftrace_profile_disable_event(event);
116   - module_put(event->mod);
117   - break;
118   - }
119   - }
120   - mutex_unlock(&event_mutex);
121   -}
122   -
123   -__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
124   - int *rctxp, unsigned long *irq_flags)
125   -{
126   - struct trace_entry *entry;
127   - char *trace_buf, *raw_data;
128   - int pc, cpu;
129   -
130   - pc = preempt_count();
131   -
132   - /* Protect the per cpu buffer, begin the rcu read side */
133   - local_irq_save(*irq_flags);
134   -
135   - *rctxp = perf_swevent_get_recursion_context();
136   - if (*rctxp < 0)
137   - goto err_recursion;
138   -
139   - cpu = smp_processor_id();
140   -
141   - if (in_nmi())
142   - trace_buf = rcu_dereference(perf_trace_buf_nmi);
143   - else
144   - trace_buf = rcu_dereference(perf_trace_buf);
145   -
146   - if (!trace_buf)
147   - goto err;
148   -
149   - raw_data = per_cpu_ptr(trace_buf, cpu);
150   -
151   - /* zero the dead bytes from align to not leak stack to user */
152   - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
153   -
154   - entry = (struct trace_entry *)raw_data;
155   - tracing_generic_entry_update(entry, *irq_flags, pc);
156   - entry->type = type;
157   -
158   - return raw_data;
159   -err:
160   - perf_swevent_put_recursion_context(*rctxp);
161   -err_recursion:
162   - local_irq_restore(*irq_flags);
163   - return NULL;
164   -}
165   -EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
kernel/trace/trace_events.c
... ... @@ -938,7 +938,7 @@
938 938 trace_create_file("enable", 0644, call->dir, call,
939 939 enable);
940 940  
941   - if (call->id && call->profile_enable)
  941 + if (call->id && call->perf_event_enable)
942 942 trace_create_file("id", 0444, call->dir, call,
943 943 id);
944 944  
kernel/trace/trace_kprobe.c
... ... @@ -1214,7 +1214,7 @@
1214 1214 #ifdef CONFIG_PERF_EVENTS
1215 1215  
1216 1216 /* Kprobe profile handler */
1217   -static __kprobes void kprobe_profile_func(struct kprobe *kp,
  1217 +static __kprobes void kprobe_perf_func(struct kprobe *kp,
1218 1218 struct pt_regs *regs)
1219 1219 {
1220 1220 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1221 1221  
... ... @@ -1227,11 +1227,11 @@
1227 1227 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1228 1228 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1229 1229 size -= sizeof(u32);
1230   - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
  1230 + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1231 1231 "profile buffer not large enough"))
1232 1232 return;
1233 1233  
1234   - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
  1234 + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1235 1235 if (!entry)
1236 1236 return;
1237 1237  
1238 1238  
... ... @@ -1240,11 +1240,11 @@
1240 1240 for (i = 0; i < tp->nr_args; i++)
1241 1241 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1242 1242  
1243   - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
  1243 + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
1244 1244 }
1245 1245  
1246 1246 /* Kretprobe profile handler */
1247   -static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
  1247 +static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1248 1248 struct pt_regs *regs)
1249 1249 {
1250 1250 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1251 1251  
... ... @@ -1257,11 +1257,11 @@
1257 1257 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1258 1258 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1259 1259 size -= sizeof(u32);
1260   - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
  1260 + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1261 1261 "profile buffer not large enough"))
1262 1262 return;
1263 1263  
1264   - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
  1264 + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1265 1265 if (!entry)
1266 1266 return;
1267 1267  
1268 1268  
... ... @@ -1271,11 +1271,11 @@
1271 1271 for (i = 0; i < tp->nr_args; i++)
1272 1272 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1273 1273  
1274   - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
  1274 + perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
1275 1275 irq_flags, regs);
1276 1276 }
1277 1277  
1278   -static int probe_profile_enable(struct ftrace_event_call *call)
  1278 +static int probe_perf_enable(struct ftrace_event_call *call)
1279 1279 {
1280 1280 struct trace_probe *tp = (struct trace_probe *)call->data;
1281 1281  
... ... @@ -1287,7 +1287,7 @@
1287 1287 return enable_kprobe(&tp->rp.kp);
1288 1288 }
1289 1289  
1290   -static void probe_profile_disable(struct ftrace_event_call *call)
  1290 +static void probe_perf_disable(struct ftrace_event_call *call)
1291 1291 {
1292 1292 struct trace_probe *tp = (struct trace_probe *)call->data;
1293 1293  
... ... @@ -1312,7 +1312,7 @@
1312 1312 kprobe_trace_func(kp, regs);
1313 1313 #ifdef CONFIG_PERF_EVENTS
1314 1314 if (tp->flags & TP_FLAG_PROFILE)
1315   - kprobe_profile_func(kp, regs);
  1315 + kprobe_perf_func(kp, regs);
1316 1316 #endif
1317 1317 return 0; /* We don't tweek kernel, so just return 0 */
1318 1318 }
... ... @@ -1326,7 +1326,7 @@
1326 1326 kretprobe_trace_func(ri, regs);
1327 1327 #ifdef CONFIG_PERF_EVENTS
1328 1328 if (tp->flags & TP_FLAG_PROFILE)
1329   - kretprobe_profile_func(ri, regs);
  1329 + kretprobe_perf_func(ri, regs);
1330 1330 #endif
1331 1331 return 0; /* We don't tweek kernel, so just return 0 */
1332 1332 }
... ... @@ -1359,8 +1359,8 @@
1359 1359 call->unregfunc = probe_event_disable;
1360 1360  
1361 1361 #ifdef CONFIG_PERF_EVENTS
1362   - call->profile_enable = probe_profile_enable;
1363   - call->profile_disable = probe_profile_disable;
  1362 + call->perf_event_enable = probe_perf_enable;
  1363 + call->perf_event_disable = probe_perf_disable;
1364 1364 #endif
1365 1365 call->data = tp;
1366 1366 ret = trace_add_event_call(call);
kernel/trace/trace_syscalls.c
... ... @@ -428,12 +428,12 @@
428 428  
429 429 #ifdef CONFIG_PERF_EVENTS
430 430  
431   -static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
432   -static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
433   -static int sys_prof_refcount_enter;
434   -static int sys_prof_refcount_exit;
  431 +static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
  432 +static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
  433 +static int sys_perf_refcount_enter;
  434 +static int sys_perf_refcount_exit;
435 435  
436   -static void prof_syscall_enter(struct pt_regs *regs, long id)
  436 +static void perf_syscall_enter(struct pt_regs *regs, long id)
437 437 {
438 438 struct syscall_metadata *sys_data;
439 439 struct syscall_trace_enter *rec;
... ... @@ -443,7 +443,7 @@
443 443 int size;
444 444  
445 445 syscall_nr = syscall_get_nr(current, regs);
446   - if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
  446 + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
447 447 return;
448 448  
449 449 sys_data = syscall_nr_to_meta(syscall_nr);
450 450  
... ... @@ -455,11 +455,11 @@
455 455 size = ALIGN(size + sizeof(u32), sizeof(u64));
456 456 size -= sizeof(u32);
457 457  
458   - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
459   - "profile buffer not large enough"))
  458 + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  459 + "perf buffer not large enough"))
460 460 return;
461 461  
462   - rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
  462 + rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
463 463 sys_data->enter_event->id, &rctx, &flags);
464 464 if (!rec)
465 465 return;
466 466  
... ... @@ -467,10 +467,10 @@
467 467 rec->nr = syscall_nr;
468 468 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
469 469 (unsigned long *)&rec->args);
470   - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
  470 + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
471 471 }
472 472  
473   -int prof_sysenter_enable(struct ftrace_event_call *call)
  473 +int perf_sysenter_enable(struct ftrace_event_call *call)
474 474 {
475 475 int ret = 0;
476 476 int num;
477 477  
478 478  
479 479  
480 480  
... ... @@ -478,34 +478,34 @@
478 478 num = ((struct syscall_metadata *)call->data)->syscall_nr;
479 479  
480 480 mutex_lock(&syscall_trace_lock);
481   - if (!sys_prof_refcount_enter)
482   - ret = register_trace_sys_enter(prof_syscall_enter);
  481 + if (!sys_perf_refcount_enter)
  482 + ret = register_trace_sys_enter(perf_syscall_enter);
483 483 if (ret) {
484 484 pr_info("event trace: Could not activate"
485 485 "syscall entry trace point");
486 486 } else {
487   - set_bit(num, enabled_prof_enter_syscalls);
488   - sys_prof_refcount_enter++;
  487 + set_bit(num, enabled_perf_enter_syscalls);
  488 + sys_perf_refcount_enter++;
489 489 }
490 490 mutex_unlock(&syscall_trace_lock);
491 491 return ret;
492 492 }
493 493  
494   -void prof_sysenter_disable(struct ftrace_event_call *call)
  494 +void perf_sysenter_disable(struct ftrace_event_call *call)
495 495 {
496 496 int num;
497 497  
498 498 num = ((struct syscall_metadata *)call->data)->syscall_nr;
499 499  
500 500 mutex_lock(&syscall_trace_lock);
501   - sys_prof_refcount_enter--;
502   - clear_bit(num, enabled_prof_enter_syscalls);
503   - if (!sys_prof_refcount_enter)
504   - unregister_trace_sys_enter(prof_syscall_enter);
  501 + sys_perf_refcount_enter--;
  502 + clear_bit(num, enabled_perf_enter_syscalls);
  503 + if (!sys_perf_refcount_enter)
  504 + unregister_trace_sys_enter(perf_syscall_enter);
505 505 mutex_unlock(&syscall_trace_lock);
506 506 }
507 507  
508   -static void prof_syscall_exit(struct pt_regs *regs, long ret)
  508 +static void perf_syscall_exit(struct pt_regs *regs, long ret)
509 509 {
510 510 struct syscall_metadata *sys_data;
511 511 struct syscall_trace_exit *rec;
... ... @@ -515,7 +515,7 @@
515 515 int size;
516 516  
517 517 syscall_nr = syscall_get_nr(current, regs);
518   - if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
  518 + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
519 519 return;
520 520  
521 521 sys_data = syscall_nr_to_meta(syscall_nr);
522 522  
... ... @@ -530,11 +530,11 @@
530 530 * Impossible, but be paranoid with the future
531 531 * How to put this check outside runtime?
532 532 */
533   - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
534   - "exit event has grown above profile buffer size"))
  533 + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  534 + "exit event has grown above perf buffer size"))
535 535 return;
536 536  
537   - rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
  537 + rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
538 538 sys_data->exit_event->id, &rctx, &flags);
539 539 if (!rec)
540 540 return;
541 541  
... ... @@ -542,10 +542,10 @@
542 542 rec->nr = syscall_nr;
543 543 rec->ret = syscall_get_return_value(current, regs);
544 544  
545   - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
  545 + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
546 546 }
547 547  
548   -int prof_sysexit_enable(struct ftrace_event_call *call)
  548 +int perf_sysexit_enable(struct ftrace_event_call *call)
549 549 {
550 550 int ret = 0;
551 551 int num;
552 552  
553 553  
554 554  
... ... @@ -553,30 +553,30 @@
553 553 num = ((struct syscall_metadata *)call->data)->syscall_nr;
554 554  
555 555 mutex_lock(&syscall_trace_lock);
556   - if (!sys_prof_refcount_exit)
557   - ret = register_trace_sys_exit(prof_syscall_exit);
  556 + if (!sys_perf_refcount_exit)
  557 + ret = register_trace_sys_exit(perf_syscall_exit);
558 558 if (ret) {
559 559 pr_info("event trace: Could not activate"
560 560 "syscall exit trace point");
561 561 } else {
562   - set_bit(num, enabled_prof_exit_syscalls);
563   - sys_prof_refcount_exit++;
  562 + set_bit(num, enabled_perf_exit_syscalls);
  563 + sys_perf_refcount_exit++;
564 564 }
565 565 mutex_unlock(&syscall_trace_lock);
566 566 return ret;
567 567 }
568 568  
569   -void prof_sysexit_disable(struct ftrace_event_call *call)
  569 +void perf_sysexit_disable(struct ftrace_event_call *call)
570 570 {
571 571 int num;
572 572  
573 573 num = ((struct syscall_metadata *)call->data)->syscall_nr;
574 574  
575 575 mutex_lock(&syscall_trace_lock);
576   - sys_prof_refcount_exit--;
577   - clear_bit(num, enabled_prof_exit_syscalls);
578   - if (!sys_prof_refcount_exit)
579   - unregister_trace_sys_exit(prof_syscall_exit);
  576 + sys_perf_refcount_exit--;
  577 + clear_bit(num, enabled_perf_exit_syscalls);
  578 + if (!sys_perf_refcount_exit)
  579 + unregister_trace_sys_exit(perf_syscall_exit);
580 580 mutex_unlock(&syscall_trace_lock);
581 581 }
582 582