kernel/trace/trace_event_perf.c

  /*
   * trace event based perf event profiling/tracing
   *
   * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
   * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
   */
  #include <linux/module.h>
  #include <linux/kprobes.h>
  #include "trace.h"
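
  /*
   * One per-CPU scratch buffer per perf recursion context, shared by all
   * trace events that perf has enabled.
   */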
  static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

  /*
   * Force it to be aligned to unsigned long to avoid misaligned access
   * surprises
   */
  typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
  	perf_trace_t;

  /* Count the events in use (per event id, not per instance) */
  static int	total_ref_count;

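  /*
   * Decide whether @p_event may use @tp_event: honour any event-specific
   * ->perf_perm() hook, restrict the ftrace function event, and require
   * CAP_SYS_ADMIN for raw tracepoint data unless this is a per-task event
   * on a class marked TRACE_EVENT_FL_CAP_ANY.
   */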
  static int perf_trace_event_perm(struct trace_event_call *tp_event,
  				 struct perf_event *p_event)
  {
  	if (tp_event->perf_perm) {
  		int ret = tp_event->perf_perm(tp_event, p_event);
  		if (ret)
  			return ret;
  	}
  	/*
  	 * We already checked and allowed creating the parent,
  	 * so allow its children without checking.
  	 */
  	if (p_event->parent)
  		return 0;
  
  	/*
  	 * It's ok to check current process (owner) permissions in here,
  	 * because code below is called only via perf_event_open syscall.
  	 */
  	/* The ftrace function trace is allowed only for root. */
  	if (ftrace_event_is_function(tp_event)) {
  		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
  			return -EPERM;
  		if (!is_sampling_event(p_event))
  			return 0;
  		/*
  		 * We don't allow user space callchains for the function trace
  		 * event, due to issues with page faults while tracing the page
  		 * fault handler and its overall trickiness.
  		 */
  		if (!p_event->attr.exclude_callchain_user)
  			return -EINVAL;
  
  		/*
  		 * Same reason to disable user stack dump as for user space
  		 * callchains above.
  		 */
  		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
  			return -EINVAL;
  	}

  	/* No tracing, just counting, so no obvious leak */
  	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
  		return 0;
  
  	/* Some events are ok to be traced by non-root users... */
  	if (p_event->attach_state == PERF_ATTACH_TASK) {
  		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
  			return 0;
  	}
  
  	/*
  	 * ...otherwise raw tracepoint data can be a severe data leak,
  	 * only allow root to have these.
  	 */
  	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
  		return -EPERM;
  
  	return 0;
  }
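
  /*
   * Take a reference on @tp_event for perf.  The first user allocates the
   * per-CPU hlist of active events and, if this is the first trace event
   * used by perf at all, the shared perf_trace_buf buffers, then invokes
   * the class TRACE_REG_PERF_REGISTER hook.
   */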
  static int perf_trace_event_reg(struct trace_event_call *tp_event,
  				struct perf_event *p_event)
  {
  	struct hlist_head __percpu *list;
  	int ret = -ENOMEM;
  	int cpu;

  	p_event->tp_event = tp_event;
  	if (tp_event->perf_refcount++ > 0)
  		return 0;
  	list = alloc_percpu(struct hlist_head);
  	if (!list)
  		goto fail;
  
  	for_each_possible_cpu(cpu)
  		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

  	tp_event->perf_events = list;

  	if (!total_ref_count) {
  		char __percpu *buf;
  		int i;

  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			buf = (char __percpu *)alloc_percpu(perf_trace_t);
  			if (!buf)
  				goto fail;

  			perf_trace_buf[i] = buf;
  		}
  	}
  	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
  	if (ret)
  		goto fail;

  	total_ref_count++;
  	return 0;
  
  fail:
  	if (!total_ref_count) {
  		int i;
  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			free_percpu(perf_trace_buf[i]);
  			perf_trace_buf[i] = NULL;
  		}
  	}
  
  	if (!--tp_event->perf_refcount) {
  		free_percpu(tp_event->perf_events);
  		tp_event->perf_events = NULL;
  	}
  
  	return ret;
  }
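
  /*
   * Drop a reference on the trace event.  The last user unregisters the
   * perf callback, waits for in-flight probes to finish, and frees the
   * per-CPU list and, if nothing else needs them, the shared buffers.
   */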
  static void perf_trace_event_unreg(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	int i;
  
  	if (--tp_event->perf_refcount > 0)
  		goto out;
  
  	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
  
  	/*
  	 * Ensure our callback won't be called anymore. The buffers
  	 * will be freed after that.
  	 */
  	tracepoint_synchronize_unregister();
  
  	free_percpu(tp_event->perf_events);
  	tp_event->perf_events = NULL;
  
  	if (!--total_ref_count) {
  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			free_percpu(perf_trace_buf[i]);
  			perf_trace_buf[i] = NULL;
  		}
  	}
  out:
  	module_put(tp_event->mod);
  }
  
  static int perf_trace_event_open(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
  }
  
  static void perf_trace_event_close(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
  }
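
  /*
   * Check permissions, register the trace event for perf and open it;
   * the registration is undone if the open step fails.
   */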
  static int perf_trace_event_init(struct trace_event_call *tp_event,
  				 struct perf_event *p_event)
  {
  	int ret;
  
  	ret = perf_trace_event_perm(tp_event, p_event);
  	if (ret)
  		return ret;
  
  	ret = perf_trace_event_reg(tp_event, p_event);
  	if (ret)
  		return ret;
  
  	ret = perf_trace_event_open(p_event);
  	if (ret) {
  		perf_trace_event_unreg(p_event);
  		return ret;
  	}
  
  	return 0;
  }
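
  /*
   * Set up a tracepoint perf event: find the trace event whose id matches
   * attr.config under event_mutex, pin its module and initialize the event.
   */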
  int perf_trace_init(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event;
  	u64 event_id = p_event->attr.config;
  	int ret = -EINVAL;

  	mutex_lock(&event_mutex);
  	list_for_each_entry(tp_event, &ftrace_events, list) {
  		if (tp_event->event.type == event_id &&
  		    tp_event->class && tp_event->class->reg &&
  		    try_module_get(tp_event->mod)) {
  			ret = perf_trace_event_init(tp_event, p_event);
  			if (ret)
  				module_put(tp_event->mod);
  			break;
  		}
  	}
  	mutex_unlock(&event_mutex);

  	return ret;
  }
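
  /* Counterpart of perf_trace_init(): close and unregister under event_mutex. */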
  void perf_trace_destroy(struct perf_event *p_event)
  {
  	mutex_lock(&event_mutex);
  	perf_trace_event_close(p_event);
  	perf_trace_event_unreg(p_event);
  	mutex_unlock(&event_mutex);
  }
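
  /*
   * Put @p_event on this CPU's list of active events for its trace event
   * and forward TRACE_REG_PERF_ADD to the event class.
   */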
  int perf_trace_add(struct perf_event *p_event, int flags)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	struct hlist_head __percpu *pcpu_list;
  	struct hlist_head *list;

  	pcpu_list = tp_event->perf_events;
  	if (WARN_ON_ONCE(!pcpu_list))
  		return -EINVAL;

  	if (!(flags & PERF_EF_START))
  		p_event->hw.state = PERF_HES_STOPPED;
  	list = this_cpu_ptr(pcpu_list);
  	hlist_add_head_rcu(&p_event->hlist_entry, list);

  	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
  }

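  /* Remove @p_event from its CPU list and forward TRACE_REG_PERF_DEL. */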
  void perf_trace_del(struct perf_event *p_event, int flags)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	hlist_del_rcu(&p_event->hlist_entry);
  	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
  }
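
  /*
   * Grab a perf recursion context and return its per-CPU scratch buffer.
   * Returns NULL, without holding a context, if recursion is detected or
   * @size does not fit in PERF_MAX_TRACE_SIZE.
   */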
  void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
  {
  	char *raw_data;
  	int rctx;

  	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
  	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  		      "perf buffer not large enough"))
  		return NULL;
  	*rctxp = rctx = perf_swevent_get_recursion_context();
  	if (rctx < 0)
  		return NULL;

  	if (regs)
  		*regs = this_cpu_ptr(&__perf_regs[rctx]);
  	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);
  
  	/* zero the dead alignment bytes so we do not leak stack data to user space */
  	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
  	return raw_data;
  }
  EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
  NOKPROBE_SYMBOL(perf_trace_buf_alloc);
  
  void perf_trace_buf_update(void *record, u16 type)
  {
  	struct trace_entry *entry = record;
  	int pc = preempt_count();
  	unsigned long flags;

  	local_save_flags(flags);
  	tracing_generic_entry_update(entry, flags, pc);
  	entry->type = type;
  }
  NOKPROBE_SYMBOL(perf_trace_buf_update);
  
  #ifdef CONFIG_FUNCTION_TRACER
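  /*
   * ftrace callback used by perf: build a TRACE_FN entry in the scratch
   * buffer and submit it to the perf event that owns @ops.
   */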
  static void
  perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
  			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
  {
  	struct perf_event *event;
  	struct ftrace_entry *entry;
  	struct hlist_head *head;
  	struct pt_regs regs;
  	int rctx;
  	head = this_cpu_ptr(event_function.perf_events);
  	if (hlist_empty(head))
  		return;
  #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
  		    sizeof(u64)) - sizeof(u32))
  
  	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
  	memset(&regs, 0, sizeof(regs));
  	perf_fetch_caller_regs(&regs);
  	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
  	if (!entry)
  		return;
  
  	entry->ip = ip;
  	entry->parent_ip = parent_ip;
  	event = container_of(ops, struct perf_event, ftrace_ops);
  	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
  			      1, &regs, head, NULL, event);
  
  #undef ENTRY_SIZE
  }
  
  static int perf_ftrace_function_register(struct perf_event *event)
  {
  	struct ftrace_ops *ops = &event->ftrace_ops;
  	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
  	ops->func = perf_ftrace_function_call;
  	return register_ftrace_function(ops);
  }
  
  static int perf_ftrace_function_unregister(struct perf_event *event)
  {
  	struct ftrace_ops *ops = &event->ftrace_ops;
  	int ret = unregister_ftrace_function(ops);
  	ftrace_free_filter(ops);
  	return ret;
  }
  
  static void perf_ftrace_function_enable(struct perf_event *event)
  {
  	ftrace_function_local_enable(&event->ftrace_ops);
  }
  
  static void perf_ftrace_function_disable(struct perf_event *event)
  {
  	ftrace_function_local_disable(&event->ftrace_ops);
  }
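
  /*
   * ->reg() implementation for the ftrace function event: perf open/close
   * register and unregister the per-event ftrace_ops, while add/del enable
   * and disable it on the local CPU.
   */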
  int perf_ftrace_event_register(struct trace_event_call *call,
  			       enum trace_reg type, void *data)
  {
  	switch (type) {
  	case TRACE_REG_REGISTER:
  	case TRACE_REG_UNREGISTER:
  		break;
  	case TRACE_REG_PERF_REGISTER:
  	case TRACE_REG_PERF_UNREGISTER:
  		return 0;
  	case TRACE_REG_PERF_OPEN:
  		return perf_ftrace_function_register(data);
  	case TRACE_REG_PERF_CLOSE:
  		return perf_ftrace_function_unregister(data);
  	case TRACE_REG_PERF_ADD:
  		perf_ftrace_function_enable(data);
  		return 0;
  	case TRACE_REG_PERF_DEL:
  		perf_ftrace_function_disable(data);
  		return 0;
  	}
  
  	return -EINVAL;
  }
  #endif /* CONFIG_FUNCTION_TRACER */