kernel/trace/trace_event_perf.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * trace event based perf event profiling/tracing
   *
   * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
   * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
   */
  #include <linux/module.h>
  #include <linux/kprobes.h>
  #include <linux/security.h>
  #include "trace.h"
  #include "trace_probe.h"

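/*
 * One scratch buffer per recursion context (task, softirq, hardirq,
 * NMI), shared by every event; see perf_trace_buf_alloc() below.
 */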
  static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
  typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
  	perf_trace_t;

  /* Count the events in use (per event id, not per instance) */
  static int	total_ref_count;

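/*
 * Decide whether @p_event may attach to @tp_event: children inherit
 * the permission granted to their parent; function-trace events need
 * tracepoint privileges and must not request user-space callchains or
 * stack dumps; raw tracepoint data (PERF_SAMPLE_RAW) is restricted to
 * privileged users unless the event is task-bound and the trace event
 * is flagged TRACE_EVENT_FL_CAP_ANY.
 */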
  static int perf_trace_event_perm(struct trace_event_call *tp_event,
  				 struct perf_event *p_event)
  {
  	int ret;
  	if (tp_event->perf_perm) {
  		ret = tp_event->perf_perm(tp_event, p_event);
  		if (ret)
  			return ret;
  	}
	/*
	 * We checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
  	if (p_event->parent)
  		return 0;
  
	/*
	 * It's ok to check the current process (owner) permissions here,
	 * because the code below is reached only via the perf_event_open
	 * syscall.
	 */
  	/* The ftrace function trace is allowed only for root. */
  	if (ftrace_event_is_function(tp_event)) {
  		ret = perf_allow_tracepoint(&p_event->attr);
  		if (ret)
  			return ret;

  		if (!is_sampling_event(p_event))
  			return 0;
		/*
		 * We don't allow user-space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler, and its overall
		 * trickiness.
		 */
  		if (!p_event->attr.exclude_callchain_user)
  			return -EINVAL;
  
		/*
		 * Same reason to disable user stack dumps as for the
		 * user-space callchains above.
		 */
  		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
  			return -EINVAL;
  	}

  	/* No tracing, just counting, so no obvious leak */
  	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
  		return 0;
  
  	/* Some events are ok to be traced by non-root users... */
  	if (p_event->attach_state == PERF_ATTACH_TASK) {
  		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
  			return 0;
  	}
  
	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to access it.
	 */
  	ret = perf_allow_tracepoint(&p_event->attr);
  	if (ret)
  		return ret;
  
  	return 0;
  }
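/*
 * The first perf user of @tp_event allocates its per-CPU hlist of
 * attached events and registers the perf callback; the very first perf
 * user of any trace event also allocates the shared perf_trace_buf
 * buffers.
 */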
  static int perf_trace_event_reg(struct trace_event_call *tp_event,
  				struct perf_event *p_event)
  {
  	struct hlist_head __percpu *list;
  	int ret = -ENOMEM;
  	int cpu;

  	p_event->tp_event = tp_event;
  	if (tp_event->perf_refcount++ > 0)
  		return 0;
  	list = alloc_percpu(struct hlist_head);
  	if (!list)
  		goto fail;
  
  	for_each_possible_cpu(cpu)
  		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

  	tp_event->perf_events = list;

  	if (!total_ref_count) {
  		char __percpu *buf;
  		int i;

  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			buf = (char __percpu *)alloc_percpu(perf_trace_t);
  			if (!buf)
  				goto fail;

  			perf_trace_buf[i] = buf;
  		}
  	}
  	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
  	if (ret)
  		goto fail;

  	total_ref_count++;
  	return 0;
  
  fail:
  	if (!total_ref_count) {
  		int i;
  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			free_percpu(perf_trace_buf[i]);
  			perf_trace_buf[i] = NULL;
  		}
  	}
  
  	if (!--tp_event->perf_refcount) {
  		free_percpu(tp_event->perf_events);
  		tp_event->perf_events = NULL;
  	}
  
  	return ret;
  }
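/*
 * Drop one perf reference on @p_event's trace event: the last user
 * unregisters the callback, waits out in-flight probes, and frees the
 * per-CPU hlist (and, with the last global reference, the shared
 * buffers).
 */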
  static void perf_trace_event_unreg(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	int i;
  
  	if (--tp_event->perf_refcount > 0)
  		goto out;
  
  	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
  
  	/*
  	 * Ensure our callback won't be called anymore. The buffers
  	 * will be freed after that.
  	 */
  	tracepoint_synchronize_unregister();
  
  	free_percpu(tp_event->perf_events);
  	tp_event->perf_events = NULL;
  
  	if (!--total_ref_count) {
  		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
  			free_percpu(perf_trace_buf[i]);
  			perf_trace_buf[i] = NULL;
  		}
  	}
  out:
  	module_put(tp_event->mod);
  }
  
  static int perf_trace_event_open(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
  }
  
  static void perf_trace_event_close(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
  }
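/*
 * Common init path for all perf trace events: permission check,
 * registration, then the TRACE_REG_PERF_OPEN callback; registration is
 * rolled back if the open step fails.
 */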
  static int perf_trace_event_init(struct trace_event_call *tp_event,
  				 struct perf_event *p_event)
  {
  	int ret;
  
  	ret = perf_trace_event_perm(tp_event, p_event);
  	if (ret)
  		return ret;
  
  	ret = perf_trace_event_reg(tp_event, p_event);
  	if (ret)
  		return ret;
  
  	ret = perf_trace_event_open(p_event);
  	if (ret) {
  		perf_trace_event_unreg(p_event);
  		return ret;
  	}
  
  	return 0;
  }
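/*
 * perf_event_open() entry point for regular trace events: look the
 * event up by attr.config under event_mutex and pin the owning module
 * before initializing it.
 */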
  int perf_trace_init(struct perf_event *p_event)
  {
  	struct trace_event_call *tp_event;
  	u64 event_id = p_event->attr.config;
  	int ret = -EINVAL;

  	mutex_lock(&event_mutex);
  	list_for_each_entry(tp_event, &ftrace_events, list) {
  		if (tp_event->event.type == event_id &&
  		    tp_event->class && tp_event->class->reg &&
  		    try_module_get(tp_event->mod)) {
  			ret = perf_trace_event_init(tp_event, p_event);
  			if (ret)
  				module_put(tp_event->mod);
  			break;
  		}
  	}
  	mutex_unlock(&event_mutex);

  	return ret;
  }
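/* Counterpart of perf_trace_init(): close and unregister under event_mutex. */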
  void perf_trace_destroy(struct perf_event *p_event)
  {
  	mutex_lock(&event_mutex);
  	perf_trace_event_close(p_event);
  	perf_trace_event_unreg(p_event);
  	mutex_unlock(&event_mutex);
  }
  #ifdef CONFIG_KPROBE_EVENTS
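/*
 * Create an on-the-fly kprobe event for perf: copy the optional symbol
 * name from user space, then build a local trace_kprobe from either
 * the name + probe_offset pair or the raw kprobe_addr.
 */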
  int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
  {
  	int ret;
  	char *func = NULL;
  	struct trace_event_call *tp_event;
  
  	if (p_event->attr.kprobe_func) {
  		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
  		if (!func)
  			return -ENOMEM;
  		ret = strncpy_from_user(
  			func, u64_to_user_ptr(p_event->attr.kprobe_func),
  			KSYM_NAME_LEN);
  		if (ret == KSYM_NAME_LEN)
  			ret = -E2BIG;
  		if (ret < 0)
  			goto out;
  
  		if (func[0] == '\0') {
  			kfree(func);
  			func = NULL;
  		}
  	}
  
  	tp_event = create_local_trace_kprobe(
  		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
  		p_event->attr.probe_offset, is_retprobe);
  	if (IS_ERR(tp_event)) {
  		ret = PTR_ERR(tp_event);
  		goto out;
  	}
  	mutex_lock(&event_mutex);
  	ret = perf_trace_event_init(tp_event, p_event);
  	if (ret)
  		destroy_local_trace_kprobe(tp_event);
  	mutex_unlock(&event_mutex);
  out:
  	kfree(func);
  	return ret;
  }
  
  void perf_kprobe_destroy(struct perf_event *p_event)
  {
  	mutex_lock(&event_mutex);
  	perf_trace_event_close(p_event);
  	perf_trace_event_unreg(p_event);
  	mutex_unlock(&event_mutex);
  
  	destroy_local_trace_kprobe(p_event->tp_event);
  }
  #endif /* CONFIG_KPROBE_EVENTS */
  #ifdef CONFIG_UPROBE_EVENTS
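/*
 * Create an on-the-fly uprobe event for perf: copy the binary path
 * from user space and build a local trace_uprobe at probe_offset.
 * ref_ctr_offset, if nonzero, locates the probe's reference counter
 * (typically an SDT semaphore).
 */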
  int perf_uprobe_init(struct perf_event *p_event,
  		     unsigned long ref_ctr_offset, bool is_retprobe)
  {
  	int ret;
  	char *path = NULL;
  	struct trace_event_call *tp_event;
  
  	if (!p_event->attr.uprobe_path)
  		return -EINVAL;
  
  	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
  			    PATH_MAX);
  	if (IS_ERR(path)) {
  		ret = PTR_ERR(path);
  		return (ret == -EINVAL) ? -E2BIG : ret;
  	}
  	if (path[0] == '\0') {
  		ret = -EINVAL;
  		goto out;
  	}
  	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
  					     ref_ctr_offset, is_retprobe);
  	if (IS_ERR(tp_event)) {
  		ret = PTR_ERR(tp_event);
  		goto out;
  	}
  
	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
  	mutex_lock(&event_mutex);
  	ret = perf_trace_event_init(tp_event, p_event);
  	if (ret)
  		destroy_local_trace_uprobe(tp_event);
  	mutex_unlock(&event_mutex);
  out:
  	kfree(path);
  	return ret;
  }
  
  void perf_uprobe_destroy(struct perf_event *p_event)
  {
  	mutex_lock(&event_mutex);
  	perf_trace_event_close(p_event);
  	perf_trace_event_unreg(p_event);
  	mutex_unlock(&event_mutex);
  	destroy_local_trace_uprobe(p_event->tp_event);
  }
  #endif /* CONFIG_UPROBE_EVENTS */
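/*
 * pmu->add() callback: let the event's class handle TRACE_REG_PERF_ADD
 * itself, or fall back to queueing the event on this CPU's hlist so
 * the tracepoint probe will see it.
 */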
  int perf_trace_add(struct perf_event *p_event, int flags)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;

  	if (!(flags & PERF_EF_START))
  		p_event->hw.state = PERF_HES_STOPPED;
	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
  	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
  		struct hlist_head __percpu *pcpu_list;
  		struct hlist_head *list;
  
  		pcpu_list = tp_event->perf_events;
  		if (WARN_ON_ONCE(!pcpu_list))
  			return -EINVAL;
  
  		list = this_cpu_ptr(pcpu_list);
  		hlist_add_head_rcu(&p_event->hlist_entry, list);
  	}

  	return 0;
  }

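/*
 * pmu->del() counterpart: TRACE_REG_PERF_DEL, or the default removal
 * from the per-CPU hlist.
 */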
  void perf_trace_del(struct perf_event *p_event, int flags)
  {
  	struct trace_event_call *tp_event = p_event->tp_event;
  
	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
  	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
  		hlist_del_rcu(&p_event->hlist_entry);
  }
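/*
 * Hand out the per-CPU raw sample buffer for the current recursion
 * context; the context obtained here is released again on the submit
 * path (see perf_trace_buf_submit()).
 */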
  void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
  {
  	char *raw_data;
  	int rctx;

  	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
  	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  		      "perf buffer not large enough"))
  		return NULL;
  	*rctxp = rctx = perf_swevent_get_recursion_context();
  	if (rctx < 0)
  		return NULL;

  	if (regs)
  		*regs = this_cpu_ptr(&__perf_regs[rctx]);
  	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);
  
	/* Zero the dead alignment bytes so we don't leak stack data to user space */
  	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
  	return raw_data;
  }
  EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
  NOKPROBE_SYMBOL(perf_trace_buf_alloc);
  
  void perf_trace_buf_update(void *record, u16 type)
  {
  	struct trace_entry *entry = record;
  	int pc = preempt_count();
  	unsigned long flags;

  	local_save_flags(flags);
  	tracing_generic_entry_update(entry, type, flags, pc);
  }
  NOKPROBE_SYMBOL(perf_trace_buf_update);
  
  #ifdef CONFIG_FUNCTION_TRACER
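/*
 * ftrace callback installed by perf_ftrace_function_register(): it
 * fires on every traced function, bails out unless running on the CPU
 * the event was added on (stashed in ops->private), and submits a
 * synthesized ftrace_entry to perf.
 */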
  static void
  perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
  			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
  {
  	struct ftrace_entry *entry;
  	struct perf_event *event;
  	struct hlist_head head;
  	struct pt_regs regs;
  	int rctx;
  	if ((unsigned long)ops->private != smp_processor_id())
  		return;
  	event = container_of(ops, struct perf_event, ftrace_ops);
  
  	/*
  	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
  	 * the perf code does is hlist_for_each_entry_rcu(), so we can
  	 * get away with simply setting the @head.first pointer in order
  	 * to create a singular list.
  	 */
  	head.first = &event->hlist_entry;
  #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
  		    sizeof(u64)) - sizeof(u32))
  
  	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
  	memset(&regs, 0, sizeof(regs));
  	perf_fetch_caller_regs(&regs);
  	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
  	if (!entry)
  		return;
  
  	entry->ip = ip;
  	entry->parent_ip = parent_ip;
  	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
  			      1, &regs, &head, NULL);
  
  #undef ENTRY_SIZE
  }
  
  static int perf_ftrace_function_register(struct perf_event *event)
  {
  	struct ftrace_ops *ops = &event->ftrace_ops;
  	ops->flags   = FTRACE_OPS_FL_RCU;
  	ops->func    = perf_ftrace_function_call;
  	ops->private = (void *)(unsigned long)nr_cpu_ids;
  	return register_ftrace_function(ops);
  }
  
  static int perf_ftrace_function_unregister(struct perf_event *event)
  {
  	struct ftrace_ops *ops = &event->ftrace_ops;
  	int ret = unregister_ftrace_function(ops);
  	ftrace_free_filter(ops);
  	return ret;
  }
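/*
 * reg() callback for the function-trace event: open/close (un)register
 * the ftrace_ops, while add/del record the owning CPU in ops->private
 * and return 1 so the core skips the default hlist handling.
 */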
  int perf_ftrace_event_register(struct trace_event_call *call,
  			       enum trace_reg type, void *data)
  {
  	struct perf_event *event = data;
  	switch (type) {
  	case TRACE_REG_REGISTER:
  	case TRACE_REG_UNREGISTER:
  		break;
  	case TRACE_REG_PERF_REGISTER:
  	case TRACE_REG_PERF_UNREGISTER:
  		return 0;
  	case TRACE_REG_PERF_OPEN:
  		return perf_ftrace_function_register(data);
  	case TRACE_REG_PERF_CLOSE:
  		return perf_ftrace_function_unregister(data);
  	case TRACE_REG_PERF_ADD:
  		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
  		return 1;
  	case TRACE_REG_PERF_DEL:
  		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
  		return 1;
  	}
  
  	return -EINVAL;
  }
  #endif /* CONFIG_FUNCTION_TRACER */