kernel/events/core.c
  /*
   * Performance events core code:
   *
   *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
   *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
   *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
   *
   * For licensing details see kernel-base/COPYING
   */

  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/cpu.h>
  #include <linux/smp.h>
  #include <linux/idr.h>
  #include <linux/file.h>
  #include <linux/poll.h>
  #include <linux/slab.h>
  #include <linux/hash.h>
  #include <linux/sysfs.h>
  #include <linux/dcache.h>
  #include <linux/percpu.h>
  #include <linux/ptrace.h>
  #include <linux/reboot.h>
  #include <linux/vmstat.h>
  #include <linux/device.h>
  #include <linux/vmalloc.h>
  #include <linux/hardirq.h>
  #include <linux/rculist.h>
  #include <linux/uaccess.h>
  #include <linux/syscalls.h>
  #include <linux/anon_inodes.h>
  #include <linux/kernel_stat.h>
  #include <linux/perf_event.h>
  #include <linux/ftrace_event.h>
  #include <linux/hw_breakpoint.h>

  #include "internal.h"

  #include <asm/irq_regs.h>
  struct remote_function_call {
  	struct task_struct	*p;
  	int			(*func)(void *info);
  	void			*info;
  	int			ret;
  };
  
  static void remote_function(void *data)
  {
  	struct remote_function_call *tfc = data;
  	struct task_struct *p = tfc->p;
  
  	if (p) {
  		tfc->ret = -EAGAIN;
  		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
  			return;
  	}
  
  	tfc->ret = tfc->func(tfc->info);
  }
  
  /**
   * task_function_call - call a function on the cpu on which a task runs
   * @p:		the task to evaluate
   * @func:	the function to be called
   * @info:	the function call argument
   *
   * Calls the function @func when the task is currently running. This might
   * be on the current CPU, which just calls the function directly
   *
   * returns: @func return value, or
   *	    -ESRCH  - when the process isn't running
   *	    -EAGAIN - when the process moved away
   */
  static int
  task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  {
  	struct remote_function_call data = {
  		.p	= p,
  		.func	= func,
  		.info	= info,
  		.ret	= -ESRCH, /* No such (running) process */
  	};
  
  	if (task_curr(p))
  		smp_call_function_single(task_cpu(p), remote_function, &data, 1);
  
  	return data.ret;
  }
  
  /**
   * cpu_function_call - call a function on the cpu
   * @func:	the function to be called
   * @info:	the function call argument
   *
   * Calls the function @func on the remote cpu.
   *
   * returns: @func return value or -ENXIO when the cpu is offline
   */
  static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
  {
  	struct remote_function_call data = {
  		.p	= NULL,
  		.func	= func,
  		.info	= info,
  		.ret	= -ENXIO, /* No such CPU */
  	};
  
  	smp_call_function_single(cpu, remote_function, &data, 1);
  
  	return data.ret;
  }
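
  /*
   * Illustrative usage, as seen further down in this file: a cross-CPU
   * operation such as __perf_remove_from_context() is packaged as an
   * int (*func)(void *info) and dispatched with
   *
   *     cpu_function_call(event->cpu, __perf_remove_from_context, event);
   *
   * for per-CPU events, or with task_function_call() for per-task
   * events, the caller rechecking and retrying when the call fails
   * because the task moved away or was not running.
   */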
  #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
  		       PERF_FLAG_FD_OUTPUT  |\
  		       PERF_FLAG_PID_CGROUP)
  enum event_type_t {
  	EVENT_FLEXIBLE = 0x1,
  	EVENT_PINNED = 0x2,
  	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
  };
  /*
   * perf_sched_events : >0 events exist
   * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
   */
  struct jump_label_key perf_sched_events __read_mostly;
  static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
  static atomic_t nr_mmap_events __read_mostly;
  static atomic_t nr_comm_events __read_mostly;
  static atomic_t nr_task_events __read_mostly;

  static LIST_HEAD(pmus);
  static DEFINE_MUTEX(pmus_lock);
  static struct srcu_struct pmus_srcu;
  /*
   * perf event paranoia level:
   *  -1 - not paranoid at all
   *   0 - disallow raw tracepoint access for unpriv
   *   1 - disallow cpu events for unpriv
   *   2 - disallow kernel profiling for unpriv
   */
  int sysctl_perf_event_paranoid __read_mostly = 1;
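
  /*
   * For example, an administrator can tighten this at run time with
   * "echo 2 > /proc/sys/kernel/perf_event_paranoid", which restricts
   * unprivileged users to user-space-only profiling as described above.
   */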

  /* Minimum for 512 kiB + 1 user control page */
  int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
  
  /*
   * max perf event sample rate
   */
  #define DEFAULT_MAX_SAMPLE_RATE 100000
  int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
  static int max_samples_per_tick __read_mostly =
  	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
  
  int perf_proc_update_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
  
  	if (ret || !write)
  		return ret;
  
  	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
  
  	return 0;
  }
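
  /*
   * Worked example: with the default limit of 100000 samples/sec and a
   * (configuration-dependent) HZ of 1000, at most
   * DIV_ROUND_UP(100000, 1000) = 100 samples are allowed per timer tick;
   * writing a new rate through the sysctl handler above recomputes
   * max_samples_per_tick accordingly.
   */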

  static atomic64_t perf_event_id;

  static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  			      enum event_type_t event_type);
  
  static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
  			     enum event_type_t event_type,
  			     struct task_struct *task);
  
  static void update_context_time(struct perf_event_context *ctx);
  static u64 perf_event_time(struct perf_event *event);

  void __weak perf_event_print_debug(void)	{ }

  extern __weak const char *perf_pmu_name(void)
  {
  	return "pmu";
  }
  static inline u64 perf_clock(void)
  {
  	return local_clock();
  }
  static inline struct perf_cpu_context *
  __get_cpu_context(struct perf_event_context *ctx)
  {
  	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
  }
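
  /*
   * Lock ordering note: perf_ctx_lock() below always takes the per-cpu
   * context lock (cpuctx->ctx.lock) first and the task context lock
   * second; perf_ctx_unlock() releases them in the reverse order.
   */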
  static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
  			  struct perf_event_context *ctx)
  {
  	raw_spin_lock(&cpuctx->ctx.lock);
  	if (ctx)
  		raw_spin_lock(&ctx->lock);
  }
  
  static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
  			    struct perf_event_context *ctx)
  {
  	if (ctx)
  		raw_spin_unlock(&ctx->lock);
  	raw_spin_unlock(&cpuctx->ctx.lock);
  }
  #ifdef CONFIG_CGROUP_PERF
  /*
   * Must ensure cgroup is pinned (css_get) before calling
   * this function. In other words, we cannot call this function
   * if there is no cgroup event for the current CPU context.
   */
  static inline struct perf_cgroup *
  perf_cgroup_from_task(struct task_struct *task)
  {
  	return container_of(task_subsys_state(task, perf_subsys_id),
  			struct perf_cgroup, css);
  }
  
  static inline bool
  perf_cgroup_match(struct perf_event *event)
  {
  	struct perf_event_context *ctx = event->ctx;
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  
  	return !event->cgrp || event->cgrp == cpuctx->cgrp;
  }
  
  static inline void perf_get_cgroup(struct perf_event *event)
  {
  	css_get(&event->cgrp->css);
  }
  
  static inline void perf_put_cgroup(struct perf_event *event)
  {
  	css_put(&event->cgrp->css);
  }
  
  static inline void perf_detach_cgroup(struct perf_event *event)
  {
  	perf_put_cgroup(event);
  	event->cgrp = NULL;
  }
  
  static inline int is_cgroup_event(struct perf_event *event)
  {
  	return event->cgrp != NULL;
  }
  
  static inline u64 perf_cgroup_event_time(struct perf_event *event)
  {
  	struct perf_cgroup_info *t;
  
  	t = per_cpu_ptr(event->cgrp->info, event->cpu);
  	return t->time;
  }
  
  static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
  {
  	struct perf_cgroup_info *info;
  	u64 now;
  
  	now = perf_clock();
  
  	info = this_cpu_ptr(cgrp->info);
  
  	info->time += now - info->timestamp;
  	info->timestamp = now;
  }
  
  static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
  {
  	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
  	if (cgrp_out)
  		__update_cgrp_time(cgrp_out);
  }
  
  static inline void update_cgrp_time_from_event(struct perf_event *event)
  {
  	struct perf_cgroup *cgrp;
  	/*
  	 * ensure we access cgroup data only when needed and
  	 * when we know the cgroup is pinned (css_get)
  	 */
  	if (!is_cgroup_event(event))
  		return;
  	cgrp = perf_cgroup_from_task(current);
  	/*
  	 * Do not update time when cgroup is not active
  	 */
  	if (cgrp == event->cgrp)
  		__update_cgrp_time(event->cgrp);
  }
  
  static inline void
  perf_cgroup_set_timestamp(struct task_struct *task,
  			  struct perf_event_context *ctx)
  {
  	struct perf_cgroup *cgrp;
  	struct perf_cgroup_info *info;
  	/*
  	 * ctx->lock held by caller
  	 * ensure we do not access cgroup data
  	 * unless we have the cgroup pinned (css_get)
  	 */
  	if (!task || !ctx->nr_cgroups)
  		return;
  
  	cgrp = perf_cgroup_from_task(task);
  	info = this_cpu_ptr(cgrp->info);
  	info->timestamp = ctx->timestamp;
  }
  
  #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
  #define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
  
  /*
   * reschedule events based on the cgroup constraint of task.
   *
   * mode SWOUT : schedule out everything
   * mode SWIN : schedule in based on cgroup for next
   */
  void perf_cgroup_switch(struct task_struct *task, int mode)
  {
  	struct perf_cpu_context *cpuctx;
  	struct pmu *pmu;
  	unsigned long flags;
  
  	/*
  	 * disable interrupts to avoid getting nr_cgroup
  	 * changes via __perf_event_disable(). Also
  	 * avoids preemption.
  	 */
  	local_irq_save(flags);
  
  	/*
  	 * we reschedule only in the presence of cgroup
  	 * constrained events.
  	 */
  	rcu_read_lock();
  
  	list_for_each_entry_rcu(pmu, &pmus, entry) {
  		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  		/*
  		 * perf_cgroup_events says at least one
  		 * context on this CPU has cgroup events.
  		 *
  		 * ctx->nr_cgroups reports the number of cgroup
  		 * events for a context.
  		 */
  		if (cpuctx->ctx.nr_cgroups > 0) {
  			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
  			perf_pmu_disable(cpuctx->ctx.pmu);
  
  			if (mode & PERF_CGROUP_SWOUT) {
  				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
  				/*
  				 * must not be done before ctxswout due
  				 * to event_filter_match() in event_sched_out()
  				 */
  				cpuctx->cgrp = NULL;
  			}
  
  			if (mode & PERF_CGROUP_SWIN) {
  				WARN_ON_ONCE(cpuctx->cgrp);
  				/* set cgrp before ctxsw in to
  				 * allow event_filter_match() to not
  				 * have to pass task around
  				 */
  				cpuctx->cgrp = perf_cgroup_from_task(task);
  				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
  			}
  			perf_pmu_enable(cpuctx->ctx.pmu);
  			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
  		}
  	}
  
  	rcu_read_unlock();
  
  	local_irq_restore(flags);
  }
  static inline void perf_cgroup_sched_out(struct task_struct *task,
  					 struct task_struct *next)
  {
  	struct perf_cgroup *cgrp1;
  	struct perf_cgroup *cgrp2 = NULL;
  
  	/*
  	 * we come here when we know perf_cgroup_events > 0
  	 */
  	cgrp1 = perf_cgroup_from_task(task);
  
  	/*
  	 * next is NULL when called from perf_event_enable_on_exec()
  	 * that will systematically cause a cgroup_switch()
  	 */
  	if (next)
  		cgrp2 = perf_cgroup_from_task(next);
  
  	/*
  	 * only schedule out current cgroup events if we know
  	 * that we are switching to a different cgroup. Otherwise,
  	 * do not touch the cgroup events.
  	 */
  	if (cgrp1 != cgrp2)
  		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
  }
  static inline void perf_cgroup_sched_in(struct task_struct *prev,
  					struct task_struct *task)
  {
  	struct perf_cgroup *cgrp1;
  	struct perf_cgroup *cgrp2 = NULL;
  
  	/*
  	 * we come here when we know perf_cgroup_events > 0
  	 */
  	cgrp1 = perf_cgroup_from_task(task);
  
  	/* prev can never be NULL */
  	cgrp2 = perf_cgroup_from_task(prev);
  
  	/*
  	 * only need to schedule in cgroup events if we are changing
  	 * cgroup during ctxsw. Cgroup events were not scheduled
  	 * out at the previous ctxsw if that was not the case.
  	 */
  	if (cgrp1 != cgrp2)
  		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
  }
  
  static inline int perf_cgroup_connect(int fd, struct perf_event *event,
  				      struct perf_event_attr *attr,
  				      struct perf_event *group_leader)
  {
  	struct perf_cgroup *cgrp;
  	struct cgroup_subsys_state *css;
  	struct file *file;
  	int ret = 0, fput_needed;
  
  	file = fget_light(fd, &fput_needed);
  	if (!file)
  		return -EBADF;
  
  	css = cgroup_css_from_dir(file, perf_subsys_id);
  	if (IS_ERR(css)) {
  		ret = PTR_ERR(css);
  		goto out;
  	}
  
  	cgrp = container_of(css, struct perf_cgroup, css);
  	event->cgrp = cgrp;
  	/* must be done before we fput() the file */
  	perf_get_cgroup(event);
  	/*
  	 * all events in a group must monitor
  	 * the same cgroup because a task belongs
  	 * to only one perf cgroup at a time
  	 */
  	if (group_leader && group_leader->cgrp != cgrp) {
  		perf_detach_cgroup(event);
  		ret = -EINVAL;
  	}
  out:
  	fput_light(file, fput_needed);
  	return ret;
  }
  
  static inline void
  perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
  {
  	struct perf_cgroup_info *t;
  	t = per_cpu_ptr(event->cgrp->info, event->cpu);
  	event->shadow_ctx_time = now - t->timestamp;
  }
  
  static inline void
  perf_cgroup_defer_enabled(struct perf_event *event)
  {
  	/*
  	 * when the current task's perf cgroup does not match
  	 * the event's, we need to remember to call the
  	 * perf_mark_enable() function the first time a task with
  	 * a matching perf cgroup is scheduled in.
  	 */
  	if (is_cgroup_event(event) && !perf_cgroup_match(event))
  		event->cgrp_defer_enabled = 1;
  }
  
  static inline void
  perf_cgroup_mark_enabled(struct perf_event *event,
  			 struct perf_event_context *ctx)
  {
  	struct perf_event *sub;
  	u64 tstamp = perf_event_time(event);
  
  	if (!event->cgrp_defer_enabled)
  		return;
  
  	event->cgrp_defer_enabled = 0;
  
  	event->tstamp_enabled = tstamp - event->total_time_enabled;
  	list_for_each_entry(sub, &event->sibling_list, group_entry) {
  		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
  			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
  			sub->cgrp_defer_enabled = 0;
  		}
  	}
  }
  #else /* !CONFIG_CGROUP_PERF */
  
  static inline bool
  perf_cgroup_match(struct perf_event *event)
  {
  	return true;
  }
  
  static inline void perf_detach_cgroup(struct perf_event *event)
  {}
  
  static inline int is_cgroup_event(struct perf_event *event)
  {
  	return 0;
  }
  
  static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
  {
  	return 0;
  }
  
  static inline void update_cgrp_time_from_event(struct perf_event *event)
  {
  }
  
  static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
  {
  }
  static inline void perf_cgroup_sched_out(struct task_struct *task,
  					 struct task_struct *next)
  {
  }
  static inline void perf_cgroup_sched_in(struct task_struct *prev,
  					struct task_struct *task)
  {
  }
  
  static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
  				      struct perf_event_attr *attr,
  				      struct perf_event *group_leader)
  {
  	return -EINVAL;
  }
  
  static inline void
  perf_cgroup_set_timestamp(struct task_struct *task,
  			  struct perf_event_context *ctx)
  {
  }
  
  void
  perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
  {
  }
  
  static inline void
  perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
  {
  }
  
  static inline u64 perf_cgroup_event_time(struct perf_event *event)
  {
  	return 0;
  }
  
  static inline void
  perf_cgroup_defer_enabled(struct perf_event *event)
  {
  }
  
  static inline void
  perf_cgroup_mark_enabled(struct perf_event *event,
  			 struct perf_event_context *ctx)
  {
  }
  #endif
  void perf_pmu_disable(struct pmu *pmu)
  {
  	int *count = this_cpu_ptr(pmu->pmu_disable_count);
  	if (!(*count)++)
  		pmu->pmu_disable(pmu);
  }

  void perf_pmu_enable(struct pmu *pmu)
  {
  	int *count = this_cpu_ptr(pmu->pmu_disable_count);
  	if (!--(*count))
  		pmu->pmu_enable(pmu);
  }

  static DEFINE_PER_CPU(struct list_head, rotation_list);
  
  /*
   * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
   * because they're strictly cpu affine and rotate_start is called with IRQs
   * disabled, while rotate_context is called from IRQ context.
   */
  static void perf_pmu_rotate_start(struct pmu *pmu)
  {
  	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  	struct list_head *head = &__get_cpu_var(rotation_list);

  	WARN_ON(!irqs_disabled());

  	if (list_empty(&cpuctx->rotation_list))
  		list_add(&cpuctx->rotation_list, head);
  }

  static void get_ctx(struct perf_event_context *ctx)
  {
  	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  }
  static void put_ctx(struct perf_event_context *ctx)
  {
  	if (atomic_dec_and_test(&ctx->refcount)) {
  		if (ctx->parent_ctx)
  			put_ctx(ctx->parent_ctx);
  		if (ctx->task)
  			put_task_struct(ctx->task);
  		kfree_rcu(ctx, rcu_head);
  	}
  }
  static void unclone_ctx(struct perf_event_context *ctx)
  {
  	if (ctx->parent_ctx) {
  		put_ctx(ctx->parent_ctx);
  		ctx->parent_ctx = NULL;
  	}
  }
  static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
  {
  	/*
  	 * only top level events have the pid namespace they were created in
  	 */
  	if (event->parent)
  		event = event->parent;
  
  	return task_tgid_nr_ns(p, event->ns);
  }
  
  static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
  {
  	/*
  	 * only top level events have the pid namespace they were created in
  	 */
  	if (event->parent)
  		event = event->parent;
  
  	return task_pid_nr_ns(p, event->ns);
  }
  /*
   * If we inherit events we want to return the parent event id
   * to userspace.
   */
  static u64 primary_event_id(struct perf_event *event)
  {
  	u64 id = event->id;

  	if (event->parent)
  		id = event->parent->id;
  
  	return id;
  }
  /*
   * Get the perf_event_context for a task and lock it.
   * This has to cope with the fact that until it is locked,
   * the context could get moved to another task.
   */
  static struct perf_event_context *
  perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
  {
  	struct perf_event_context *ctx;
  
  	rcu_read_lock();
  retry:
  	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
  	if (ctx) {
  		/*
  		 * If this context is a clone of another, it might
  		 * get swapped for another underneath us by
  		 * perf_event_task_sched_out, though the
  		 * rcu_read_lock() protects us from any context
  		 * getting freed.  Lock the context and check if it
  		 * got swapped before we could get the lock, and retry
  		 * if so.  If we locked the right context, then it
  		 * can't get swapped on us any more.
  		 */
  		raw_spin_lock_irqsave(&ctx->lock, *flags);
  		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
  			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  			goto retry;
  		}
  
  		if (!atomic_inc_not_zero(&ctx->refcount)) {
  			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  			ctx = NULL;
  		}
  	}
  	rcu_read_unlock();
  	return ctx;
  }
  
  /*
   * Get the context for a task and increment its pin_count so it
   * can't get swapped to another task.  This also increments its
   * reference count so that the context can't get freed.
   */
  static struct perf_event_context *
  perf_pin_task_context(struct task_struct *task, int ctxn)
  {
  	struct perf_event_context *ctx;
  	unsigned long flags;
  	ctx = perf_lock_task_context(task, ctxn, &flags);
  	if (ctx) {
  		++ctx->pin_count;
  		raw_spin_unlock_irqrestore(&ctx->lock, flags);
  	}
  	return ctx;
  }
  static void perf_unpin_context(struct perf_event_context *ctx)
  {
  	unsigned long flags;
  	raw_spin_lock_irqsave(&ctx->lock, flags);
  	--ctx->pin_count;
  	raw_spin_unlock_irqrestore(&ctx->lock, flags);
  }
  /*
   * Update the record of the current time in a context.
   */
  static void update_context_time(struct perf_event_context *ctx)
  {
  	u64 now = perf_clock();
  
  	ctx->time += now - ctx->timestamp;
  	ctx->timestamp = now;
  }
  static u64 perf_event_time(struct perf_event *event)
  {
  	struct perf_event_context *ctx = event->ctx;
  
  	if (is_cgroup_event(event))
  		return perf_cgroup_event_time(event);
  	return ctx ? ctx->time : 0;
  }
  /*
   * Update the total_time_enabled and total_time_running fields for a event.
   * The caller of this function needs to hold the ctx->lock.
   */
  static void update_event_times(struct perf_event *event)
  {
  	struct perf_event_context *ctx = event->ctx;
  	u64 run_end;
  
  	if (event->state < PERF_EVENT_STATE_INACTIVE ||
  	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
  		return;
  	/*
  	 * in cgroup mode, time_enabled represents
  	 * the time the event was enabled AND active
  	 * tasks were in the monitored cgroup. This is
  	 * independent of the activity of the context as
  	 * there may be a mix of cgroup and non-cgroup events.
  	 *
  	 * That is why we treat cgroup events differently
  	 * here.
  	 */
  	if (is_cgroup_event(event))
  		run_end = perf_event_time(event);
  	else if (ctx->is_active)
  		run_end = ctx->time;
  	else
  		run_end = event->tstamp_stopped;
  
  	event->total_time_enabled = run_end - event->tstamp_enabled;
  
  	if (event->state == PERF_EVENT_STATE_INACTIVE)
  		run_end = event->tstamp_stopped;
  	else
  		run_end = perf_event_time(event);
  
  	event->total_time_running = run_end - event->tstamp_running;

  }
  /*
   * Update total_time_enabled and total_time_running for all events in a group.
   */
  static void update_group_times(struct perf_event *leader)
  {
  	struct perf_event *event;
  
  	update_event_times(leader);
  	list_for_each_entry(event, &leader->sibling_list, group_entry)
  		update_event_times(event);
  }
  static struct list_head *
  ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
  {
  	if (event->attr.pinned)
  		return &ctx->pinned_groups;
  	else
  		return &ctx->flexible_groups;
  }
  /*
   * Add an event to the lists for its context.
   * Must be called with ctx->mutex and ctx->lock held.
   */
  static void
  list_add_event(struct perf_event *event, struct perf_event_context *ctx)
  {
  	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
  	event->attach_state |= PERF_ATTACH_CONTEXT;
  
  	/*
  	 * If we're a stand alone event or group leader, we go to the context
  	 * list, group events are kept attached to the group so that
  	 * perf_group_detach can, at all times, locate all siblings.
  	 */
  	if (event->group_leader == event) {
  		struct list_head *list;
  		if (is_software_event(event))
  			event->group_flags |= PERF_GROUP_SOFTWARE;
  		list = ctx_group_list(event, ctx);
  		list_add_tail(&event->group_entry, list);
  	}

  	if (is_cgroup_event(event))
  		ctx->nr_cgroups++;

  	list_add_rcu(&event->event_entry, &ctx->event_list);
  	if (!ctx->nr_events)
  		perf_pmu_rotate_start(ctx->pmu);
  	ctx->nr_events++;
  	if (event->attr.inherit_stat)
  		ctx->nr_stat++;
  }
  /*
   * Called at perf_event creation and when events are attached/detached from a
   * group.
   */
  static void perf_event__read_size(struct perf_event *event)
  {
  	int entry = sizeof(u64); /* value */
  	int size = 0;
  	int nr = 1;
  
  	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  		size += sizeof(u64);
  
  	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  		size += sizeof(u64);
  
  	if (event->attr.read_format & PERF_FORMAT_ID)
  		entry += sizeof(u64);
  
  	if (event->attr.read_format & PERF_FORMAT_GROUP) {
  		nr += event->group_leader->nr_siblings;
  		size += sizeof(u64);
  	}
  
  	size += entry * nr;
  	event->read_size = size;
  }
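
  /*
   * Worked example: for read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
   * PERF_FORMAT_ID on an event without PERF_FORMAT_GROUP, entry is
   * 16 bytes (value + id), size is 8 bytes (time_enabled) and nr is 1,
   * so read_size = 8 + 1 * 16 = 24 bytes.
   */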
  
  static void perf_event__header_size(struct perf_event *event)
  {
  	struct perf_sample_data *data;
  	u64 sample_type = event->attr.sample_type;
  	u16 size = 0;
  
  	perf_event__read_size(event);
  
  	if (sample_type & PERF_SAMPLE_IP)
  		size += sizeof(data->ip);
  	if (sample_type & PERF_SAMPLE_ADDR)
  		size += sizeof(data->addr);
  
  	if (sample_type & PERF_SAMPLE_PERIOD)
  		size += sizeof(data->period);
  
  	if (sample_type & PERF_SAMPLE_READ)
  		size += event->read_size;
  
  	event->header_size = size;
  }
  
  static void perf_event__id_header_size(struct perf_event *event)
  {
  	struct perf_sample_data *data;
  	u64 sample_type = event->attr.sample_type;
  	u16 size = 0;
  	if (sample_type & PERF_SAMPLE_TID)
  		size += sizeof(data->tid_entry);
  
  	if (sample_type & PERF_SAMPLE_TIME)
  		size += sizeof(data->time);
  	if (sample_type & PERF_SAMPLE_ID)
  		size += sizeof(data->id);
  
  	if (sample_type & PERF_SAMPLE_STREAM_ID)
  		size += sizeof(data->stream_id);
  
  	if (sample_type & PERF_SAMPLE_CPU)
  		size += sizeof(data->cpu_entry);
  	event->id_header_size = size;
  }
  static void perf_group_attach(struct perf_event *event)
  {
  	struct perf_event *group_leader = event->group_leader, *pos;

  	/*
  	 * We can have double attach due to group movement in perf_event_open.
  	 */
  	if (event->attach_state & PERF_ATTACH_GROUP)
  		return;
  	event->attach_state |= PERF_ATTACH_GROUP;
  
  	if (group_leader == event)
  		return;
  
  	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
  			!is_software_event(event))
  		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
  
  	list_add_tail(&event->group_entry, &group_leader->sibling_list);
  	group_leader->nr_siblings++;
  
  	perf_event__header_size(group_leader);
  
  	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
  		perf_event__header_size(pos);
  }
  /*
   * Remove an event from the lists for its context.
   * Must be called with ctx->mutex and ctx->lock held.
   */
  static void
  list_del_event(struct perf_event *event, struct perf_event_context *ctx)
  {
  	struct perf_cpu_context *cpuctx;
  	/*
  	 * We can have double detach due to exit/hot-unplug + close.
  	 */
  	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
  		return;
  
  	event->attach_state &= ~PERF_ATTACH_CONTEXT;
  	if (is_cgroup_event(event)) {
  		ctx->nr_cgroups--;
  		cpuctx = __get_cpu_context(ctx);
  		/*
  		 * if there are no more cgroup events
  		 * then clear cgrp to avoid stale pointer
  		 * in update_cgrp_time_from_cpuctx()
  		 */
  		if (!ctx->nr_cgroups)
  			cpuctx->cgrp = NULL;
  	}

  	ctx->nr_events--;
  	if (event->attr.inherit_stat)
  		ctx->nr_stat--;

  	list_del_rcu(&event->event_entry);

  	if (event->group_leader == event)
  		list_del_init(&event->group_entry);

  	update_group_times(event);
  
  	/*
  	 * If event was in error state, then keep it
  	 * that way, otherwise bogus counts will be
  	 * returned on read(). The only way to get out
  	 * of error state is by explicit re-enabling
  	 * of the event
  	 */
  	if (event->state > PERF_EVENT_STATE_OFF)
  		event->state = PERF_EVENT_STATE_OFF;
  }
  static void perf_group_detach(struct perf_event *event)
  {
  	struct perf_event *sibling, *tmp;
  	struct list_head *list = NULL;
  
  	/*
  	 * We can have double detach due to exit/hot-unplug + close.
  	 */
  	if (!(event->attach_state & PERF_ATTACH_GROUP))
  		return;
  
  	event->attach_state &= ~PERF_ATTACH_GROUP;
  
  	/*
  	 * If this is a sibling, remove it from its group.
  	 */
  	if (event->group_leader != event) {
  		list_del_init(&event->group_entry);
  		event->group_leader->nr_siblings--;
  		goto out;
  	}
  
  	if (!list_empty(&event->group_entry))
  		list = &event->group_entry;

  	/*
  	 * If this was a group event with sibling events then
  	 * upgrade the siblings to singleton events by adding them
  	 * to whatever list we are on.
  	 */
  	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
  		if (list)
  			list_move_tail(&sibling->group_entry, list);
  		sibling->group_leader = sibling;
  
  		/* Inherit group flags from the previous leader */
  		sibling->group_flags = event->group_flags;
  	}
  
  out:
  	perf_event__header_size(event->group_leader);
  
  	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
  		perf_event__header_size(tmp);
  }
  static inline int
  event_filter_match(struct perf_event *event)
  {
  	return (event->cpu == -1 || event->cpu == smp_processor_id())
  	    && perf_cgroup_match(event);
  }
  static void
  event_sched_out(struct perf_event *event,
  		  struct perf_cpu_context *cpuctx,
  		  struct perf_event_context *ctx)
  {
  	u64 tstamp = perf_event_time(event);
  	u64 delta;
  	/*
  	 * An event which could not be activated because of
  	 * filter mismatch still needs to have its timings
  	 * maintained, otherwise bogus information is returned
  	 * via read() for time_enabled, time_running:
  	 */
  	if (event->state == PERF_EVENT_STATE_INACTIVE
  	    && !event_filter_match(event)) {
  		delta = tstamp - event->tstamp_stopped;
  		event->tstamp_running += delta;
  		event->tstamp_stopped = tstamp;
  	}
  	if (event->state != PERF_EVENT_STATE_ACTIVE)
  		return;

  	event->state = PERF_EVENT_STATE_INACTIVE;
  	if (event->pending_disable) {
  		event->pending_disable = 0;
  		event->state = PERF_EVENT_STATE_OFF;
  	}
  	event->tstamp_stopped = tstamp;
  	event->pmu->del(event, 0);
  	event->oncpu = -1;

  	if (!is_software_event(event))
  		cpuctx->active_oncpu--;
  	ctx->nr_active--;
  	if (event->attr.exclusive || !cpuctx->active_oncpu)
  		cpuctx->exclusive = 0;
  }
  static void
  group_sched_out(struct perf_event *group_event,
  		struct perf_cpu_context *cpuctx,
  		struct perf_event_context *ctx)
  {
  	struct perf_event *event;
  	int state = group_event->state;

  	event_sched_out(group_event, cpuctx, ctx);
  
  	/*
  	 * Schedule out siblings (if any):
  	 */
  	list_for_each_entry(event, &group_event->sibling_list, group_entry)
  		event_sched_out(event, cpuctx, ctx);

  	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
  		cpuctx->exclusive = 0;
  }
  /*
   * Cross CPU call to remove a performance event
   *
   * We disable the event on the hardware level first. After that we
   * remove it from the context list.
   */
  static int __perf_remove_from_context(void *info)
  {
  	struct perf_event *event = info;
  	struct perf_event_context *ctx = event->ctx;
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

  	raw_spin_lock(&ctx->lock);
  	event_sched_out(event, cpuctx, ctx);
  	list_del_event(event, ctx);
  	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
  		ctx->is_active = 0;
  		cpuctx->task_ctx = NULL;
  	}
  	raw_spin_unlock(&ctx->lock);
  
  	return 0;
  }
  
  
  /*
   * Remove the event from a task's (or a CPU's) list of events.
   *
   * CPU events are removed with a smp call. For task events we only
   * call when the task is on a CPU.
   *
   * If event->ctx is a cloned context, callers must make sure that
   * every task struct that event->ctx->task could possibly point to
   * remains valid.  This is OK when called from perf_release since
   * that only calls us on the top-level context, which can't be a clone.
   * When called from perf_event_exit_task, it's OK because the
   * context has been detached from its task.
   */
  static void perf_remove_from_context(struct perf_event *event)
  {
  	struct perf_event_context *ctx = event->ctx;
  	struct task_struct *task = ctx->task;
  	lockdep_assert_held(&ctx->mutex);
  	if (!task) {
  		/*
  		 * Per cpu events are removed via an smp call and
  		 * the removal is always successful.
  		 */
  		cpu_function_call(event->cpu, __perf_remove_from_context, event);
  		return;
  	}
  
  retry:
  	if (!task_function_call(task, __perf_remove_from_context, event))
  		return;

  	raw_spin_lock_irq(&ctx->lock);
  	/*
  	 * If we failed to find a running task, but find the context active now
  	 * that we've acquired the ctx->lock, retry.
  	 */
  	if (ctx->is_active) {
  		raw_spin_unlock_irq(&ctx->lock);
  		goto retry;
  	}
  
  	/*
   * Since the task isn't running, it's safe to remove the event, us
  	 * holding the ctx->lock ensures the task won't get scheduled in.
  	 */
  	list_del_event(event, ctx);
  	raw_spin_unlock_irq(&ctx->lock);
  }
  /*
   * Cross CPU call to disable a performance event
   */
  static int __perf_event_disable(void *info)
  {
  	struct perf_event *event = info;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1177
  	struct perf_event_context *ctx = event->ctx;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1178
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
1179
1180
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1181
1182
  	 * If this is a per-task event, we need to check whether this
  	 * event's task is the current task on this cpu.
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1183
1184
1185
  	 *
  	 * Can trigger due to concurrent perf_event_context_sched_out()
  	 * flipping contexts around.
d859e29fe   Paul Mackerras   perf_counter: Add...
1186
  	 */
665c2142a   Peter Zijlstra   perf_counter: Cle...
1187
  	if (ctx->task && cpuctx->task_ctx != ctx)
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1188
  		return -EINVAL;
d859e29fe   Paul Mackerras   perf_counter: Add...
1189

e625cce1b   Thomas Gleixner   perf_event: Conve...
1190
  	raw_spin_lock(&ctx->lock);
d859e29fe   Paul Mackerras   perf_counter: Add...
1191
1192
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1193
  	 * If the event is on, turn it off.
d859e29fe   Paul Mackerras   perf_counter: Add...
1194
1195
  	 * If it is in error state, leave it in error state.
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1196
  	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
4af4998b8   Peter Zijlstra   perf_counter: rew...
1197
  		update_context_time(ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1198
  		update_cgrp_time_from_event(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1199
1200
1201
  		update_group_times(event);
  		if (event == event->group_leader)
  			group_sched_out(event, cpuctx, ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
1202
  		else
cdd6c482c   Ingo Molnar   perf: Do the big ...
1203
1204
  			event_sched_out(event, cpuctx, ctx);
  		event->state = PERF_EVENT_STATE_OFF;
d859e29fe   Paul Mackerras   perf_counter: Add...
1205
  	}
e625cce1b   Thomas Gleixner   perf_event: Conve...
1206
  	raw_spin_unlock(&ctx->lock);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1207
1208
  
  	return 0;
d859e29fe   Paul Mackerras   perf_counter: Add...
1209
1210
1211
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1212
   * Disable an event.
c93f76690   Paul Mackerras   perf_counter: Fix...
1213
   *
cdd6c482c   Ingo Molnar   perf: Do the big ...
1214
1215
   * If event->ctx is a cloned context, callers must make sure that
   * every task struct that event->ctx->task could possibly point to
c93f76690   Paul Mackerras   perf_counter: Fix...
1216
   * remains valid.  This condition is satisfied when called through
cdd6c482c   Ingo Molnar   perf: Do the big ...
1217
1218
1219
1220
   * perf_event_for_each_child or perf_event_for_each because they
   * hold the top-level event's child_mutex, so any descendant that
   * goes to exit will block in sync_child_event.
   * When called from perf_pending_event it's OK because event->ctx
c93f76690   Paul Mackerras   perf_counter: Fix...
1221
   * is the current context on this CPU and preemption is disabled,
cdd6c482c   Ingo Molnar   perf: Do the big ...
1222
   * hence we can't get into perf_event_task_sched_out for this context.
d859e29fe   Paul Mackerras   perf_counter: Add...
1223
   */
44234adcd   Frederic Weisbecker   hw-breakpoints: M...
1224
  void perf_event_disable(struct perf_event *event)
d859e29fe   Paul Mackerras   perf_counter: Add...
1225
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1226
  	struct perf_event_context *ctx = event->ctx;
d859e29fe   Paul Mackerras   perf_counter: Add...
1227
1228
1229
1230
  	struct task_struct *task = ctx->task;
  
  	if (!task) {
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1231
  		 * Disable the event on the cpu that it's on
d859e29fe   Paul Mackerras   perf_counter: Add...
1232
  		 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1233
  		cpu_function_call(event->cpu, __perf_event_disable, event);
d859e29fe   Paul Mackerras   perf_counter: Add...
1234
1235
  		return;
  	}
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1236
  retry:
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1237
1238
  	if (!task_function_call(task, __perf_event_disable, event))
  		return;
d859e29fe   Paul Mackerras   perf_counter: Add...
1239

e625cce1b   Thomas Gleixner   perf_event: Conve...
1240
  	raw_spin_lock_irq(&ctx->lock);
d859e29fe   Paul Mackerras   perf_counter: Add...
1241
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1242
  	 * If the event is still active, we need to retry the cross-call.
d859e29fe   Paul Mackerras   perf_counter: Add...
1243
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1244
  	if (event->state == PERF_EVENT_STATE_ACTIVE) {
e625cce1b   Thomas Gleixner   perf_event: Conve...
1245
  		raw_spin_unlock_irq(&ctx->lock);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1246
1247
1248
1249
1250
  		/*
  		 * Reload the task pointer; it might have been changed by
  		 * a concurrent perf_event_context_sched_out().
  		 */
  		task = ctx->task;
d859e29fe   Paul Mackerras   perf_counter: Add...
1251
1252
1253
1254
1255
1256
1257
  		goto retry;
  	}
  
  	/*
  	 * Since we have the lock this context can't be scheduled
  	 * in, so we can change the state safely.
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1258
1259
1260
  	if (event->state == PERF_EVENT_STATE_INACTIVE) {
  		update_group_times(event);
  		event->state = PERF_EVENT_STATE_OFF;
53cfbf593   Paul Mackerras   perf_counter: rec...
1261
  	}
e625cce1b   Thomas Gleixner   perf_event: Conve...
1262
  	raw_spin_unlock_irq(&ctx->lock);
d859e29fe   Paul Mackerras   perf_counter: Add...
1263
  }
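
  /*
   * Illustrative aside, not part of this file: from user space the usual
   * way to reach perf_event_disable() above (and perf_event_enable()
   * further down in this file) is the PERF_EVENT_IOC_DISABLE and
   * PERF_EVENT_IOC_ENABLE ioctls on a counter fd returned by
   * perf_event_open(2).  A minimal sketch, assuming a simple hardware
   * event and eliding error handling:
   */
  #if 0	/* user-space sketch, not kernel code */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  int main(void)
  {
  	struct perf_event_attr attr;
  	uint64_t count;
  	int fd;

  	memset(&attr, 0, sizeof(attr));
  	attr.type = PERF_TYPE_HARDWARE;
  	attr.size = sizeof(attr);
  	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
  	attr.disabled = 1;			/* start in the OFF state */

  	/* count the calling task on any CPU */
  	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  	if (fd < 0)
  		return 1;

  	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* ends up in perf_event_enable() */
  	/* ... workload to be measured ... */
  	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* ends up in perf_event_disable() */

  	read(fd, &count, sizeof(count));
  	printf("instructions: %llu\n", (unsigned long long)count);
  	close(fd);
  	return 0;
  }
  #endif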
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
  static void perf_set_shadow_time(struct perf_event *event,
  				 struct perf_event_context *ctx,
  				 u64 tstamp)
  {
  	/*
  	 * use the correct time source for the time snapshot
  	 *
  	 * We could get by without this by leveraging the
  	 * fact that to get to this function, the caller
  	 * has most likely already called update_context_time()
  	 * and update_cgrp_time_xx() and thus both timestamps
  	 * are identical (or very close). Given that tstamp is
  	 * already adjusted for cgroup, we could say that:
  	 *    tstamp - ctx->timestamp
  	 * is equivalent to
  	 *    tstamp - cgrp->timestamp.
  	 *
  	 * Then, in perf_output_read(), the calculation would
  	 * work with no changes because:
  	 * - event is guaranteed scheduled in
  	 * - no scheduled out in between
  	 * - thus the timestamp would be the same
  	 *
  	 * But this is a bit hairy.
  	 *
  	 * So instead, we have an explicit cgroup call to remain
  	 * within the time source all along. We believe it
  	 * is cleaner and simpler to understand.
  	 */
  	if (is_cgroup_event(event))
  		perf_cgroup_set_shadow_time(event, tstamp);
  	else
  		event->shadow_ctx_time = tstamp - ctx->timestamp;
  }
4fe757dd4   Peter Zijlstra   perf: Fix throttl...
1298
1299
1300
  #define MAX_INTERRUPTS (~0ULL)
  
  static void perf_log_throttle(struct perf_event *event, int enable);
235c7fc7c   Ingo Molnar   perfcounters: gen...
1301
  static int
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1302
  event_sched_in(struct perf_event *event,
235c7fc7c   Ingo Molnar   perfcounters: gen...
1303
  		 struct perf_cpu_context *cpuctx,
6e37738a2   Peter Zijlstra   perf_events: Simp...
1304
  		 struct perf_event_context *ctx)
235c7fc7c   Ingo Molnar   perfcounters: gen...
1305
  {
4158755d3   Stephane Eranian   perf_events: Add ...
1306
  	u64 tstamp = perf_event_time(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1307
  	if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7c   Ingo Molnar   perfcounters: gen...
1308
  		return 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1309
  	event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a2   Peter Zijlstra   perf_events: Simp...
1310
  	event->oncpu = smp_processor_id();
4fe757dd4   Peter Zijlstra   perf: Fix throttl...
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
  
  	/*
  	 * Unthrottle events; since we were just scheduled in we might have
  	 * missed several ticks already, and for a heavily scheduling task
  	 * there is little guarantee it'll get a tick in a timely manner.
  	 */
  	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
  		perf_log_throttle(event, 1);
  		event->hw.interrupts = 0;
  	}
235c7fc7c   Ingo Molnar   perfcounters: gen...
1321
1322
1323
1324
  	/*
  	 * The new state must be visible before we turn it on in the hardware:
  	 */
  	smp_wmb();
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1325
  	if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1326
1327
  		event->state = PERF_EVENT_STATE_INACTIVE;
  		event->oncpu = -1;
235c7fc7c   Ingo Molnar   perfcounters: gen...
1328
1329
  		return -EAGAIN;
  	}
4158755d3   Stephane Eranian   perf_events: Add ...
1330
  	event->tstamp_running += tstamp - event->tstamp_stopped;
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1331

e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1332
  	perf_set_shadow_time(event, ctx, tstamp);
eed01528a   Stephane Eranian   perf_events: Fix ...
1333

cdd6c482c   Ingo Molnar   perf: Do the big ...
1334
  	if (!is_software_event(event))
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1335
  		cpuctx->active_oncpu++;
235c7fc7c   Ingo Molnar   perfcounters: gen...
1336
  	ctx->nr_active++;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1337
  	if (event->attr.exclusive)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1338
  		cpuctx->exclusive = 1;
235c7fc7c   Ingo Molnar   perfcounters: gen...
1339
1340
  	return 0;
  }
6751b71ea   Paul Mackerras   perf_counter: Put...
1341
  static int
cdd6c482c   Ingo Molnar   perf: Do the big ...
1342
  group_sched_in(struct perf_event *group_event,
6751b71ea   Paul Mackerras   perf_counter: Put...
1343
  	       struct perf_cpu_context *cpuctx,
6e37738a2   Peter Zijlstra   perf_events: Simp...
1344
  	       struct perf_event_context *ctx)
6751b71ea   Paul Mackerras   perf_counter: Put...
1345
  {
6bde9b6ce   Lin Ming   perf: Add group s...
1346
  	struct perf_event *event, *partial_group = NULL;
51b0fe395   Peter Zijlstra   perf: Deconstify ...
1347
  	struct pmu *pmu = group_event->pmu;
d7842da47   Stephane Eranian   perf_events: Fix ...
1348
1349
  	u64 now = ctx->time;
  	bool simulate = false;
6751b71ea   Paul Mackerras   perf_counter: Put...
1350

cdd6c482c   Ingo Molnar   perf: Do the big ...
1351
  	if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71ea   Paul Mackerras   perf_counter: Put...
1352
  		return 0;
ad5133b70   Peter Zijlstra   perf: Default PMU...
1353
  	pmu->start_txn(pmu);
6bde9b6ce   Lin Ming   perf: Add group s...
1354

9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1355
  	if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b70   Peter Zijlstra   perf: Default PMU...
1356
  		pmu->cancel_txn(pmu);
6751b71ea   Paul Mackerras   perf_counter: Put...
1357
  		return -EAGAIN;
90151c35b   Stephane Eranian   perf_events: Fix ...
1358
  	}
6751b71ea   Paul Mackerras   perf_counter: Put...
1359
1360
1361
1362
  
  	/*
  	 * Schedule in siblings as one group (if any):
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1363
  	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1364
  		if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1365
  			partial_group = event;
6751b71ea   Paul Mackerras   perf_counter: Put...
1366
1367
1368
  			goto group_error;
  		}
  	}
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1369
  	if (!pmu->commit_txn(pmu))
6e85158cf   Paul Mackerras   perf_event: Make ...
1370
  		return 0;
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1371

6751b71ea   Paul Mackerras   perf_counter: Put...
1372
1373
1374
1375
  group_error:
  	/*
  	 * Groups can be scheduled in as one unit only, so undo any
  	 * partial group before returning:
d7842da47   Stephane Eranian   perf_events: Fix ...
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
  	 * The events up to the failed event are scheduled out normally,
  	 * tstamp_stopped will be updated.
  	 *
  	 * The failed events and the remaining siblings need to have
  	 * their timings updated as if they had gone through event_sched_in()
  	 * and event_sched_out(). This is required to get consistent timings
  	 * across the group. This also takes care of the case where the group
  	 * could never be scheduled by ensuring tstamp_stopped is set to mark
  	 * the time the event was actually stopped, such that time delta
  	 * calculation in update_event_times() is correct.
6751b71ea   Paul Mackerras   perf_counter: Put...
1386
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1387
1388
  	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  		if (event == partial_group)
d7842da47   Stephane Eranian   perf_events: Fix ...
1389
1390
1391
1392
1393
1394
1395
1396
  			simulate = true;
  
  		if (simulate) {
  			event->tstamp_running += now - event->tstamp_stopped;
  			event->tstamp_stopped = now;
  		} else {
  			event_sched_out(event, cpuctx, ctx);
  		}
6751b71ea   Paul Mackerras   perf_counter: Put...
1397
  	}
9ffcfa6f1   Stephane Eranian   perf_events: Reve...
1398
  	event_sched_out(group_event, cpuctx, ctx);
6751b71ea   Paul Mackerras   perf_counter: Put...
1399

ad5133b70   Peter Zijlstra   perf: Default PMU...
1400
  	pmu->cancel_txn(pmu);
90151c35b   Stephane Eranian   perf_events: Fix ...
1401

6751b71ea   Paul Mackerras   perf_counter: Put...
1402
1403
  	return -EAGAIN;
  }
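
  /*
   * Illustrative aside, not part of this file: group_sched_in() is what
   * makes an event group all-or-nothing on the PMU.  From user space a
   * group is built by passing the leader's fd as group_fd to
   * perf_event_open(2); reading the leader with PERF_FORMAT_GROUP then
   * returns all members in one snapshot.  A minimal sketch, with the
   * event choices as simplifying assumptions:
   */
  #if 0	/* user-space sketch, not kernel code */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static int open_event(uint64_t config, int group_fd)
  {
  	struct perf_event_attr attr;

  	memset(&attr, 0, sizeof(attr));
  	attr.type = PERF_TYPE_HARDWARE;
  	attr.size = sizeof(attr);
  	attr.config = config;
  	attr.disabled = (group_fd == -1);	/* only the leader starts disabled */
  	attr.read_format = PERF_FORMAT_GROUP;

  	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
  }

  int main(void)
  {
  	uint64_t buf[3];	/* nr, then one value per group member */
  	int leader, member;

  	leader = open_event(PERF_COUNT_HW_CPU_CYCLES, -1);
  	member = open_event(PERF_COUNT_HW_INSTRUCTIONS, leader);
  	if (leader < 0 || member < 0)
  		return 1;

  	/* enabling the leader schedules the whole group in together */
  	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
  	/* ... workload ... */
  	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

  	read(leader, buf, sizeof(buf));
  	printf("cycles=%llu instructions=%llu\n",
  	       (unsigned long long)buf[1], (unsigned long long)buf[2]);
  	return 0;
  }
  #endif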
0793a61d4   Thomas Gleixner   performance count...
1404
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1405
   * Work out whether we can put this event group on the CPU now.
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1406
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1407
  static int group_can_go_on(struct perf_event *event,
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1408
1409
1410
1411
  			   struct perf_cpu_context *cpuctx,
  			   int can_add_hw)
  {
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1412
  	 * Groups consisting entirely of software events can always go on.
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1413
  	 */
d6f962b57   Frederic Weisbecker   perf: Export soft...
1414
  	if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1415
1416
1417
  		return 1;
  	/*
  	 * If an exclusive group is already on, no other hardware
cdd6c482c   Ingo Molnar   perf: Do the big ...
1418
  	 * events can go on.
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1419
1420
1421
1422
1423
  	 */
  	if (cpuctx->exclusive)
  		return 0;
  	/*
  	 * If this group is exclusive and there are already
cdd6c482c   Ingo Molnar   perf: Do the big ...
1424
  	 * events on the CPU, it can't go on.
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1425
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1426
  	if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1427
1428
1429
1430
1431
1432
1433
  		return 0;
  	/*
  	 * Otherwise, try to add it if all previous groups were able
  	 * to go on.
  	 */
  	return can_add_hw;
  }
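
  /*
   * Illustrative aside, not part of this file: the pinned/exclusive
   * constraints checked above correspond to two perf_event_attr bits
   * that user space can set at perf_event_open() time.  A fragment
   * showing just those fields (the rest of the setup is as in the
   * sketches above):
   */
  #if 0	/* user-space sketch, not kernel code */
  	struct perf_event_attr attr;

  	memset(&attr, 0, sizeof(attr));
  	attr.pinned    = 1;	/* must always be on the PMU, else goes to ERROR */
  	attr.exclusive = 1;	/* while on, no other events may share the PMU */
  #endif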
cdd6c482c   Ingo Molnar   perf: Do the big ...
1434
1435
  static void add_event_to_ctx(struct perf_event *event,
  			       struct perf_event_context *ctx)
53cfbf593   Paul Mackerras   perf_counter: rec...
1436
  {
4158755d3   Stephane Eranian   perf_events: Add ...
1437
  	u64 tstamp = perf_event_time(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1438
  	list_add_event(event, ctx);
8a49542c0   Peter Zijlstra   perf_events: Fix ...
1439
  	perf_group_attach(event);
4158755d3   Stephane Eranian   perf_events: Add ...
1440
1441
1442
  	event->tstamp_enabled = tstamp;
  	event->tstamp_running = tstamp;
  	event->tstamp_stopped = tstamp;
53cfbf593   Paul Mackerras   perf_counter: rec...
1443
  }
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1444
1445
1446
1447
1448
1449
  static void task_ctx_sched_out(struct perf_event_context *ctx);
  static void
  ctx_sched_in(struct perf_event_context *ctx,
  	     struct perf_cpu_context *cpuctx,
  	     enum event_type_t event_type,
  	     struct task_struct *task);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1450

dce5855bb   Peter Zijlstra   perf: Collect the...
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
  static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
  				struct perf_event_context *ctx,
  				struct task_struct *task)
  {
  	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
  	if (ctx)
  		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
  	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
  	if (ctx)
  		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
  }
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1462
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1463
   * Cross CPU call to install and enable a performance event
682076ae1   Peter Zijlstra   perf_counter: San...
1464
1465
   *
   * Must be called with ctx->mutex held
0793a61d4   Thomas Gleixner   performance count...
1466
   */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1467
  static int __perf_install_in_context(void *info)
0793a61d4   Thomas Gleixner   performance count...
1468
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1469
1470
  	struct perf_event *event = info;
  	struct perf_event_context *ctx = event->ctx;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1471
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1472
1473
  	struct perf_event_context *task_ctx = cpuctx->task_ctx;
  	struct task_struct *task = current;
b58f6b0dd   Peter Zijlstra   perf, core: Fix i...
1474
  	perf_ctx_lock(cpuctx, task_ctx);
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1475
  	perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d4   Thomas Gleixner   performance count...
1476
1477
  
  	/*
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1478
  	 * If there was an active task_ctx schedule it out.
0793a61d4   Thomas Gleixner   performance count...
1479
  	 */
b58f6b0dd   Peter Zijlstra   perf, core: Fix i...
1480
  	if (task_ctx)
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1481
  		task_ctx_sched_out(task_ctx);
b58f6b0dd   Peter Zijlstra   perf, core: Fix i...
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
  
  	/*
  	 * If the context we're installing events in is not the
  	 * active task_ctx, flip them.
  	 */
  	if (ctx->task && task_ctx != ctx) {
  		if (task_ctx)
  			raw_spin_unlock(&task_ctx->lock);
  		raw_spin_lock(&ctx->lock);
  		task_ctx = ctx;
  	}
  
  	if (task_ctx) {
  		cpuctx->task_ctx = task_ctx;
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1496
1497
  		task = task_ctx->task;
  	}
b58f6b0dd   Peter Zijlstra   perf, core: Fix i...
1498

2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1499
  	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d4   Thomas Gleixner   performance count...
1500

4af4998b8   Peter Zijlstra   perf_counter: rew...
1501
  	update_context_time(ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1502
1503
1504
1505
1506
1507
  	/*
  	 * update cgrp time only if current cgrp
  	 * matches event->cgrp. Must be done before
  	 * calling add_event_to_ctx()
  	 */
  	update_cgrp_time_from_event(event);
0793a61d4   Thomas Gleixner   performance count...
1508

cdd6c482c   Ingo Molnar   perf: Do the big ...
1509
  	add_event_to_ctx(event, ctx);
0793a61d4   Thomas Gleixner   performance count...
1510

3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1511
  	/*
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1512
  	 * Schedule everything back in
d859e29fe   Paul Mackerras   perf_counter: Add...
1513
  	 */
dce5855bb   Peter Zijlstra   perf: Collect the...
1514
  	perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0fe   Peter Zijlstra   perf: Simplify an...
1515
1516
1517
  
  	perf_pmu_enable(cpuctx->ctx.pmu);
  	perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1518
1519
  
  	return 0;
0793a61d4   Thomas Gleixner   performance count...
1520
1521
1522
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1523
   * Attach a performance event to a context
0793a61d4   Thomas Gleixner   performance count...
1524
   *
cdd6c482c   Ingo Molnar   perf: Do the big ...
1525
1526
   * First we add the event to the list with the hardware enable bit
   * in event->hw_config cleared.
0793a61d4   Thomas Gleixner   performance count...
1527
   *
cdd6c482c   Ingo Molnar   perf: Do the big ...
1528
   * If the event is attached to a task which is on a CPU we use a smp
0793a61d4   Thomas Gleixner   performance count...
1529
1530
1531
1532
   * call to enable it in the task context. The task might have been
   * scheduled away, but we check this in the smp call again.
   */
  static void
cdd6c482c   Ingo Molnar   perf: Do the big ...
1533
1534
  perf_install_in_context(struct perf_event_context *ctx,
  			struct perf_event *event,
0793a61d4   Thomas Gleixner   performance count...
1535
1536
1537
  			int cpu)
  {
  	struct task_struct *task = ctx->task;
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1538
  	lockdep_assert_held(&ctx->mutex);
c3f00c702   Peter Zijlstra   perf: Separate fi...
1539
  	event->ctx = ctx;
0793a61d4   Thomas Gleixner   performance count...
1540
1541
  	if (!task) {
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1542
  		 * Per cpu events are installed via an smp call and
af901ca18   André Goddard Rosa   tree-wide: fix as...
1543
  		 * the install is always successful.
0793a61d4   Thomas Gleixner   performance count...
1544
  		 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1545
  		cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d4   Thomas Gleixner   performance count...
1546
1547
  		return;
  	}
0793a61d4   Thomas Gleixner   performance count...
1548
  retry:
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1549
1550
  	if (!task_function_call(task, __perf_install_in_context, event))
  		return;
0793a61d4   Thomas Gleixner   performance count...
1551

e625cce1b   Thomas Gleixner   perf_event: Conve...
1552
  	raw_spin_lock_irq(&ctx->lock);
0793a61d4   Thomas Gleixner   performance count...
1553
  	/*
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1554
1555
  	 * If we failed to find a running task, but find the context active now
  	 * that we've acquired the ctx->lock, retry.
0793a61d4   Thomas Gleixner   performance count...
1556
  	 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1557
  	if (ctx->is_active) {
e625cce1b   Thomas Gleixner   perf_event: Conve...
1558
  		raw_spin_unlock_irq(&ctx->lock);
0793a61d4   Thomas Gleixner   performance count...
1559
1560
1561
1562
  		goto retry;
  	}
  
  	/*
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1563
1564
  	 * Since the task isn't running, it's safe to add the event; our holding
  	 * the ctx->lock ensures the task won't get scheduled in.
0793a61d4   Thomas Gleixner   performance count...
1565
  	 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1566
  	add_event_to_ctx(event, ctx);
e625cce1b   Thomas Gleixner   perf_event: Conve...
1567
  	raw_spin_unlock_irq(&ctx->lock);
0793a61d4   Thomas Gleixner   performance count...
1568
  }
d859e29fe   Paul Mackerras   perf_counter: Add...
1569
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1570
   * Put an event into inactive state and update time fields.
fa289beca   Paul Mackerras   perf_counter: Sta...
1571
1572
1573
1574
1575
1576
   * Enabling the leader of a group effectively enables all
   * the group members that aren't explicitly disabled, so we
   * have to update their ->tstamp_enabled also.
   * Note: this works for group members as well as group leaders
   * since the non-leader members' sibling_lists will be empty.
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1577
1578
  static void __perf_event_mark_enabled(struct perf_event *event,
  					struct perf_event_context *ctx)
fa289beca   Paul Mackerras   perf_counter: Sta...
1579
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1580
  	struct perf_event *sub;
4158755d3   Stephane Eranian   perf_events: Add ...
1581
  	u64 tstamp = perf_event_time(event);
fa289beca   Paul Mackerras   perf_counter: Sta...
1582

cdd6c482c   Ingo Molnar   perf: Do the big ...
1583
  	event->state = PERF_EVENT_STATE_INACTIVE;
4158755d3   Stephane Eranian   perf_events: Add ...
1584
  	event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1585
  	list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d3   Stephane Eranian   perf_events: Add ...
1586
1587
  		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
  			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1588
  	}
fa289beca   Paul Mackerras   perf_counter: Sta...
1589
1590
1591
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1592
   * Cross CPU call to enable a performance event
d859e29fe   Paul Mackerras   perf_counter: Add...
1593
   */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1594
  static int __perf_event_enable(void *info)
04289bb98   Ingo Molnar   perf counters: ad...
1595
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1596
  	struct perf_event *event = info;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1597
1598
  	struct perf_event_context *ctx = event->ctx;
  	struct perf_event *leader = event->group_leader;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1599
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
1600
  	int err;
04289bb98   Ingo Molnar   perf counters: ad...
1601

fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1602
1603
  	if (WARN_ON_ONCE(!ctx->is_active))
  		return -EINVAL;
3cbed429a   Paul Mackerras   perf_counter: Add...
1604

e625cce1b   Thomas Gleixner   perf_event: Conve...
1605
  	raw_spin_lock(&ctx->lock);
4af4998b8   Peter Zijlstra   perf_counter: rew...
1606
  	update_context_time(ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
1607

cdd6c482c   Ingo Molnar   perf: Do the big ...
1608
  	if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29fe   Paul Mackerras   perf_counter: Add...
1609
  		goto unlock;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1610
1611
1612
1613
  
  	/*
  	 * set current task's cgroup time reference point
  	 */
3f7cce3c1   Stephane Eranian   perf_events: Fix ...
1614
  	perf_cgroup_set_timestamp(current, ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1615

cdd6c482c   Ingo Molnar   perf: Do the big ...
1616
  	__perf_event_mark_enabled(event, ctx);
04289bb98   Ingo Molnar   perf counters: ad...
1617

e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1618
1619
1620
  	if (!event_filter_match(event)) {
  		if (is_cgroup_event(event))
  			perf_cgroup_defer_enabled(event);
f4c4176f2   Peter Zijlstra   perf events: Allo...
1621
  		goto unlock;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1622
  	}
f4c4176f2   Peter Zijlstra   perf events: Allo...
1623

04289bb98   Ingo Molnar   perf counters: ad...
1624
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1625
  	 * If the event is in a group and isn't the group leader,
d859e29fe   Paul Mackerras   perf_counter: Add...
1626
  	 * then don't put it on unless the group is on.
04289bb98   Ingo Molnar   perf counters: ad...
1627
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1628
  	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29fe   Paul Mackerras   perf_counter: Add...
1629
  		goto unlock;
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1630

cdd6c482c   Ingo Molnar   perf: Do the big ...
1631
  	if (!group_can_go_on(event, cpuctx, 1)) {
d859e29fe   Paul Mackerras   perf_counter: Add...
1632
  		err = -EEXIST;
e758a33d6   Paul Mackerras   perf_counter: cal...
1633
  	} else {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1634
  		if (event == leader)
6e37738a2   Peter Zijlstra   perf_events: Simp...
1635
  			err = group_sched_in(event, cpuctx, ctx);
e758a33d6   Paul Mackerras   perf_counter: cal...
1636
  		else
6e37738a2   Peter Zijlstra   perf_events: Simp...
1637
  			err = event_sched_in(event, cpuctx, ctx);
e758a33d6   Paul Mackerras   perf_counter: cal...
1638
  	}
d859e29fe   Paul Mackerras   perf_counter: Add...
1639
1640
1641
  
  	if (err) {
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1642
  		 * If this event can't go on and it's part of a
d859e29fe   Paul Mackerras   perf_counter: Add...
1643
1644
  		 * group, then the whole group has to come off.
  		 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1645
  		if (leader != event)
d859e29fe   Paul Mackerras   perf_counter: Add...
1646
  			group_sched_out(leader, cpuctx, ctx);
0d48696f8   Peter Zijlstra   perf_counter: Ren...
1647
  		if (leader->attr.pinned) {
53cfbf593   Paul Mackerras   perf_counter: rec...
1648
  			update_group_times(leader);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1649
  			leader->state = PERF_EVENT_STATE_ERROR;
53cfbf593   Paul Mackerras   perf_counter: rec...
1650
  		}
d859e29fe   Paul Mackerras   perf_counter: Add...
1651
  	}
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1652
  unlock:
e625cce1b   Thomas Gleixner   perf_event: Conve...
1653
  	raw_spin_unlock(&ctx->lock);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1654
1655
  
  	return 0;
d859e29fe   Paul Mackerras   perf_counter: Add...
1656
1657
1658
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1659
   * Enable an event.
c93f76690   Paul Mackerras   perf_counter: Fix...
1660
   *
cdd6c482c   Ingo Molnar   perf: Do the big ...
1661
1662
   * If event->ctx is a cloned context, callers must make sure that
   * every task struct that event->ctx->task could possibly point to
c93f76690   Paul Mackerras   perf_counter: Fix...
1663
   * remains valid.  This condition is satisfied when called through
cdd6c482c   Ingo Molnar   perf: Do the big ...
1664
1665
   * perf_event_for_each_child or perf_event_for_each as described
   * for perf_event_disable.
d859e29fe   Paul Mackerras   perf_counter: Add...
1666
   */
44234adcd   Frederic Weisbecker   hw-breakpoints: M...
1667
  void perf_event_enable(struct perf_event *event)
d859e29fe   Paul Mackerras   perf_counter: Add...
1668
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1669
  	struct perf_event_context *ctx = event->ctx;
d859e29fe   Paul Mackerras   perf_counter: Add...
1670
1671
1672
1673
  	struct task_struct *task = ctx->task;
  
  	if (!task) {
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1674
  		 * Enable the event on the cpu that it's on
d859e29fe   Paul Mackerras   perf_counter: Add...
1675
  		 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1676
  		cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29fe   Paul Mackerras   perf_counter: Add...
1677
1678
  		return;
  	}
e625cce1b   Thomas Gleixner   perf_event: Conve...
1679
  	raw_spin_lock_irq(&ctx->lock);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1680
  	if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29fe   Paul Mackerras   perf_counter: Add...
1681
1682
1683
  		goto out;
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1684
1685
  	 * If the event is in error state, clear that first.
  	 * That way, if we see the event in error state below, we
d859e29fe   Paul Mackerras   perf_counter: Add...
1686
1687
1688
1689
  	 * know that it has gone back into error state, as distinct
  	 * from the task having been scheduled away before the
  	 * cross-call arrived.
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1690
1691
  	if (event->state == PERF_EVENT_STATE_ERROR)
  		event->state = PERF_EVENT_STATE_OFF;
d859e29fe   Paul Mackerras   perf_counter: Add...
1692

9ed6060d2   Peter Zijlstra   perf: Unindent la...
1693
  retry:
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1694
1695
1696
1697
  	if (!ctx->is_active) {
  		__perf_event_mark_enabled(event, ctx);
  		goto out;
  	}
e625cce1b   Thomas Gleixner   perf_event: Conve...
1698
  	raw_spin_unlock_irq(&ctx->lock);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1699
1700
1701
  
  	if (!task_function_call(task, __perf_event_enable, event))
  		return;
d859e29fe   Paul Mackerras   perf_counter: Add...
1702

e625cce1b   Thomas Gleixner   perf_event: Conve...
1703
  	raw_spin_lock_irq(&ctx->lock);
d859e29fe   Paul Mackerras   perf_counter: Add...
1704
1705
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1706
  	 * If the context is active and the event is still off,
d859e29fe   Paul Mackerras   perf_counter: Add...
1707
1708
  	 * we need to retry the cross-call.
  	 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1709
1710
1711
1712
1713
1714
  	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
  		/*
  		 * task could have been flipped by a concurrent
  		 * perf_event_context_sched_out()
  		 */
  		task = ctx->task;
d859e29fe   Paul Mackerras   perf_counter: Add...
1715
  		goto retry;
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1716
  	}
fa289beca   Paul Mackerras   perf_counter: Sta...
1717

9ed6060d2   Peter Zijlstra   perf: Unindent la...
1718
  out:
e625cce1b   Thomas Gleixner   perf_event: Conve...
1719
  	raw_spin_unlock_irq(&ctx->lock);
d859e29fe   Paul Mackerras   perf_counter: Add...
1720
  }
26ca5c11f   Avi Kivity   perf: export perf...
1721
  int perf_event_refresh(struct perf_event *event, int refresh)
79f146415   Peter Zijlstra   perf_counter: cou...
1722
  {
2023b3592   Peter Zijlstra   perf_counter: inh...
1723
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1724
  	 * not supported on inherited events
2023b3592   Peter Zijlstra   perf_counter: inh...
1725
  	 */
2e939d1da   Franck Bui-Huu   perf: Limit event...
1726
  	if (event->attr.inherit || !is_sampling_event(event))
2023b3592   Peter Zijlstra   perf_counter: inh...
1727
  		return -EINVAL;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1728
1729
  	atomic_add(refresh, &event->event_limit);
  	perf_event_enable(event);
2023b3592   Peter Zijlstra   perf_counter: inh...
1730
1731
  
  	return 0;
79f146415   Peter Zijlstra   perf_counter: cou...
1732
  }
26ca5c11f   Avi Kivity   perf: export perf...
1733
  EXPORT_SYMBOL_GPL(perf_event_refresh);
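
  /*
   * Illustrative aside, not part of this file: perf_event_refresh() is
   * reached from user space via the PERF_EVENT_IOC_REFRESH ioctl, which
   * enables a sampling event for a limited number of overflows before it
   * is disabled again.  A minimal sketch, with the event and period as
   * assumptions and overflow notification (poll/signal) elided:
   */
  #if 0	/* user-space sketch, not kernel code */
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  int main(void)
  {
  	struct perf_event_attr attr;
  	int fd;

  	memset(&attr, 0, sizeof(attr));
  	attr.type = PERF_TYPE_HARDWARE;
  	attr.size = sizeof(attr);
  	attr.config = PERF_COUNT_HW_CPU_CYCLES;
  	attr.sample_period = 1000000;	/* overflow every 1M cycles */
  	attr.disabled = 1;

  	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  	if (fd < 0)
  		return 1;

  	/* enable now, auto-disable again after three overflows */
  	ioctl(fd, PERF_EVENT_IOC_REFRESH, 3);
  	/* ... workload; overflows would normally be consumed via poll() ... */
  	close(fd);
  	return 0;
  }
  #endif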
79f146415   Peter Zijlstra   perf_counter: cou...
1734

5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1735
1736
1737
  static void ctx_sched_out(struct perf_event_context *ctx,
  			  struct perf_cpu_context *cpuctx,
  			  enum event_type_t event_type)
235c7fc7c   Ingo Molnar   perfcounters: gen...
1738
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1739
  	struct perf_event *event;
db24d33e0   Peter Zijlstra   perf: Change and ...
1740
  	int is_active = ctx->is_active;
235c7fc7c   Ingo Molnar   perfcounters: gen...
1741

db24d33e0   Peter Zijlstra   perf: Change and ...
1742
  	ctx->is_active &= ~event_type;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1743
  	if (likely(!ctx->nr_events))
facc43071   Peter Zijlstra   perf: Optimize ev...
1744
  		return;
4af4998b8   Peter Zijlstra   perf_counter: rew...
1745
  	update_context_time(ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1746
  	update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1747
  	if (!ctx->nr_active)
facc43071   Peter Zijlstra   perf: Optimize ev...
1748
  		return;
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1749

075e0b008   Peter Zijlstra   perf: Optimize ct...
1750
  	perf_pmu_disable(ctx->pmu);
db24d33e0   Peter Zijlstra   perf: Change and ...
1751
  	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff0150   Frederic Weisbecker   perf/core: Split ...
1752
1753
  		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
  			group_sched_out(event, cpuctx, ctx);
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1754
  	}
889ff0150   Frederic Weisbecker   perf/core: Split ...
1755

db24d33e0   Peter Zijlstra   perf: Change and ...
1756
  	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff0150   Frederic Weisbecker   perf/core: Split ...
1757
  		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e14   Xiao Guangrong   perf_event: Fix e...
1758
  			group_sched_out(event, cpuctx, ctx);
9ed6060d2   Peter Zijlstra   perf: Unindent la...
1759
  	}
1b9a644fe   Peter Zijlstra   perf: Optimize co...
1760
  	perf_pmu_enable(ctx->pmu);
235c7fc7c   Ingo Molnar   perfcounters: gen...
1761
  }
0793a61d4   Thomas Gleixner   performance count...
1762
  /*
564c2b210   Paul Mackerras   perf_counter: Opt...
1763
1764
   * Test whether two contexts are equivalent, i.e. whether they
   * have both been cloned from the same version of the same context
cdd6c482c   Ingo Molnar   perf: Do the big ...
1765
1766
1767
1768
   * and they both have the same number of enabled events.
   * If the number of enabled events is the same, then the set
   * of enabled events should be the same, because these are both
   * inherited contexts, therefore we can't access individual events
564c2b210   Paul Mackerras   perf_counter: Opt...
1769
   * in them directly with an fd; we can only enable/disable all
cdd6c482c   Ingo Molnar   perf: Do the big ...
1770
   * events via prctl, or enable/disable all events in a family
564c2b210   Paul Mackerras   perf_counter: Opt...
1771
1772
   * via ioctl, which will have the same effect on both contexts.
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1773
1774
  static int context_equiv(struct perf_event_context *ctx1,
  			 struct perf_event_context *ctx2)
564c2b210   Paul Mackerras   perf_counter: Opt...
1775
1776
  {
  	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
ad3a37de8   Paul Mackerras   perf_counter: Don...
1777
  		&& ctx1->parent_gen == ctx2->parent_gen
25346b93c   Paul Mackerras   perf_counter: Pro...
1778
  		&& !ctx1->pin_count && !ctx2->pin_count;
564c2b210   Paul Mackerras   perf_counter: Opt...
1779
  }
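
  /*
   * Illustrative aside, not part of this file: the "enable/disable all
   * events via prctl" mentioned in the comment above is the pair of
   * prctl commands below, which toggle every counter attached to the
   * calling task.  A minimal sketch:
   */
  #if 0	/* user-space sketch, not kernel code */
  #include <sys/prctl.h>
  #include <linux/prctl.h>

  	/* stop all of this task's counters around an uninteresting region */
  	prctl(PR_TASK_PERF_EVENTS_DISABLE);
  	/* ... code that should not be measured ... */
  	prctl(PR_TASK_PERF_EVENTS_ENABLE);
  #endif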
cdd6c482c   Ingo Molnar   perf: Do the big ...
1780
1781
  static void __perf_event_sync_stat(struct perf_event *event,
  				     struct perf_event *next_event)
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1782
1783
  {
  	u64 value;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1784
  	if (!event->attr.inherit_stat)
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1785
1786
1787
  		return;
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1788
  	 * Update the event value, we cannot use perf_event_read()
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1789
1790
  	 * because we're in the middle of a context switch and have IRQs
  	 * disabled, which upsets smp_call_function_single(), however
cdd6c482c   Ingo Molnar   perf: Do the big ...
1791
  	 * we know the event must be on the current CPU, therefore we
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1792
1793
  	 * don't need to use it.
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1794
1795
  	switch (event->state) {
  	case PERF_EVENT_STATE_ACTIVE:
3dbebf15c   Peter Zijlstra   perf: Simplify __...
1796
1797
  		event->pmu->read(event);
  		/* fall-through */
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1798

cdd6c482c   Ingo Molnar   perf: Do the big ...
1799
1800
  	case PERF_EVENT_STATE_INACTIVE:
  		update_event_times(event);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1801
1802
1803
1804
1805
1806
1807
  		break;
  
  	default:
  		break;
  	}
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1808
  	 * In order to keep per-task stats reliable we need to flip the event
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1809
1810
  	 * values when we flip the contexts.
  	 */
e78505958   Peter Zijlstra   perf: Convert per...
1811
1812
1813
  	value = local64_read(&next_event->count);
  	value = local64_xchg(&event->count, value);
  	local64_set(&next_event->count, value);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1814

cdd6c482c   Ingo Molnar   perf: Do the big ...
1815
1816
  	swap(event->total_time_enabled, next_event->total_time_enabled);
  	swap(event->total_time_running, next_event->total_time_running);
19d2e7554   Peter Zijlstra   perf_counter: Com...
1817

bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1818
  	/*
19d2e7554   Peter Zijlstra   perf_counter: Com...
1819
  	 * Since we swizzled the values, update the user visible data too.
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1820
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1821
1822
  	perf_event_update_userpage(event);
  	perf_event_update_userpage(next_event);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1823
1824
1825
1826
  }
  
  #define list_next_entry(pos, member) \
  	list_entry(pos->member.next, typeof(*pos), member)
cdd6c482c   Ingo Molnar   perf: Do the big ...
1827
1828
  static void perf_event_sync_stat(struct perf_event_context *ctx,
  				   struct perf_event_context *next_ctx)
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1829
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1830
  	struct perf_event *event, *next_event;
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1831
1832
1833
  
  	if (!ctx->nr_stat)
  		return;
02ffdbc86   Peter Zijlstra   perf: Optimize pe...
1834
  	update_context_time(ctx);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1835
1836
  	event = list_first_entry(&ctx->event_list,
  				   struct perf_event, event_entry);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1837

cdd6c482c   Ingo Molnar   perf: Do the big ...
1838
1839
  	next_event = list_first_entry(&next_ctx->event_list,
  					struct perf_event, event_entry);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1840

cdd6c482c   Ingo Molnar   perf: Do the big ...
1841
1842
  	while (&event->event_entry != &ctx->event_list &&
  	       &next_event->event_entry != &next_ctx->event_list) {
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1843

cdd6c482c   Ingo Molnar   perf: Do the big ...
1844
  		__perf_event_sync_stat(event, next_event);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1845

cdd6c482c   Ingo Molnar   perf: Do the big ...
1846
1847
  		event = list_next_entry(event, event_entry);
  		next_event = list_next_entry(next_event, event_entry);
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1848
1849
  	}
  }
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
1850
1851
  static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  					 struct task_struct *next)
0793a61d4   Thomas Gleixner   performance count...
1852
  {
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1853
  	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482c   Ingo Molnar   perf: Do the big ...
1854
1855
  	struct perf_event_context *next_ctx;
  	struct perf_event_context *parent;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1856
  	struct perf_cpu_context *cpuctx;
c93f76690   Paul Mackerras   perf_counter: Fix...
1857
  	int do_switch = 1;
0793a61d4   Thomas Gleixner   performance count...
1858

108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1859
1860
  	if (likely(!ctx))
  		return;
10989fb24   Peter Zijlstra   perf_counter: Fix...
1861

108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1862
1863
  	cpuctx = __get_cpu_context(ctx);
  	if (!cpuctx->task_ctx)
0793a61d4   Thomas Gleixner   performance count...
1864
  		return;
c93f76690   Paul Mackerras   perf_counter: Fix...
1865
1866
  	rcu_read_lock();
  	parent = rcu_dereference(ctx->parent_ctx);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1867
  	next_ctx = next->perf_event_ctxp[ctxn];
c93f76690   Paul Mackerras   perf_counter: Fix...
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
  	if (parent && next_ctx &&
  	    rcu_dereference(next_ctx->parent_ctx) == parent) {
  		/*
  		 * Looks like the two contexts are clones, so we might be
  		 * able to optimize the context switch.  We lock both
  		 * contexts and check that they are clones under the
  		 * lock (including re-checking that neither has been
  		 * uncloned in the meantime).  It doesn't matter which
  		 * order we take the locks because no other cpu could
  		 * be trying to lock both of these tasks.
  		 */
e625cce1b   Thomas Gleixner   perf_event: Conve...
1879
1880
  		raw_spin_lock(&ctx->lock);
  		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f76690   Paul Mackerras   perf_counter: Fix...
1881
  		if (context_equiv(ctx, next_ctx)) {
665c2142a   Peter Zijlstra   perf_counter: Cle...
1882
1883
  			/*
  			 * XXX do we need a memory barrier of sorts
cdd6c482c   Ingo Molnar   perf: Do the big ...
1884
  			 * wrt to rcu_dereference() of perf_event_ctxp
665c2142a   Peter Zijlstra   perf_counter: Cle...
1885
  			 */
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1886
1887
  			task->perf_event_ctxp[ctxn] = next_ctx;
  			next->perf_event_ctxp[ctxn] = ctx;
c93f76690   Paul Mackerras   perf_counter: Fix...
1888
1889
1890
  			ctx->task = next;
  			next_ctx->task = task;
  			do_switch = 0;
bfbd3381e   Peter Zijlstra   perf_counter: Imp...
1891

cdd6c482c   Ingo Molnar   perf: Do the big ...
1892
  			perf_event_sync_stat(ctx, next_ctx);
c93f76690   Paul Mackerras   perf_counter: Fix...
1893
  		}
e625cce1b   Thomas Gleixner   perf_event: Conve...
1894
1895
  		raw_spin_unlock(&next_ctx->lock);
  		raw_spin_unlock(&ctx->lock);
564c2b210   Paul Mackerras   perf_counter: Opt...
1896
  	}
c93f76690   Paul Mackerras   perf_counter: Fix...
1897
  	rcu_read_unlock();
564c2b210   Paul Mackerras   perf_counter: Opt...
1898

c93f76690   Paul Mackerras   perf_counter: Fix...
1899
  	if (do_switch) {
facc43071   Peter Zijlstra   perf: Optimize ev...
1900
  		raw_spin_lock(&ctx->lock);
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1901
  		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f76690   Paul Mackerras   perf_counter: Fix...
1902
  		cpuctx->task_ctx = NULL;
facc43071   Peter Zijlstra   perf: Optimize ev...
1903
  		raw_spin_unlock(&ctx->lock);
c93f76690   Paul Mackerras   perf_counter: Fix...
1904
  	}
0793a61d4   Thomas Gleixner   performance count...
1905
  }
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
  #define for_each_task_context_nr(ctxn)					\
  	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
  
  /*
   * Called from scheduler to remove the events of the current task,
   * with interrupts disabled.
   *
   * We stop each event and update the event value in event->count.
   *
   * This does not protect us against NMI, but disable()
   * sets the disabled bit in the control field of event _before_
  	 * accessing the event control register. If an NMI hits, then it will
   * not restart the event.
   */
82cd6def9   Peter Zijlstra   perf: Use jump_la...
1920
1921
  void __perf_event_task_sched_out(struct task_struct *task,
  				 struct task_struct *next)
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1922
1923
  {
  	int ctxn;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1924
1925
  	for_each_task_context_nr(ctxn)
  		perf_event_context_sched_out(task, ctxn, next);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1926
1927
1928
1929
1930
1931
1932
  
  	/*
  	 * if cgroup events exist on this CPU, then we need
  	 * to check if we have to switch out PMU state.
  	 * cgroup events are system-wide mode only
  	 */
  	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef0   Stephane Eranian   perf events: Fix ...
1933
  		perf_cgroup_sched_out(task, next);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
1934
  }
04dc2dbbf   Peter Zijlstra   perf: Remove task...
1935
  static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159fc   Paul Mackerras   perf_counter: don...
1936
  {
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
1937
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159fc   Paul Mackerras   perf_counter: don...
1938

a63eaf34a   Paul Mackerras   perf_counter: Dyn...
1939
1940
  	if (!cpuctx->task_ctx)
  		return;
012b84dae   Ingo Molnar   perf_counter: Rob...
1941
1942
1943
  
  	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  		return;
04dc2dbbf   Peter Zijlstra   perf: Remove task...
1944
  	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159fc   Paul Mackerras   perf_counter: don...
1945
1946
  	cpuctx->task_ctx = NULL;
  }
665c2142a   Peter Zijlstra   perf_counter: Cle...
1947
1948
1949
  /*
   * Called with IRQs disabled
   */
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1950
1951
1952
1953
  static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  			      enum event_type_t event_type)
  {
  	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb98   Ingo Molnar   perf counters: ad...
1954
  }
235c7fc7c   Ingo Molnar   perfcounters: gen...
1955
  static void
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1956
  ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a2   Peter Zijlstra   perf_events: Simp...
1957
  		    struct perf_cpu_context *cpuctx)
0793a61d4   Thomas Gleixner   performance count...
1958
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1959
  	struct perf_event *event;
0793a61d4   Thomas Gleixner   performance count...
1960

889ff0150   Frederic Weisbecker   perf/core: Split ...
1961
1962
  	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  		if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1963
  			continue;
5632ab12e   Stephane Eranian   perf_events: Gene...
1964
  		if (!event_filter_match(event))
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1965
  			continue;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
1966
1967
1968
  		/* may need to reset tstamp_enabled */
  		if (is_cgroup_event(event))
  			perf_cgroup_mark_enabled(event, ctx);
8c9ed8e14   Xiao Guangrong   perf_event: Fix e...
1969
  		if (group_can_go_on(event, cpuctx, 1))
6e37738a2   Peter Zijlstra   perf_events: Simp...
1970
  			group_sched_in(event, cpuctx, ctx);
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1971
1972
1973
1974
1975
  
  		/*
  		 * If this pinned group hasn't been scheduled,
  		 * put it in error state.
  		 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
1976
1977
1978
  		if (event->state == PERF_EVENT_STATE_INACTIVE) {
  			update_group_times(event);
  			event->state = PERF_EVENT_STATE_ERROR;
53cfbf593   Paul Mackerras   perf_counter: rec...
1979
  		}
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1980
  	}
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1981
1982
1983
1984
  }
  
  static void
  ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a2   Peter Zijlstra   perf_events: Simp...
1985
  		      struct perf_cpu_context *cpuctx)
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
1986
1987
1988
  {
  	struct perf_event *event;
  	int can_add_hw = 1;
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1989

889ff0150   Frederic Weisbecker   perf/core: Split ...
1990
1991
1992
  	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  		/* Ignore events in OFF or ERROR state */
  		if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1993
  			continue;
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
1994
  		/*
04289bb98   Ingo Molnar   perf counters: ad...
1995
  		 * Listen to the 'cpu' scheduling filter constraint
cdd6c482c   Ingo Molnar   perf: Do the big ...
1996
  		 * of events:
04289bb98   Ingo Molnar   perf counters: ad...
1997
  		 */
5632ab12e   Stephane Eranian   perf_events: Gene...
1998
  		if (!event_filter_match(event))
0793a61d4   Thomas Gleixner   performance count...
1999
  			continue;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2000
2001
2002
  		/* may need to reset tstamp_enabled */
  		if (is_cgroup_event(event))
  			perf_cgroup_mark_enabled(event, ctx);
9ed6060d2   Peter Zijlstra   perf: Unindent la...
2003
  		if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a2   Peter Zijlstra   perf_events: Simp...
2004
  			if (group_sched_in(event, cpuctx, ctx))
dd0e6ba22   Paul Mackerras   perf_counter: Alw...
2005
  				can_add_hw = 0;
9ed6060d2   Peter Zijlstra   perf: Unindent la...
2006
  		}
0793a61d4   Thomas Gleixner   performance count...
2007
  	}
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2008
2009
2010
2011
2012
  }
  
  static void
  ctx_sched_in(struct perf_event_context *ctx,
  	     struct perf_cpu_context *cpuctx,
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2013
2014
  	     enum event_type_t event_type,
  	     struct task_struct *task)
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2015
  {
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2016
  	u64 now;
db24d33e0   Peter Zijlstra   perf: Change and ...
2017
  	int is_active = ctx->is_active;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2018

db24d33e0   Peter Zijlstra   perf: Change and ...
2019
  	ctx->is_active |= event_type;
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2020
  	if (likely(!ctx->nr_events))
facc43071   Peter Zijlstra   perf: Optimize ev...
2021
  		return;
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2022

e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2023
2024
  	now = perf_clock();
  	ctx->timestamp = now;
3f7cce3c1   Stephane Eranian   perf_events: Fix ...
2025
  	perf_cgroup_set_timestamp(task, ctx);
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2026
2027
2028
2029
  	/*
  	 * First go through the list and put on any pinned groups
  	 * in order to give them the best chance of going on.
  	 */
db24d33e0   Peter Zijlstra   perf: Change and ...
2030
  	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a2   Peter Zijlstra   perf_events: Simp...
2031
  		ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1f   Frederic Weisbecker   perf: Allow pinne...
2032
2033
  
  	/* Then walk through the lower prio flexible groups */
db24d33e0   Peter Zijlstra   perf: Change and ...
2034
  	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a2   Peter Zijlstra   perf_events: Simp...
2035
  		ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7c   Ingo Molnar   perfcounters: gen...
2036
  }
329c0e012   Frederic Weisbecker   perf: Better orde...
2037
  static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2038
2039
  			     enum event_type_t event_type,
  			     struct task_struct *task)
329c0e012   Frederic Weisbecker   perf: Better orde...
2040
2041
  {
  	struct perf_event_context *ctx = &cpuctx->ctx;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2042
  	ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e012   Frederic Weisbecker   perf: Better orde...
2043
  }
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2044
2045
  static void perf_event_context_sched_in(struct perf_event_context *ctx,
  					struct task_struct *task)
235c7fc7c   Ingo Molnar   perfcounters: gen...
2046
  {
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2047
  	struct perf_cpu_context *cpuctx;
235c7fc7c   Ingo Molnar   perfcounters: gen...
2048

108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2049
  	cpuctx = __get_cpu_context(ctx);
329c0e012   Frederic Weisbecker   perf: Better orde...
2050
2051
  	if (cpuctx->task_ctx == ctx)
  		return;
facc43071   Peter Zijlstra   perf: Optimize ev...
2052
  	perf_ctx_lock(cpuctx, ctx);
1b9a644fe   Peter Zijlstra   perf: Optimize co...
2053
  	perf_pmu_disable(ctx->pmu);
329c0e012   Frederic Weisbecker   perf: Better orde...
2054
2055
2056
2057
2058
2059
  	/*
  	 * We want to keep the following priority order:
  	 * cpu pinned (that don't need to move), task pinned,
  	 * cpu flexible, task flexible.
  	 */
  	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
dce5855bb   Peter Zijlstra   perf: Collect the...
2060
  	perf_event_sched_in(cpuctx, ctx, task);
329c0e012   Frederic Weisbecker   perf: Better orde...
2061
2062
  
  	cpuctx->task_ctx = ctx;
9b33fa6ba   eranian@google.com   perf_events: Impr...
2063

facc43071   Peter Zijlstra   perf: Optimize ev...
2064
2065
  	perf_pmu_enable(ctx->pmu);
  	perf_ctx_unlock(cpuctx, ctx);
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2066
2067
2068
2069
  	/*
  	 * Since these rotations are per-cpu, we need to ensure the
  	 * cpu-context we got scheduled on is actually rotating.
  	 */
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2070
  	perf_pmu_rotate_start(ctx->pmu);
235c7fc7c   Ingo Molnar   perfcounters: gen...
2071
  }
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
  /*
   * Called from scheduler to add the events of the current task
   * with interrupts disabled.
   *
   * We restore the event value and then enable it.
   *
   * This does not protect us against NMI, but enable()
   * sets the enabled bit in the control field of event _before_
   * accessing the event control register. If an NMI hits, then it will
   * keep the event running.
   */
a8d757ef0   Stephane Eranian   perf events: Fix ...
2083
2084
  void __perf_event_task_sched_in(struct task_struct *prev,
  				struct task_struct *task)
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2085
2086
2087
2088
2089
2090
2091
2092
  {
  	struct perf_event_context *ctx;
  	int ctxn;
  
  	for_each_task_context_nr(ctxn) {
  		ctx = task->perf_event_ctxp[ctxn];
  		if (likely(!ctx))
  			continue;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2093
  		perf_event_context_sched_in(ctx, task);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2094
  	}
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2095
2096
2097
2098
2099
2100
  	/*
  	 * If cgroup events exist on this CPU, then we need
  	 * to check whether we have to switch in PMU state;
  	 * cgroup events are in system-wide mode only.
  	 */
  	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef0   Stephane Eranian   perf events: Fix ...
2101
  		perf_cgroup_sched_in(prev, task);
235c7fc7c   Ingo Molnar   perfcounters: gen...
2102
  }
abd507139   Peter Zijlstra   perf: Reimplement...
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
  static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
  {
  	u64 frequency = event->attr.sample_freq;
  	u64 sec = NSEC_PER_SEC;
  	u64 divisor, dividend;
  
  	int count_fls, nsec_fls, frequency_fls, sec_fls;
  
  	count_fls = fls64(count);
  	nsec_fls = fls64(nsec);
  	frequency_fls = fls64(frequency);
  	sec_fls = 30;
  
  	/*
  	 * We got @count in @nsec, with a target of sample_freq Hz;
  	 * the target period becomes:
  	 *
  	 *             @count * 10^9
  	 * period = -------------------
  	 *          @nsec * sample_freq
  	 *
  	 */
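  
  	/*
  	 * Worked example with illustrative numbers (not taken from the
  	 * kernel): @count = 1,000,000 events in @nsec = 10,000,000 ns (10 ms)
  	 * with sample_freq = 1000 gives
  	 *
  	 *   period = 1,000,000 * 10^9 / (10,000,000 * 1000) = 100,000
  	 *
  	 * i.e. one sample every 100,000 events, which at the observed rate
  	 * of 10^8 events/sec yields the requested 1000 samples/sec.
  	 */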
  
  	/*
  	 * Reduce accuracy by one bit such that @a and @b converge
  	 * to a similar magnitude.
  	 */
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2130
  #define REDUCE_FLS(a, b)		\
abd507139   Peter Zijlstra   perf: Reimplement...
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
  do {					\
  	if (a##_fls > b##_fls) {	\
  		a >>= 1;		\
  		a##_fls--;		\
  	} else {			\
  		b >>= 1;		\
  		b##_fls--;		\
  	}				\
  } while (0)
  
  	/*
  	 * Reduce accuracy until either term fits in a u64, then proceed with
  	 * the other, so that finally we can do a u64/u64 division.
  	 */
  	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
  		REDUCE_FLS(nsec, frequency);
  		REDUCE_FLS(sec, count);
  	}
  
  	if (count_fls + sec_fls > 64) {
  		divisor = nsec * frequency;
  
  		while (count_fls + sec_fls > 64) {
  			REDUCE_FLS(count, sec);
  			divisor >>= 1;
  		}
  
  		dividend = count * sec;
  	} else {
  		dividend = count * sec;
  
  		while (nsec_fls + frequency_fls > 64) {
  			REDUCE_FLS(nsec, frequency);
  			dividend >>= 1;
  		}
  
  		divisor = nsec * frequency;
  	}
f6ab91add   Peter Zijlstra   perf: Fix signed ...
2169
2170
  	if (!divisor)
  		return dividend;
abd507139   Peter Zijlstra   perf: Reimplement...
2171
2172
2173
2174
  	return div64_u64(dividend, divisor);
  }
  
  static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2175
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2176
  	struct hw_perf_event *hwc = &event->hw;
f6ab91add   Peter Zijlstra   perf: Fix signed ...
2177
  	s64 period, sample_period;
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2178
  	s64 delta;
abd507139   Peter Zijlstra   perf: Reimplement...
2179
  	period = perf_calculate_period(event, nsec, count);
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2180
2181
2182
2183
2184
2185
2186
2187
  
  	delta = (s64)(period - hwc->sample_period);
  	delta = (delta + 7) / 8; /* low pass filter */
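  	/*
  	 * Only 1/8th of the error is applied per adjustment, so the period
  	 * drifts towards the target instead of jumping; e.g. (illustrative
  	 * numbers) sample_period = 100,000 and a computed period of 180,000
  	 * give delta = 80,000, damped to 10,000, so the new sample_period
  	 * becomes 110,000.
  	 */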
  
  	sample_period = hwc->sample_period + delta;
  
  	if (!sample_period)
  		sample_period = 1;
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2188
  	hwc->sample_period = sample_period;
abd507139   Peter Zijlstra   perf: Reimplement...
2189

e78505958   Peter Zijlstra   perf: Convert per...
2190
  	if (local64_read(&hwc->period_left) > 8*sample_period) {
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
2191
  		event->pmu->stop(event, PERF_EF_UPDATE);
e78505958   Peter Zijlstra   perf: Convert per...
2192
  		local64_set(&hwc->period_left, 0);
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
2193
  		event->pmu->start(event, PERF_EF_RELOAD);
abd507139   Peter Zijlstra   perf: Reimplement...
2194
  	}
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2195
  }
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2196
  static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
60db5e09c   Peter Zijlstra   perf_counter: fre...
2197
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2198
2199
  	struct perf_event *event;
  	struct hw_perf_event *hwc;
abd507139   Peter Zijlstra   perf: Reimplement...
2200
2201
  	u64 interrupts, now;
  	s64 delta;
60db5e09c   Peter Zijlstra   perf_counter: fre...
2202

03541f8b6   Paul Mackerras   perf_event: Adjus...
2203
  	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2204
  		if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09c   Peter Zijlstra   perf_counter: fre...
2205
  			continue;
5632ab12e   Stephane Eranian   perf_events: Gene...
2206
  		if (!event_filter_match(event))
5d27c23df   Peter Zijlstra   perf events: Dont...
2207
  			continue;
cdd6c482c   Ingo Molnar   perf: Do the big ...
2208
  		hwc = &event->hw;
6a24ed6c6   Peter Zijlstra   perf_counter: Fix...
2209
2210
2211
  
  		interrupts = hwc->interrupts;
  		hwc->interrupts = 0;
a78ac3258   Peter Zijlstra   perf_counter: Gen...
2212

bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2213
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2214
  		 * unthrottle events on the tick
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
2215
  		 */
a78ac3258   Peter Zijlstra   perf_counter: Gen...
2216
  		if (interrupts == MAX_INTERRUPTS) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2217
  			perf_log_throttle(event, 1);
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
2218
  			event->pmu->start(event, 0);
a78ac3258   Peter Zijlstra   perf_counter: Gen...
2219
  		}
cdd6c482c   Ingo Molnar   perf: Do the big ...
2220
  		if (!event->attr.freq || !event->attr.sample_freq)
60db5e09c   Peter Zijlstra   perf_counter: fre...
2221
  			continue;
abd507139   Peter Zijlstra   perf: Reimplement...
2222
  		event->pmu->read(event);
e78505958   Peter Zijlstra   perf: Convert per...
2223
  		now = local64_read(&event->count);
abd507139   Peter Zijlstra   perf: Reimplement...
2224
2225
  		delta = now - hwc->freq_count_stamp;
  		hwc->freq_count_stamp = now;
60db5e09c   Peter Zijlstra   perf_counter: fre...
2226

abd507139   Peter Zijlstra   perf: Reimplement...
2227
  		if (delta > 0)
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2228
  			perf_adjust_period(event, period, delta);
60db5e09c   Peter Zijlstra   perf_counter: fre...
2229
  	}
60db5e09c   Peter Zijlstra   perf_counter: fre...
2230
  }
235c7fc7c   Ingo Molnar   perfcounters: gen...
2231
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2232
   * Round-robin a context's events:
235c7fc7c   Ingo Molnar   perfcounters: gen...
2233
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
2234
  static void rotate_ctx(struct perf_event_context *ctx)
0793a61d4   Thomas Gleixner   performance count...
2235
  {
dddd3379a   Thomas Gleixner   perf: Fix inherit...
2236
2237
2238
2239
2240
2241
  	/*
  	 * Rotate the first entry last of non-pinned groups. Rotation might be
  	 * disabled by the inheritance code.
  	 */
  	if (!ctx->rotate_disable)
  		list_rotate_left(&ctx->flexible_groups);
235c7fc7c   Ingo Molnar   perfcounters: gen...
2242
  }
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2243
  /*
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2244
2245
2246
   * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
   * because they're strictly cpu affine and rotate_start is called with IRQs
   * disabled, while rotate_context is called from IRQ context.
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2247
   */
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2248
  static void perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7c   Ingo Molnar   perfcounters: gen...
2249
  {
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2250
  	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2251
  	struct perf_event_context *ctx = NULL;
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2252
  	int rotate = 0, remove = 1;
7fc23a538   Peter Zijlstra   perf_counter: opt...
2253

b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2254
  	if (cpuctx->ctx.nr_events) {
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2255
  		remove = 0;
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2256
2257
2258
  		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
  			rotate = 1;
  	}
235c7fc7c   Ingo Molnar   perfcounters: gen...
2259

8dc85d547   Peter Zijlstra   perf: Multiple ta...
2260
  	ctx = cpuctx->task_ctx;
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2261
  	if (ctx && ctx->nr_events) {
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2262
  		remove = 0;
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2263
2264
2265
  		if (ctx->nr_events != ctx->nr_active)
  			rotate = 1;
  	}
9717e6cd3   Peter Zijlstra   perf_events: Opti...
2266

facc43071   Peter Zijlstra   perf: Optimize ev...
2267
  	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644fe   Peter Zijlstra   perf: Optimize co...
2268
  	perf_pmu_disable(cpuctx->ctx.pmu);
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2269
  	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2270
  	if (ctx)
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2271
  		perf_ctx_adjust_freq(ctx, interval);
60db5e09c   Peter Zijlstra   perf_counter: fre...
2272

d4944a066   Peter Zijlstra   perf: Provide bet...
2273
  	if (!rotate)
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2274
  		goto done;
d4944a066   Peter Zijlstra   perf: Provide bet...
2275

7defb0f87   Frederic Weisbecker   perf: Don't sched...
2276
  	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2277
  	if (ctx)
04dc2dbbf   Peter Zijlstra   perf: Remove task...
2278
  		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d4   Thomas Gleixner   performance count...
2279

b82914ce3   Ingo Molnar   perf_counter: rou...
2280
  	rotate_ctx(&cpuctx->ctx);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2281
2282
  	if (ctx)
  		rotate_ctx(ctx);
235c7fc7c   Ingo Molnar   perfcounters: gen...
2283

dce5855bb   Peter Zijlstra   perf: Collect the...
2284
  	perf_event_sched_in(cpuctx, ctx, current);
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2285
2286
  
  done:
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2287
2288
  	if (remove)
  		list_del_init(&cpuctx->rotation_list);
1b9a644fe   Peter Zijlstra   perf: Optimize co...
2289
  	perf_pmu_enable(cpuctx->ctx.pmu);
facc43071   Peter Zijlstra   perf: Optimize ev...
2290
  	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2291
2292
2293
2294
2295
2296
  }
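  
  /*
   * Called from the timer tick with interrupts disabled: walk this CPU's
   * rotation_list and rotate every cpu-context whose jiffies_interval has
   * elapsed (an interval of 1 means "rotate on every tick").
   */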
  
  void perf_event_task_tick(void)
  {
  	struct list_head *head = &__get_cpu_var(rotation_list);
  	struct perf_cpu_context *cpuctx, *tmp;
b5ab4cd56   Peter Zijlstra   perf: Per cpu-con...
2297

e9d2b0641   Peter Zijlstra   perf: Undo the pe...
2298
2299
2300
2301
2302
2303
2304
  	WARN_ON(!irqs_disabled());
  
  	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
  		if (cpuctx->jiffies_interval == 1 ||
  				!(jiffies % cpuctx->jiffies_interval))
  			perf_rotate_context(cpuctx);
  	}
0793a61d4   Thomas Gleixner   performance count...
2305
  }
889ff0150   Frederic Weisbecker   perf/core: Split ...
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
  static int event_enable_on_exec(struct perf_event *event,
  				struct perf_event_context *ctx)
  {
  	if (!event->attr.enable_on_exec)
  		return 0;
  
  	event->attr.enable_on_exec = 0;
  	if (event->state >= PERF_EVENT_STATE_INACTIVE)
  		return 0;
  
  	__perf_event_mark_enabled(event, ctx);
  
  	return 1;
  }
0793a61d4   Thomas Gleixner   performance count...
2320
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2321
   * Enable all of a task's events that have been marked enable-on-exec.
57e7986ed   Paul Mackerras   perf_counter: Pro...
2322
2323
   * This expects task == current.
   */
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2324
  static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986ed   Paul Mackerras   perf_counter: Pro...
2325
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2326
  	struct perf_event *event;
57e7986ed   Paul Mackerras   perf_counter: Pro...
2327
2328
  	unsigned long flags;
  	int enabled = 0;
889ff0150   Frederic Weisbecker   perf/core: Split ...
2329
  	int ret;
57e7986ed   Paul Mackerras   perf_counter: Pro...
2330
2331
  
  	local_irq_save(flags);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2332
  	if (!ctx || !ctx->nr_events)
57e7986ed   Paul Mackerras   perf_counter: Pro...
2333
  		goto out;
e566b76ed   Stephane Eranian   perf_event: Fix c...
2334
2335
2336
2337
2338
2339
2340
  	/*
  	 * We must context-switch out cgroup events to avoid a conflict
  	 * when invoking perf_task_event_sched_in() later on
  	 * in this function. Otherwise we would end up trying to switch
  	 * in cgroup events that are already scheduled in.
  	 */
a8d757ef0   Stephane Eranian   perf events: Fix ...
2341
  	perf_cgroup_sched_out(current, NULL);
57e7986ed   Paul Mackerras   perf_counter: Pro...
2342

e625cce1b   Thomas Gleixner   perf_event: Conve...
2343
  	raw_spin_lock(&ctx->lock);
04dc2dbbf   Peter Zijlstra   perf: Remove task...
2344
  	task_ctx_sched_out(ctx);
57e7986ed   Paul Mackerras   perf_counter: Pro...
2345

889ff0150   Frederic Weisbecker   perf/core: Split ...
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
  	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  		ret = event_enable_on_exec(event, ctx);
  		if (ret)
  			enabled = 1;
  	}
  
  	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  		ret = event_enable_on_exec(event, ctx);
  		if (ret)
  			enabled = 1;
57e7986ed   Paul Mackerras   perf_counter: Pro...
2356
2357
2358
  	}
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2359
  	 * Unclone this context if we enabled any event.
57e7986ed   Paul Mackerras   perf_counter: Pro...
2360
  	 */
71a851b4d   Peter Zijlstra   perf_counter: Sto...
2361
2362
  	if (enabled)
  		unclone_ctx(ctx);
57e7986ed   Paul Mackerras   perf_counter: Pro...
2363

e625cce1b   Thomas Gleixner   perf_event: Conve...
2364
  	raw_spin_unlock(&ctx->lock);
57e7986ed   Paul Mackerras   perf_counter: Pro...
2365

e566b76ed   Stephane Eranian   perf_event: Fix c...
2366
2367
2368
  	/*
  	 * Also context-switches cgroup events back in, if any:
  	 */
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2369
  	perf_event_context_sched_in(ctx, ctx->task);
9ed6060d2   Peter Zijlstra   perf: Unindent la...
2370
  out:
57e7986ed   Paul Mackerras   perf_counter: Pro...
2371
2372
2373
2374
  	local_irq_restore(flags);
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2375
   * Cross CPU call to read the hardware event
0793a61d4   Thomas Gleixner   performance count...
2376
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
2377
  static void __perf_event_read(void *info)
0793a61d4   Thomas Gleixner   performance count...
2378
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2379
2380
  	struct perf_event *event = info;
  	struct perf_event_context *ctx = event->ctx;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2381
  	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01eac   Ingo Molnar   perf counters: hw...
2382

e1ac3614f   Paul Mackerras   perf_counter: Che...
2383
2384
2385
2386
  	/*
  	 * If this is a task context, we need to check whether it is
  	 * the current task context of this cpu.  If not it has been
  	 * scheduled out before the smp call arrived.  In that case
cdd6c482c   Ingo Molnar   perf: Do the big ...
2387
2388
  	 * event->count would have been updated to a recent sample
  	 * when the event was scheduled out.
e1ac3614f   Paul Mackerras   perf_counter: Che...
2389
2390
2391
  	 */
  	if (ctx->task && cpuctx->task_ctx != ctx)
  		return;
e625cce1b   Thomas Gleixner   perf_event: Conve...
2392
  	raw_spin_lock(&ctx->lock);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2393
  	if (ctx->is_active) {
542e72fc9   Peter Zijlstra   perf: Fix reading...
2394
  		update_context_time(ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2395
2396
  		update_cgrp_time_from_event(event);
  	}
cdd6c482c   Ingo Molnar   perf: Do the big ...
2397
  	update_event_times(event);
542e72fc9   Peter Zijlstra   perf: Fix reading...
2398
2399
  	if (event->state == PERF_EVENT_STATE_ACTIVE)
  		event->pmu->read(event);
e625cce1b   Thomas Gleixner   perf_event: Conve...
2400
  	raw_spin_unlock(&ctx->lock);
0793a61d4   Thomas Gleixner   performance count...
2401
  }
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
2402
2403
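  /*
   * Total value of the event: its own count plus the counts folded in
   * from its inherited child events (event->child_count).
   */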
  static inline u64 perf_event_count(struct perf_event *event)
  {
e78505958   Peter Zijlstra   perf: Convert per...
2404
  	return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
2405
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
2406
  static u64 perf_event_read(struct perf_event *event)
0793a61d4   Thomas Gleixner   performance count...
2407
2408
  {
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2409
2410
  	 * If event is enabled and currently active on a CPU, update the
  	 * value in the event structure:
0793a61d4   Thomas Gleixner   performance count...
2411
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
2412
2413
2414
2415
  	if (event->state == PERF_EVENT_STATE_ACTIVE) {
  		smp_call_function_single(event->oncpu,
  					 __perf_event_read, event, 1);
  	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9f   Peter Zijlstra   perf: Fix time lo...
2416
2417
  		struct perf_event_context *ctx = event->ctx;
  		unsigned long flags;
e625cce1b   Thomas Gleixner   perf_event: Conve...
2418
  		raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9a   Stephane Eranian   perf_events: Fix ...
2419
2420
2421
2422
2423
  		/*
  		 * We may read while the context is not active
  		 * (e.g., the thread is blocked); in that case
  		 * we cannot update the context time.
  		 */
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2424
  		if (ctx->is_active) {
c530ccd9a   Stephane Eranian   perf_events: Fix ...
2425
  			update_context_time(ctx);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2426
2427
  			update_cgrp_time_from_event(event);
  		}
cdd6c482c   Ingo Molnar   perf: Do the big ...
2428
  		update_event_times(event);
e625cce1b   Thomas Gleixner   perf_event: Conve...
2429
  		raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d4   Thomas Gleixner   performance count...
2430
  	}
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
2431
  	return perf_event_count(event);
0793a61d4   Thomas Gleixner   performance count...
2432
  }
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2433
  /*
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2434
   * Callchain support
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2435
   */
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2436
2437
2438
2439
2440
  
  struct callchain_cpus_entries {
  	struct rcu_head			rcu_head;
  	struct perf_callchain_entry	*cpu_entries[0];
  };
7ae07ea3a   Frederic Weisbecker   perf: Humanize th...
2441
  static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2442
2443
2444
2445
2446
2447
2448
  static atomic_t nr_callchain_events;
  static DEFINE_MUTEX(callchain_mutex);
  struct callchain_cpus_entries *callchain_cpus_entries;
  
  
  __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
  				  struct pt_regs *regs)
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2449
  {
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2450
  }
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2451
2452
  __weak void perf_callchain_user(struct perf_callchain_entry *entry,
  				struct pt_regs *regs)
0793a61d4   Thomas Gleixner   performance count...
2453
  {
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2454
  }
0793a61d4   Thomas Gleixner   performance count...
2455

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2456
2457
2458
2459
  static void release_callchain_buffers_rcu(struct rcu_head *head)
  {
  	struct callchain_cpus_entries *entries;
  	int cpu;
0793a61d4   Thomas Gleixner   performance count...
2460

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2461
  	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
0793a61d4   Thomas Gleixner   performance count...
2462

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2463
2464
  	for_each_possible_cpu(cpu)
  		kfree(entries->cpu_entries[cpu]);
0793a61d4   Thomas Gleixner   performance count...
2465

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2466
2467
  	kfree(entries);
  }
0793a61d4   Thomas Gleixner   performance count...
2468

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2469
2470
2471
  static void release_callchain_buffers(void)
  {
  	struct callchain_cpus_entries *entries;
0793a61d4   Thomas Gleixner   performance count...
2472

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2473
2474
2475
2476
  	entries = callchain_cpus_entries;
  	rcu_assign_pointer(callchain_cpus_entries, NULL);
  	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
  }
0793a61d4   Thomas Gleixner   performance count...
2477

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2478
2479
2480
2481
2482
  static int alloc_callchain_buffers(void)
  {
  	int cpu;
  	int size;
  	struct callchain_cpus_entries *entries;
0793a61d4   Thomas Gleixner   performance count...
2483

c93f76690   Paul Mackerras   perf_counter: Fix...
2484
  	/*
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2485
2486
2487
  	 * We can't use the percpu allocation API for data that can be
  	 * accessed from NMI. Use a temporary manual per cpu allocation
  	 * until that gets sorted out.
c93f76690   Paul Mackerras   perf_counter: Fix...
2488
  	 */
88d4f0db7   Eric Dumazet   perf: Fix alloc_c...
2489
  	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
c93f76690   Paul Mackerras   perf_counter: Fix...
2490

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2491
2492
2493
  	entries = kzalloc(size, GFP_KERNEL);
  	if (!entries)
  		return -ENOMEM;
c93f76690   Paul Mackerras   perf_counter: Fix...
2494

7ae07ea3a   Frederic Weisbecker   perf: Humanize th...
2495
  	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
0793a61d4   Thomas Gleixner   performance count...
2496

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2497
2498
2499
2500
2501
  	for_each_possible_cpu(cpu) {
  		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
  							 cpu_to_node(cpu));
  		if (!entries->cpu_entries[cpu])
  			goto fail;
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2502
  	}
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2503
  	rcu_assign_pointer(callchain_cpus_entries, entries);
0793a61d4   Thomas Gleixner   performance count...
2504

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
  	return 0;
  
  fail:
  	for_each_possible_cpu(cpu)
  		kfree(entries->cpu_entries[cpu]);
  	kfree(entries);
  
  	return -ENOMEM;
  }
  
  static int get_callchain_buffers(void)
  {
  	int err = 0;
  	int count;
  
  	mutex_lock(&callchain_mutex);
  
  	count = atomic_inc_return(&nr_callchain_events);
  	if (WARN_ON_ONCE(count < 1)) {
  		err = -EINVAL;
  		goto exit;
  	}
  
  	if (count > 1) {
  		/* If the allocation failed, give up */
  		if (!callchain_cpus_entries)
  			err = -ENOMEM;
  		goto exit;
  	}
  
  	err = alloc_callchain_buffers();
  	if (err)
  		release_callchain_buffers();
  exit:
  	mutex_unlock(&callchain_mutex);
  
  	return err;
  }
  
  static void put_callchain_buffers(void)
  {
  	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
  		release_callchain_buffers();
  		mutex_unlock(&callchain_mutex);
  	}
  }
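  
  /*
   * The callchain buffers are used from several execution contexts, so each
   * CPU gets one perf_callchain_entry per context level: index 0 is task
   * context, 1 softirq, 2 hardirq and 3 NMI. get_recursion_context() claims
   * the slot for the current level and returns -1 if that level is already
   * in use, so a nested event in the same context cannot overwrite an entry
   * that is still being filled.
   */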
  
  static int get_recursion_context(int *recursion)
  {
  	int rctx;
  
  	if (in_nmi())
  		rctx = 3;
  	else if (in_irq())
  		rctx = 2;
  	else if (in_softirq())
  		rctx = 1;
  	else
  		rctx = 0;
  
  	if (recursion[rctx])
  		return -1;
  
  	recursion[rctx]++;
  	barrier();
  
  	return rctx;
  }
  
  static inline void put_recursion_context(int *recursion, int rctx)
  {
  	barrier();
  	recursion[rctx]--;
  }
  
  static struct perf_callchain_entry *get_callchain_entry(int *rctx)
  {
  	int cpu;
  	struct callchain_cpus_entries *entries;
  
  	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
  	if (*rctx == -1)
  		return NULL;
  
  	entries = rcu_dereference(callchain_cpus_entries);
  	if (!entries)
  		return NULL;
  
  	cpu = smp_processor_id();
  
  	return &entries->cpu_entries[cpu][*rctx];
  }
  
  static void
  put_callchain_entry(int rctx)
  {
  	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
  }
  
  static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  {
  	int rctx;
  	struct perf_callchain_entry *entry;
  
  
  	entry = get_callchain_entry(&rctx);
  	if (rctx == -1)
  		return NULL;
  
  	if (!entry)
  		goto exit_put;
  
  	entry->nr = 0;
  
  	if (!user_mode(regs)) {
  		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
  		perf_callchain_kernel(entry, regs);
  		if (current->mm)
  			regs = task_pt_regs(current);
  		else
  			regs = NULL;
  	}
  
  	if (regs) {
  		perf_callchain_store(entry, PERF_CONTEXT_USER);
  		perf_callchain_user(entry, regs);
  	}
  
  exit_put:
  	put_callchain_entry(rctx);
  
  	return entry;
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2640
   * Initialize the perf_event context in a task_struct:
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2641
   */
eb1844798   Peter Zijlstra   perf: Clean up pe...
2642
  static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2643
  {
e625cce1b   Thomas Gleixner   perf_event: Conve...
2644
  	raw_spin_lock_init(&ctx->lock);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2645
  	mutex_init(&ctx->mutex);
889ff0150   Frederic Weisbecker   perf/core: Split ...
2646
2647
  	INIT_LIST_HEAD(&ctx->pinned_groups);
  	INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2648
2649
  	INIT_LIST_HEAD(&ctx->event_list);
  	atomic_set(&ctx->refcount, 1);
eb1844798   Peter Zijlstra   perf: Clean up pe...
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
  }
  
  static struct perf_event_context *
  alloc_perf_context(struct pmu *pmu, struct task_struct *task)
  {
  	struct perf_event_context *ctx;
  
  	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
  	if (!ctx)
  		return NULL;
  
  	__perf_event_init_context(ctx);
  	if (task) {
  		ctx->task = task;
  		get_task_struct(task);
0793a61d4   Thomas Gleixner   performance count...
2665
  	}
eb1844798   Peter Zijlstra   perf: Clean up pe...
2666
2667
2668
  	ctx->pmu = pmu;
  
  	return ctx;
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2669
  }
2ebd4ffb6   Matt Helsley   perf events: Spli...
2670
2671
2672
2673
2674
  static struct task_struct *
  find_lively_task_by_vpid(pid_t vpid)
  {
  	struct task_struct *task;
  	int err;
0793a61d4   Thomas Gleixner   performance count...
2675
2676
  
  	rcu_read_lock();
2ebd4ffb6   Matt Helsley   perf events: Spli...
2677
  	if (!vpid)
0793a61d4   Thomas Gleixner   performance count...
2678
2679
  		task = current;
  	else
2ebd4ffb6   Matt Helsley   perf events: Spli...
2680
  		task = find_task_by_vpid(vpid);
0793a61d4   Thomas Gleixner   performance count...
2681
2682
2683
2684
2685
2686
  	if (task)
  		get_task_struct(task);
  	rcu_read_unlock();
  
  	if (!task)
  		return ERR_PTR(-ESRCH);
0793a61d4   Thomas Gleixner   performance count...
2687
  	/* Reuse ptrace permission checks for now. */
c93f76690   Paul Mackerras   perf_counter: Fix...
2688
2689
2690
  	err = -EACCES;
  	if (!ptrace_may_access(task, PTRACE_MODE_READ))
  		goto errout;
2ebd4ffb6   Matt Helsley   perf events: Spli...
2691
2692
2693
2694
2695
2696
  	return task;
  errout:
  	put_task_struct(task);
  	return ERR_PTR(err);
  
  }
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2697
2698
2699
  /*
   * Returns a matching context with refcount and pincount.
   */
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2700
  static struct perf_event_context *
38a81da22   Matt Helsley   perf events: Clea...
2701
  find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d4   Thomas Gleixner   performance count...
2702
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2703
  	struct perf_event_context *ctx;
22a4f650d   Ingo Molnar   perf_counter: Tid...
2704
  	struct perf_cpu_context *cpuctx;
25346b93c   Paul Mackerras   perf_counter: Pro...
2705
  	unsigned long flags;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2706
  	int ctxn, err;
0793a61d4   Thomas Gleixner   performance count...
2707

22a4ec729   Oleg Nesterov   perf: Find_get_co...
2708
  	if (!task) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2709
  		/* Must be root to operate on a CPU event: */
0764771da   Peter Zijlstra   perf_counter: Mor...
2710
  		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d4   Thomas Gleixner   performance count...
2711
  			return ERR_PTR(-EACCES);
0793a61d4   Thomas Gleixner   performance count...
2712
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2713
  		 * We could be clever and allow attaching an event to an
0793a61d4   Thomas Gleixner   performance count...
2714
2715
2716
  		 * offline CPU and activate it when the CPU comes up, but
  		 * that's for later.
  		 */
f6325e30e   Rusty Russell   cpumask: use cpu_...
2717
  		if (!cpu_online(cpu))
0793a61d4   Thomas Gleixner   performance count...
2718
  			return ERR_PTR(-ENODEV);
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
2719
  		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d4   Thomas Gleixner   performance count...
2720
  		ctx = &cpuctx->ctx;
c93f76690   Paul Mackerras   perf_counter: Fix...
2721
  		get_ctx(ctx);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2722
  		++ctx->pin_count;
0793a61d4   Thomas Gleixner   performance count...
2723

0793a61d4   Thomas Gleixner   performance count...
2724
2725
  		return ctx;
  	}
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2726
2727
2728
2729
  	err = -EINVAL;
  	ctxn = pmu->task_ctx_nr;
  	if (ctxn < 0)
  		goto errout;
9ed6060d2   Peter Zijlstra   perf: Unindent la...
2730
  retry:
8dc85d547   Peter Zijlstra   perf: Multiple ta...
2731
  	ctx = perf_lock_task_context(task, ctxn, &flags);
c93f76690   Paul Mackerras   perf_counter: Fix...
2732
  	if (ctx) {
71a851b4d   Peter Zijlstra   perf_counter: Sto...
2733
  		unclone_ctx(ctx);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2734
  		++ctx->pin_count;
e625cce1b   Thomas Gleixner   perf_event: Conve...
2735
  		raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28a   Peter Zijlstra   perf: Clean up 'c...
2736
  	} else {
eb1844798   Peter Zijlstra   perf: Clean up pe...
2737
  		ctx = alloc_perf_context(pmu, task);
c93f76690   Paul Mackerras   perf_counter: Fix...
2738
2739
2740
  		err = -ENOMEM;
  		if (!ctx)
  			goto errout;
eb1844798   Peter Zijlstra   perf: Clean up pe...
2741

dbe08d82c   Oleg Nesterov   perf: Fix find_ge...
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
  		err = 0;
  		mutex_lock(&task->perf_event_mutex);
  		/*
  		 * If the task has already passed perf_event_exit_task(),
  		 * we must see PF_EXITING; that path takes this mutex too.
  		 */
  		if (task->flags & PF_EXITING)
  			err = -ESRCH;
  		else if (task->perf_event_ctxp[ctxn])
  			err = -EAGAIN;
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2752
  		else {
9137fb28a   Peter Zijlstra   perf: Clean up 'c...
2753
  			get_ctx(ctx);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2754
  			++ctx->pin_count;
dbe08d82c   Oleg Nesterov   perf: Fix find_ge...
2755
  			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
2756
  		}
dbe08d82c   Oleg Nesterov   perf: Fix find_ge...
2757
2758
2759
  		mutex_unlock(&task->perf_event_mutex);
  
  		if (unlikely(err)) {
9137fb28a   Peter Zijlstra   perf: Clean up 'c...
2760
  			put_ctx(ctx);
dbe08d82c   Oleg Nesterov   perf: Fix find_ge...
2761
2762
2763
2764
  
  			if (err == -EAGAIN)
  				goto retry;
  			goto errout;
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
2765
2766
  		}
  	}
0793a61d4   Thomas Gleixner   performance count...
2767
  	return ctx;
c93f76690   Paul Mackerras   perf_counter: Fix...
2768

9ed6060d2   Peter Zijlstra   perf: Unindent la...
2769
  errout:
c93f76690   Paul Mackerras   perf_counter: Fix...
2770
  	return ERR_PTR(err);
0793a61d4   Thomas Gleixner   performance count...
2771
  }
6fb2915df   Li Zefan   tracing/profile: ...
2772
  static void perf_event_free_filter(struct perf_event *event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2773
  static void free_event_rcu(struct rcu_head *head)
592903cdc   Peter Zijlstra   perf_counter: add...
2774
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2775
  	struct perf_event *event;
592903cdc   Peter Zijlstra   perf_counter: add...
2776

cdd6c482c   Ingo Molnar   perf: Do the big ...
2777
2778
2779
  	event = container_of(head, struct perf_event, rcu_head);
  	if (event->ns)
  		put_pid_ns(event->ns);
6fb2915df   Li Zefan   tracing/profile: ...
2780
  	perf_event_free_filter(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2781
  	kfree(event);
592903cdc   Peter Zijlstra   perf_counter: add...
2782
  }
76369139c   Frederic Weisbecker   perf: Split up bu...
2783
  static void ring_buffer_put(struct ring_buffer *rb);
925d519ab   Peter Zijlstra   perf_counter: uni...
2784

cdd6c482c   Ingo Molnar   perf: Do the big ...
2785
  static void free_event(struct perf_event *event)
f16009527   Peter Zijlstra   perf_counter: fix...
2786
  {
e360adbe2   Peter Zijlstra   irq_work: Add gen...
2787
  	irq_work_sync(&event->pending);
925d519ab   Peter Zijlstra   perf_counter: uni...
2788

cdd6c482c   Ingo Molnar   perf: Do the big ...
2789
  	if (!event->parent) {
82cd6def9   Peter Zijlstra   perf: Use jump_la...
2790
  		if (event->attach_state & PERF_ATTACH_TASK)
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2791
  			jump_label_dec(&perf_sched_events);
3af9e8592   Eric B Munson   perf: Add non-exe...
2792
  		if (event->attr.mmap || event->attr.mmap_data)
cdd6c482c   Ingo Molnar   perf: Do the big ...
2793
2794
2795
2796
2797
  			atomic_dec(&nr_mmap_events);
  		if (event->attr.comm)
  			atomic_dec(&nr_comm_events);
  		if (event->attr.task)
  			atomic_dec(&nr_task_events);
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
2798
2799
  		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
  			put_callchain_buffers();
08309379b   Peter Zijlstra   perf: Fix cgroup ...
2800
2801
2802
2803
  		if (is_cgroup_event(event)) {
  			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
  			jump_label_dec(&perf_sched_events);
  		}
f344011cc   Peter Zijlstra   perf_counter: Opt...
2804
  	}
9ee318a78   Peter Zijlstra   perf_counter: opt...
2805

76369139c   Frederic Weisbecker   perf: Split up bu...
2806
2807
2808
  	if (event->rb) {
  		ring_buffer_put(event->rb);
  		event->rb = NULL;
a4be7c277   Peter Zijlstra   perf_counter: All...
2809
  	}
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
2810
2811
  	if (is_cgroup_event(event))
  		perf_detach_cgroup(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2812
2813
  	if (event->destroy)
  		event->destroy(event);
e077df4f4   Peter Zijlstra   perf_counter: hoo...
2814

0c67b4087   Peter Zijlstra   perf: Fix free_ev...
2815
2816
  	if (event->ctx)
  		put_ctx(event->ctx);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2817
  	call_rcu(&event->rcu_head, free_event_rcu);
f16009527   Peter Zijlstra   perf_counter: fix...
2818
  }
a66a3052e   Peter Zijlstra   perf_events: Undo...
2819
  int perf_event_release_kernel(struct perf_event *event)
0793a61d4   Thomas Gleixner   performance count...
2820
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2821
  	struct perf_event_context *ctx = event->ctx;
0793a61d4   Thomas Gleixner   performance count...
2822

ad3a37de8   Paul Mackerras   perf_counter: Don...
2823
  	WARN_ON_ONCE(ctx->parent_ctx);
a0507c84b   Peter Zijlstra   perf: Annotate pe...
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
  	/*
  	 * There are two ways this annotation is useful:
  	 *
  	 *  1) there is a lock recursion from perf_event_exit_task();
  	 *     see the comment there.
  	 *
  	 *  2) there is a lock-inversion with mmap_sem through
  	 *     perf_event_read_group(), which takes faults while
  	 *     holding ctx->mutex; however, this is called after
  	 *     the last filedesc has died, so there is no possibility
  	 *     of triggering the AB-BA case.
  	 */
  	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
050735b08   Peter Zijlstra   perf: Fix exit() ...
2837
  	raw_spin_lock_irq(&ctx->lock);
8a49542c0   Peter Zijlstra   perf_events: Fix ...
2838
  	perf_group_detach(event);
050735b08   Peter Zijlstra   perf: Fix exit() ...
2839
  	raw_spin_unlock_irq(&ctx->lock);
e03a9a55b   Peter Zijlstra   perf: Change clos...
2840
  	perf_remove_from_context(event);
d859e29fe   Paul Mackerras   perf_counter: Add...
2841
  	mutex_unlock(&ctx->mutex);
0793a61d4   Thomas Gleixner   performance count...
2842

cdd6c482c   Ingo Molnar   perf: Do the big ...
2843
  	free_event(event);
0793a61d4   Thomas Gleixner   performance count...
2844
2845
2846
  
  	return 0;
  }
a66a3052e   Peter Zijlstra   perf_events: Undo...
2847
  EXPORT_SYMBOL_GPL(perf_event_release_kernel);
0793a61d4   Thomas Gleixner   performance count...
2848

a66a3052e   Peter Zijlstra   perf_events: Undo...
2849
2850
2851
2852
  /*
   * Called when the last reference to the file is gone.
   */
  static int perf_release(struct inode *inode, struct file *file)
fb0459d75   Arjan van de Ven   perf/core: Provid...
2853
  {
a66a3052e   Peter Zijlstra   perf_events: Undo...
2854
  	struct perf_event *event = file->private_data;
8882135bc   Peter Zijlstra   perf: Fix owner-l...
2855
  	struct task_struct *owner;
fb0459d75   Arjan van de Ven   perf/core: Provid...
2856

a66a3052e   Peter Zijlstra   perf_events: Undo...
2857
  	file->private_data = NULL;
fb0459d75   Arjan van de Ven   perf/core: Provid...
2858

8882135bc   Peter Zijlstra   perf: Fix owner-l...
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
  	rcu_read_lock();
  	owner = ACCESS_ONCE(event->owner);
  	/*
  	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
  	 * !owner it means the list deletion is complete and we can indeed
  	 * free this event, otherwise we need to serialize on
  	 * owner->perf_event_mutex.
  	 */
  	smp_read_barrier_depends();
  	if (owner) {
  		/*
  		 * Since delayed_put_task_struct() also drops the last
  		 * task reference we can safely take a new reference
  		 * while holding the rcu_read_lock().
  		 */
  		get_task_struct(owner);
  	}
  	rcu_read_unlock();
  
  	if (owner) {
  		mutex_lock(&owner->perf_event_mutex);
  		/*
  		 * We have to re-check the event->owner field, if it is cleared
  		 * we raced with perf_event_exit_task(), acquiring the mutex
  		 * ensured they're done, and we can proceed with freeing the
  		 * event.
  		 */
  		if (event->owner)
  			list_del_init(&event->owner_entry);
  		mutex_unlock(&owner->perf_event_mutex);
  		put_task_struct(owner);
  	}
a66a3052e   Peter Zijlstra   perf_events: Undo...
2891
  	return perf_event_release_kernel(event);
fb0459d75   Arjan van de Ven   perf/core: Provid...
2892
  }
fb0459d75   Arjan van de Ven   perf/core: Provid...
2893

59ed446f7   Peter Zijlstra   perf: Fix event s...
2894
  u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c09947   Peter Zijlstra   perf_counter: Col...
2895
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2896
  	struct perf_event *child;
e53c09947   Peter Zijlstra   perf_counter: Col...
2897
  	u64 total = 0;
59ed446f7   Peter Zijlstra   perf: Fix event s...
2898
2899
  	*enabled = 0;
  	*running = 0;
6f10581ae   Peter Zijlstra   perf: Fix locking...
2900
  	mutex_lock(&event->child_mutex);
cdd6c482c   Ingo Molnar   perf: Do the big ...
2901
  	total += perf_event_read(event);
59ed446f7   Peter Zijlstra   perf: Fix event s...
2902
2903
2904
2905
2906
2907
  	*enabled += event->total_time_enabled +
  			atomic64_read(&event->child_total_time_enabled);
  	*running += event->total_time_running +
  			atomic64_read(&event->child_total_time_running);
  
  	list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2908
  		total += perf_event_read(child);
59ed446f7   Peter Zijlstra   perf: Fix event s...
2909
2910
2911
  		*enabled += child->total_time_enabled;
  		*running += child->total_time_running;
  	}
6f10581ae   Peter Zijlstra   perf: Fix locking...
2912
  	mutex_unlock(&event->child_mutex);
e53c09947   Peter Zijlstra   perf_counter: Col...
2913
2914
2915
  
  	return total;
  }
fb0459d75   Arjan van de Ven   perf/core: Provid...
2916
  EXPORT_SYMBOL_GPL(perf_event_read_value);
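  
  /*
   * Illustrative in-kernel usage (a sketch, not code from this file): a
   * module that owns an event, e.g. one created with
   * perf_event_create_kernel_counter(), reads it like:
   *
   *	u64 enabled, running;
   *	u64 count = perf_event_read_value(event, &enabled, &running);
   *
   * where 'enabled' and 'running' report the accumulated times that are
   * used for scaling when the event was multiplexed.
   */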
e53c09947   Peter Zijlstra   perf_counter: Col...
2917

cdd6c482c   Ingo Molnar   perf: Do the big ...
2918
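  /*
   * Layout of the buffer copied to user space below for a PERF_FORMAT_GROUP
   * read (fields in [] only when the matching read_format bit is set):
   *
   *	{ u64 nr;			// 1 + number of siblings
   *	  [u64 time_enabled;]		// PERF_FORMAT_TOTAL_TIME_ENABLED
   *	  [u64 time_running;]		// PERF_FORMAT_TOTAL_TIME_RUNNING
   *	  u64 leader_value;
   *	  [u64 leader_id;]		// PERF_FORMAT_ID
   *	  { u64 value; [u64 id;] }	// repeated once per sibling
   *	};
   */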
  static int perf_event_read_group(struct perf_event *event,
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2919
2920
  				   u64 read_format, char __user *buf)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2921
  	struct perf_event *leader = event->group_leader, *sub;
6f10581ae   Peter Zijlstra   perf: Fix locking...
2922
2923
  	int n = 0, size = 0, ret = -EFAULT;
  	struct perf_event_context *ctx = leader->ctx;
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2924
  	u64 values[5];
59ed446f7   Peter Zijlstra   perf: Fix event s...
2925
  	u64 count, enabled, running;
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2926

6f10581ae   Peter Zijlstra   perf: Fix locking...
2927
  	mutex_lock(&ctx->mutex);
59ed446f7   Peter Zijlstra   perf: Fix event s...
2928
  	count = perf_event_read_value(leader, &enabled, &running);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2929
2930
  
  	values[n++] = 1 + leader->nr_siblings;
59ed446f7   Peter Zijlstra   perf: Fix event s...
2931
2932
2933
2934
  	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  		values[n++] = enabled;
  	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  		values[n++] = running;
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2935
2936
2937
  	values[n++] = count;
  	if (read_format & PERF_FORMAT_ID)
  		values[n++] = primary_event_id(leader);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2938
2939
2940
2941
  
  	size = n * sizeof(u64);
  
  	if (copy_to_user(buf, values, size))
6f10581ae   Peter Zijlstra   perf: Fix locking...
2942
  		goto unlock;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2943

6f10581ae   Peter Zijlstra   perf: Fix locking...
2944
  	ret = size;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2945

65abc8653   Ingo Molnar   perf_counter: Ren...
2946
  	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2947
  		n = 0;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2948

59ed446f7   Peter Zijlstra   perf: Fix event s...
2949
  		values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2950
2951
2952
2953
  		if (read_format & PERF_FORMAT_ID)
  			values[n++] = primary_event_id(sub);
  
  		size = n * sizeof(u64);
184d3da8e   Stephane Eranian   perf_events: Fix ...
2954
  		if (copy_to_user(buf + ret, values, size)) {
6f10581ae   Peter Zijlstra   perf: Fix locking...
2955
2956
2957
  			ret = -EFAULT;
  			goto unlock;
  		}
abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2958
2959
  
  		ret += size;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2960
  	}
6f10581ae   Peter Zijlstra   perf: Fix locking...
2961
2962
  unlock:
  	mutex_unlock(&ctx->mutex);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2963

abf4868b8   Peter Zijlstra   perf: Fix PERF_FO...
2964
  	return ret;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2965
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
2966
  static int perf_event_read_one(struct perf_event *event,
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2967
2968
  				 u64 read_format, char __user *buf)
  {
59ed446f7   Peter Zijlstra   perf: Fix event s...
2969
  	u64 enabled, running;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2970
2971
  	u64 values[4];
  	int n = 0;
59ed446f7   Peter Zijlstra   perf: Fix event s...
2972
2973
2974
2975
2976
  	values[n++] = perf_event_read_value(event, &enabled, &running);
  	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  		values[n++] = enabled;
  	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  		values[n++] = running;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2977
  	if (read_format & PERF_FORMAT_ID)
cdd6c482c   Ingo Molnar   perf: Do the big ...
2978
  		values[n++] = primary_event_id(event);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2979
2980
2981
2982
2983
2984
  
  	if (copy_to_user(buf, values, n * sizeof(u64)))
  		return -EFAULT;
  
  	return n * sizeof(u64);
  }
0793a61d4   Thomas Gleixner   performance count...
2985
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2986
   * Read the performance event - simple non blocking version for now
0793a61d4   Thomas Gleixner   performance count...
2987
2988
   */
  static ssize_t
cdd6c482c   Ingo Molnar   perf: Do the big ...
2989
  perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d4   Thomas Gleixner   performance count...
2990
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
2991
  	u64 read_format = event->attr.read_format;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
2992
  	int ret;
0793a61d4   Thomas Gleixner   performance count...
2993

3b6f9e5cb   Paul Mackerras   perf_counter: Add...
2994
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2995
  	 * Return end-of-file for a read on an event that is in an
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
2996
2997
2998
  	 * error state (i.e. because it was pinned but it couldn't be
  	 * scheduled onto the CPU at some point).
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
2999
  	if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
3000
  		return 0;
c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
3001
  	if (count < event->read_size)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3002
  		return -ENOSPC;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3003
  	WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3004
  	if (read_format & PERF_FORMAT_GROUP)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3005
  		ret = perf_event_read_group(event, read_format, buf);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3006
  	else
cdd6c482c   Ingo Molnar   perf: Do the big ...
3007
  		ret = perf_event_read_one(event, read_format, buf);
0793a61d4   Thomas Gleixner   performance count...
3008

3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3009
  	return ret;
0793a61d4   Thomas Gleixner   performance count...
3010
3011
3012
  }
  
  static ssize_t
0793a61d4   Thomas Gleixner   performance count...
3013
3014
  perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3015
  	struct perf_event *event = file->private_data;
0793a61d4   Thomas Gleixner   performance count...
3016

cdd6c482c   Ingo Molnar   perf: Do the big ...
3017
  	return perf_read_hw(event, buf, count);
0793a61d4   Thomas Gleixner   performance count...
3018
3019
3020
3021
  }
  
  static unsigned int perf_poll(struct file *file, poll_table *wait)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3022
  	struct perf_event *event = file->private_data;
76369139c   Frederic Weisbecker   perf: Split up bu...
3023
  	struct ring_buffer *rb;
c33a0bc4e   Peter Zijlstra   perf_counter: fix...
3024
  	unsigned int events = POLL_HUP;
c7138f37f   Peter Zijlstra   perf_counter: fix...
3025
3026
  
  	rcu_read_lock();
76369139c   Frederic Weisbecker   perf: Split up bu...
3027
3028
3029
  	rb = rcu_dereference(event->rb);
  	if (rb)
  		events = atomic_xchg(&rb->poll, 0);
c7138f37f   Peter Zijlstra   perf_counter: fix...
3030
  	rcu_read_unlock();
0793a61d4   Thomas Gleixner   performance count...
3031

cdd6c482c   Ingo Molnar   perf: Do the big ...
3032
  	poll_wait(file, &event->waitq, wait);
0793a61d4   Thomas Gleixner   performance count...
3033

0793a61d4   Thomas Gleixner   performance count...
3034
3035
  	return events;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3036
  static void perf_event_reset(struct perf_event *event)
6de6a7b95   Peter Zijlstra   perf_counter: add...
3037
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3038
  	(void)perf_event_read(event);
e78505958   Peter Zijlstra   perf: Convert per...
3039
  	local64_set(&event->count, 0);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3040
  	perf_event_update_userpage(event);
3df5edad8   Peter Zijlstra   perf_counter: rew...
3041
  }
c93f76690   Paul Mackerras   perf_counter: Fix...
3042
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
3043
3044
3045
3046
   * Holding the top-level event's child_mutex means that any
   * descendant process that has inherited this event will block
   * in sync_child_event if it goes to exit, thus satisfying the
   * task existence requirements of perf_event_enable/disable.
c93f76690   Paul Mackerras   perf_counter: Fix...
3047
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
3048
3049
  static void perf_event_for_each_child(struct perf_event *event,
  					void (*func)(struct perf_event *))
3df5edad8   Peter Zijlstra   perf_counter: rew...
3050
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3051
  	struct perf_event *child;
3df5edad8   Peter Zijlstra   perf_counter: rew...
3052

cdd6c482c   Ingo Molnar   perf: Do the big ...
3053
3054
3055
3056
  	WARN_ON_ONCE(event->ctx->parent_ctx);
  	mutex_lock(&event->child_mutex);
  	func(event);
  	list_for_each_entry(child, &event->child_list, child_list)
3df5edad8   Peter Zijlstra   perf_counter: rew...
3057
  		func(child);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3058
  	mutex_unlock(&event->child_mutex);
3df5edad8   Peter Zijlstra   perf_counter: rew...
3059
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3060
3061
  static void perf_event_for_each(struct perf_event *event,
  				  void (*func)(struct perf_event *))
3df5edad8   Peter Zijlstra   perf_counter: rew...
3062
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3063
3064
  	struct perf_event_context *ctx = event->ctx;
  	struct perf_event *sibling;
3df5edad8   Peter Zijlstra   perf_counter: rew...
3065

75f937f24   Peter Zijlstra   perf_counter: Fix...
3066
3067
  	WARN_ON_ONCE(ctx->parent_ctx);
  	mutex_lock(&ctx->mutex);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3068
  	event = event->group_leader;
75f937f24   Peter Zijlstra   perf_counter: Fix...
3069

cdd6c482c   Ingo Molnar   perf: Do the big ...
3070
3071
3072
3073
  	perf_event_for_each_child(event, func);
  	func(event);
  	list_for_each_entry(sibling, &event->sibling_list, group_entry)
  		perf_event_for_each_child(event, func);
75f937f24   Peter Zijlstra   perf_counter: Fix...
3074
  	mutex_unlock(&ctx->mutex);
6de6a7b95   Peter Zijlstra   perf_counter: add...
3075
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3076
  static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31c   Peter Zijlstra   perf_counter: Add...
3077
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3078
  	struct perf_event_context *ctx = event->ctx;
08247e31c   Peter Zijlstra   perf_counter: Add...
3079
3080
  	int ret = 0;
  	u64 value;
6c7e550f1   Franck Bui-Huu   perf: Introduce i...
3081
  	if (!is_sampling_event(event))
08247e31c   Peter Zijlstra   perf_counter: Add...
3082
  		return -EINVAL;
ad0cf3478   John Blackwood   perf: Fix incorre...
3083
  	if (copy_from_user(&value, arg, sizeof(value)))
08247e31c   Peter Zijlstra   perf_counter: Add...
3084
3085
3086
3087
  		return -EFAULT;
  
  	if (!value)
  		return -EINVAL;
e625cce1b   Thomas Gleixner   perf_event: Conve...
3088
  	raw_spin_lock_irq(&ctx->lock);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3089
3090
  	if (event->attr.freq) {
  		if (value > sysctl_perf_event_sample_rate) {
08247e31c   Peter Zijlstra   perf_counter: Add...
3091
3092
3093
  			ret = -EINVAL;
  			goto unlock;
  		}
cdd6c482c   Ingo Molnar   perf: Do the big ...
3094
  		event->attr.sample_freq = value;
08247e31c   Peter Zijlstra   perf_counter: Add...
3095
  	} else {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3096
3097
  		event->attr.sample_period = value;
  		event->hw.sample_period = value;
08247e31c   Peter Zijlstra   perf_counter: Add...
3098
3099
  	}
  unlock:
e625cce1b   Thomas Gleixner   perf_event: Conve...
3100
  	raw_spin_unlock_irq(&ctx->lock);
08247e31c   Peter Zijlstra   perf_counter: Add...
3101
3102
3103
  
  	return ret;
  }
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
  static const struct file_operations perf_fops;
  
  static struct perf_event *perf_fget_light(int fd, int *fput_needed)
  {
  	struct file *file;
  
  	file = fget_light(fd, fput_needed);
  	if (!file)
  		return ERR_PTR(-EBADF);
  
  	if (file->f_op != &perf_fops) {
  		fput_light(file, *fput_needed);
  		*fput_needed = 0;
  		return ERR_PTR(-EBADF);
  	}
  
  	return file->private_data;
  }
  
  static int perf_event_set_output(struct perf_event *event,
  				 struct perf_event *output_event);
6fb2915df   Li Zefan   tracing/profile: ...
3125
  static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c277   Peter Zijlstra   perf_counter: All...
3126

d859e29fe   Paul Mackerras   perf_counter: Add...
3127
3128
  static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3129
3130
  	struct perf_event *event = file->private_data;
  	void (*func)(struct perf_event *);
3df5edad8   Peter Zijlstra   perf_counter: rew...
3131
  	u32 flags = arg;
d859e29fe   Paul Mackerras   perf_counter: Add...
3132
3133
  
  	switch (cmd) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3134
3135
  	case PERF_EVENT_IOC_ENABLE:
  		func = perf_event_enable;
d859e29fe   Paul Mackerras   perf_counter: Add...
3136
  		break;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3137
3138
  	case PERF_EVENT_IOC_DISABLE:
  		func = perf_event_disable;
79f146415   Peter Zijlstra   perf_counter: cou...
3139
  		break;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3140
3141
  	case PERF_EVENT_IOC_RESET:
  		func = perf_event_reset;
6de6a7b95   Peter Zijlstra   perf_counter: add...
3142
  		break;
3df5edad8   Peter Zijlstra   perf_counter: rew...
3143

cdd6c482c   Ingo Molnar   perf: Do the big ...
3144
3145
  	case PERF_EVENT_IOC_REFRESH:
  		return perf_event_refresh(event, arg);
08247e31c   Peter Zijlstra   perf_counter: Add...
3146

cdd6c482c   Ingo Molnar   perf: Do the big ...
3147
3148
  	case PERF_EVENT_IOC_PERIOD:
  		return perf_event_period(event, (u64 __user *)arg);
08247e31c   Peter Zijlstra   perf_counter: Add...
3149

cdd6c482c   Ingo Molnar   perf: Do the big ...
3150
  	case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
  	{
  		struct perf_event *output_event = NULL;
  		int fput_needed = 0;
  		int ret;
  
  		if (arg != -1) {
  			output_event = perf_fget_light(arg, &fput_needed);
  			if (IS_ERR(output_event))
  				return PTR_ERR(output_event);
  		}
  
  		ret = perf_event_set_output(event, output_event);
  		if (output_event)
  			fput_light(output_event->filp, fput_needed);
  
  		return ret;
  	}
a4be7c277   Peter Zijlstra   perf_counter: All...
3168

6fb2915df   Li Zefan   tracing/profile: ...
3169
3170
  	case PERF_EVENT_IOC_SET_FILTER:
  		return perf_event_set_filter(event, (void __user *)arg);
d859e29fe   Paul Mackerras   perf_counter: Add...
3171
  	default:
3df5edad8   Peter Zijlstra   perf_counter: rew...
3172
  		return -ENOTTY;
d859e29fe   Paul Mackerras   perf_counter: Add...
3173
  	}
3df5edad8   Peter Zijlstra   perf_counter: rew...
3174
3175
  
  	if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3176
  		perf_event_for_each(event, func);
3df5edad8   Peter Zijlstra   perf_counter: rew...
3177
  	else
cdd6c482c   Ingo Molnar   perf: Do the big ...
3178
  		perf_event_for_each_child(event, func);
3df5edad8   Peter Zijlstra   perf_counter: rew...
3179
3180
  
  	return 0;
d859e29fe   Paul Mackerras   perf_counter: Add...
3181
  }
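
  /*
   * Illustrative userspace sketch (not part of this file) of the ioctls
   * dispatched above.  group_fd, event_fd and target_fd are assumed to be
   * perf event fds from perf_event_open(); PERF_IOC_FLAG_GROUP makes the
   * request apply to the whole group via perf_event_for_each(), and passing
   * -1 instead of an fd to SET_OUTPUT removes an existing redirection.
   *
   *	ioctl(group_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
   *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
   *
   *	ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
   */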
cdd6c482c   Ingo Molnar   perf: Do the big ...
3182
  int perf_event_task_enable(void)
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3183
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3184
  	struct perf_event *event;
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3185

cdd6c482c   Ingo Molnar   perf: Do the big ...
3186
3187
3188
3189
  	mutex_lock(&current->perf_event_mutex);
  	list_for_each_entry(event, &current->perf_event_list, owner_entry)
  		perf_event_for_each_child(event, perf_event_enable);
  	mutex_unlock(&current->perf_event_mutex);
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3190
3191
3192
  
  	return 0;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3193
  int perf_event_task_disable(void)
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3194
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3195
  	struct perf_event *event;
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3196

cdd6c482c   Ingo Molnar   perf: Do the big ...
3197
3198
3199
3200
  	mutex_lock(&current->perf_event_mutex);
  	list_for_each_entry(event, &current->perf_event_list, owner_entry)
  		perf_event_for_each_child(event, perf_event_disable);
  	mutex_unlock(&current->perf_event_mutex);
771d7cde1   Peter Zijlstra   perf_counter: Mak...
3201
3202
3203
  
  	return 0;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3204
3205
  #ifndef PERF_EVENT_INDEX_OFFSET
  # define PERF_EVENT_INDEX_OFFSET 0
f738eb1b6   Ingo Molnar   perf_counter: Fix...
3206
  #endif
cdd6c482c   Ingo Molnar   perf: Do the big ...
3207
  static int perf_event_index(struct perf_event *event)
194002b27   Peter Zijlstra   perf_counter, x86...
3208
  {
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
3209
3210
  	if (event->hw.state & PERF_HES_STOPPED)
  		return 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3211
  	if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b27   Peter Zijlstra   perf_counter, x86...
3212
  		return 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3213
  	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
194002b27   Peter Zijlstra   perf_counter, x86...
3214
  }
c47942959   Eric B Munson   events: Move lock...
3215
  static void calc_timer_values(struct perf_event *event,
7f310a5d4   Eric B Munson   perf_event: Fix b...
3216
3217
  				u64 *enabled,
  				u64 *running)
c47942959   Eric B Munson   events: Move lock...
3218
3219
3220
3221
3222
3223
3224
3225
  {
  	u64 now, ctx_time;
  
  	now = perf_clock();
  	ctx_time = event->shadow_ctx_time + now;
  	*enabled = ctx_time - event->tstamp_enabled;
  	*running = ctx_time - event->tstamp_running;
  }
38ff667b3   Peter Zijlstra   perf_counter: fix...
3226
3227
3228
3229
3230
  /*
   * Callers need to ensure there can be no nesting of this function, otherwise
   * the seqlock logic goes bad. We cannot serialize this because the arch
   * code calls this from NMI context.
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
3231
  void perf_event_update_userpage(struct perf_event *event)
37d818283   Paul Mackerras   perf_counter: add...
3232
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3233
  	struct perf_event_mmap_page *userpg;
76369139c   Frederic Weisbecker   perf: Split up bu...
3234
  	struct ring_buffer *rb;
0d6412085   Eric B Munson   events: Ensure th...
3235
  	u64 enabled, running;
38ff667b3   Peter Zijlstra   perf_counter: fix...
3236
3237
  
  	rcu_read_lock();
0d6412085   Eric B Munson   events: Ensure th...
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
  	/*
  	 * compute total_time_enabled, total_time_running
  	 * based on snapshot values taken when the event
  	 * was last scheduled in.
  	 *
  	 * we cannot simply call update_context_time()
  	 * because of locking issues, as we can be called in
  	 * NMI context
  	 */
  	calc_timer_values(event, &enabled, &running);
76369139c   Frederic Weisbecker   perf: Split up bu...
3248
3249
  	rb = rcu_dereference(event->rb);
  	if (!rb)
38ff667b3   Peter Zijlstra   perf_counter: fix...
3250
  		goto unlock;
76369139c   Frederic Weisbecker   perf: Split up bu...
3251
  	userpg = rb->user_page;
37d818283   Paul Mackerras   perf_counter: add...
3252

7b732a750   Peter Zijlstra   perf_counter: new...
3253
3254
3255
3256
3257
  	/*
  	 * Disable preemption so as to not let the corresponding user-space
  	 * spin too long if we get preempted.
  	 */
  	preempt_disable();
37d818283   Paul Mackerras   perf_counter: add...
3258
  	++userpg->lock;
92f22a386   Peter Zijlstra   perf_counter: upd...
3259
  	barrier();
cdd6c482c   Ingo Molnar   perf: Do the big ...
3260
  	userpg->index = perf_event_index(event);
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
3261
  	userpg->offset = perf_event_count(event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3262
  	if (event->state == PERF_EVENT_STATE_ACTIVE)
e78505958   Peter Zijlstra   perf: Convert per...
3263
  		userpg->offset -= local64_read(&event->hw.prev_count);
7b732a750   Peter Zijlstra   perf_counter: new...
3264

0d6412085   Eric B Munson   events: Ensure th...
3265
  	userpg->time_enabled = enabled +
cdd6c482c   Ingo Molnar   perf: Do the big ...
3266
  			atomic64_read(&event->child_total_time_enabled);
7f8b4e4e0   Peter Zijlstra   perf_counter: Add...
3267

0d6412085   Eric B Munson   events: Ensure th...
3268
  	userpg->time_running = running +
cdd6c482c   Ingo Molnar   perf: Do the big ...
3269
  			atomic64_read(&event->child_total_time_running);
7f8b4e4e0   Peter Zijlstra   perf_counter: Add...
3270

92f22a386   Peter Zijlstra   perf_counter: upd...
3271
  	barrier();
37d818283   Paul Mackerras   perf_counter: add...
3272
  	++userpg->lock;
7b732a750   Peter Zijlstra   perf_counter: new...
3273
  	preempt_enable();
38ff667b3   Peter Zijlstra   perf_counter: fix...
3274
  unlock:
7b732a750   Peter Zijlstra   perf_counter: new...
3275
  	rcu_read_unlock();
37d818283   Paul Mackerras   perf_counter: add...
3276
  }
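
  /*
   * Illustrative userspace sketch (not part of this file): ->lock above acts
   * as a seqcount, so a reader of the mmap()ed control page retries whenever
   * it raced with an update.  pc is assumed to point at the mapped
   * struct perf_event_mmap_page and barrier() to be a compiler barrier.
   *
   *	__u32 seq, idx;
   *	__u64 offset, enabled, running;
   *
   *	do {
   *		seq = pc->lock;
   *		barrier();
   *		idx     = pc->index;
   *		offset  = pc->offset;
   *		enabled = pc->time_enabled;
   *		running = pc->time_running;
   *		barrier();
   *	} while (pc->lock != seq);
   */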
906010b21   Peter Zijlstra   perf_event: Provi...
3277
3278
3279
  static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
  	struct perf_event *event = vma->vm_file->private_data;
76369139c   Frederic Weisbecker   perf: Split up bu...
3280
  	struct ring_buffer *rb;
906010b21   Peter Zijlstra   perf_event: Provi...
3281
3282
3283
3284
3285
3286
3287
3288
3289
  	int ret = VM_FAULT_SIGBUS;
  
  	if (vmf->flags & FAULT_FLAG_MKWRITE) {
  		if (vmf->pgoff == 0)
  			ret = 0;
  		return ret;
  	}
  
  	rcu_read_lock();
76369139c   Frederic Weisbecker   perf: Split up bu...
3290
3291
  	rb = rcu_dereference(event->rb);
  	if (!rb)
906010b21   Peter Zijlstra   perf_event: Provi...
3292
3293
3294
3295
  		goto unlock;
  
  	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
  		goto unlock;
76369139c   Frederic Weisbecker   perf: Split up bu...
3296
  	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b21   Peter Zijlstra   perf_event: Provi...
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
  	if (!vmf->page)
  		goto unlock;
  
  	get_page(vmf->page);
  	vmf->page->mapping = vma->vm_file->f_mapping;
  	vmf->page->index   = vmf->pgoff;
  
  	ret = 0;
  unlock:
  	rcu_read_unlock();
  
  	return ret;
  }
76369139c   Frederic Weisbecker   perf: Split up bu...
3310
  static void rb_free_rcu(struct rcu_head *rcu_head)
906010b21   Peter Zijlstra   perf_event: Provi...
3311
  {
76369139c   Frederic Weisbecker   perf: Split up bu...
3312
  	struct ring_buffer *rb;
906010b21   Peter Zijlstra   perf_event: Provi...
3313

76369139c   Frederic Weisbecker   perf: Split up bu...
3314
3315
  	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
  	rb_free(rb);
7b732a750   Peter Zijlstra   perf_counter: new...
3316
  }
76369139c   Frederic Weisbecker   perf: Split up bu...
3317
  static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a750   Peter Zijlstra   perf_counter: new...
3318
  {
76369139c   Frederic Weisbecker   perf: Split up bu...
3319
  	struct ring_buffer *rb;
7b732a750   Peter Zijlstra   perf_counter: new...
3320

ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3321
  	rcu_read_lock();
76369139c   Frederic Weisbecker   perf: Split up bu...
3322
3323
3324
3325
  	rb = rcu_dereference(event->rb);
  	if (rb) {
  		if (!atomic_inc_not_zero(&rb->refcount))
  			rb = NULL;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3326
3327
  	}
  	rcu_read_unlock();
76369139c   Frederic Weisbecker   perf: Split up bu...
3328
  	return rb;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3329
  }
76369139c   Frederic Weisbecker   perf: Split up bu...
3330
  static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3331
  {
76369139c   Frederic Weisbecker   perf: Split up bu...
3332
  	if (!atomic_dec_and_test(&rb->refcount))
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3333
  		return;
7b732a750   Peter Zijlstra   perf_counter: new...
3334

76369139c   Frederic Weisbecker   perf: Split up bu...
3335
  	call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a750   Peter Zijlstra   perf_counter: new...
3336
3337
3338
3339
  }
  
  static void perf_mmap_open(struct vm_area_struct *vma)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3340
  	struct perf_event *event = vma->vm_file->private_data;
7b732a750   Peter Zijlstra   perf_counter: new...
3341

cdd6c482c   Ingo Molnar   perf: Do the big ...
3342
  	atomic_inc(&event->mmap_count);
7b732a750   Peter Zijlstra   perf_counter: new...
3343
3344
3345
3346
  }
  
  static void perf_mmap_close(struct vm_area_struct *vma)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3347
  	struct perf_event *event = vma->vm_file->private_data;
7b732a750   Peter Zijlstra   perf_counter: new...
3348

cdd6c482c   Ingo Molnar   perf: Do the big ...
3349
  	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
76369139c   Frederic Weisbecker   perf: Split up bu...
3350
  		unsigned long size = perf_data_size(event->rb);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3351
  		struct user_struct *user = event->mmap_user;
76369139c   Frederic Weisbecker   perf: Split up bu...
3352
  		struct ring_buffer *rb = event->rb;
789f90fcf   Peter Zijlstra   perf_counter: per...
3353

906010b21   Peter Zijlstra   perf_event: Provi...
3354
  		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3355
  		vma->vm_mm->locked_vm -= event->mmap_locked;
76369139c   Frederic Weisbecker   perf: Split up bu...
3356
  		rcu_assign_pointer(event->rb, NULL);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3357
  		mutex_unlock(&event->mmap_mutex);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3358

76369139c   Frederic Weisbecker   perf: Split up bu...
3359
  		ring_buffer_put(rb);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3360
  		free_uid(user);
7b732a750   Peter Zijlstra   perf_counter: new...
3361
  	}
37d818283   Paul Mackerras   perf_counter: add...
3362
  }
f0f37e2f7   Alexey Dobriyan   const: mark struc...
3363
  static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea81   Peter Zijlstra   perf_counter: Add...
3364
3365
3366
3367
  	.open		= perf_mmap_open,
  	.close		= perf_mmap_close,
  	.fault		= perf_mmap_fault,
  	.page_mkwrite	= perf_mmap_fault,
37d818283   Paul Mackerras   perf_counter: add...
3368
3369
3370
3371
  };
  
  static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3372
  	struct perf_event *event = file->private_data;
22a4f650d   Ingo Molnar   perf_counter: Tid...
3373
  	unsigned long user_locked, user_lock_limit;
789f90fcf   Peter Zijlstra   perf_counter: per...
3374
  	struct user_struct *user = current_user();
22a4f650d   Ingo Molnar   perf_counter: Tid...
3375
  	unsigned long locked, lock_limit;
76369139c   Frederic Weisbecker   perf: Split up bu...
3376
  	struct ring_buffer *rb;
7b732a750   Peter Zijlstra   perf_counter: new...
3377
3378
  	unsigned long vma_size;
  	unsigned long nr_pages;
789f90fcf   Peter Zijlstra   perf_counter: per...
3379
  	long user_extra, extra;
d57e34fdd   Peter Zijlstra   perf: Simplify th...
3380
  	int ret = 0, flags = 0;
37d818283   Paul Mackerras   perf_counter: add...
3381

c7920614c   Peter Zijlstra   perf: Disallow mm...
3382
3383
3384
  	/*
  	 * Don't allow mmap() of inherited per-task counters. This would
  	 * create a performance issue due to all children writing to the
76369139c   Frederic Weisbecker   perf: Split up bu...
3385
  	 * same rb.
c7920614c   Peter Zijlstra   perf: Disallow mm...
3386
3387
3388
  	 */
  	if (event->cpu == -1 && event->attr.inherit)
  		return -EINVAL;
43a21ea81   Peter Zijlstra   perf_counter: Add...
3389
  	if (!(vma->vm_flags & VM_SHARED))
37d818283   Paul Mackerras   perf_counter: add...
3390
  		return -EINVAL;
7b732a750   Peter Zijlstra   perf_counter: new...
3391
3392
3393
  
  	vma_size = vma->vm_end - vma->vm_start;
  	nr_pages = (vma_size / PAGE_SIZE) - 1;
7730d8655   Peter Zijlstra   perf_counter: all...
3394
  	/*
76369139c   Frederic Weisbecker   perf: Split up bu...
3395
  	 * If we have rb pages ensure they're a power-of-two number, so we
7730d8655   Peter Zijlstra   perf_counter: all...
3396
3397
3398
  	 * can do bitmasks instead of modulo.
  	 */
  	if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d818283   Paul Mackerras   perf_counter: add...
3399
  		return -EINVAL;
7b732a750   Peter Zijlstra   perf_counter: new...
3400
  	if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d818283   Paul Mackerras   perf_counter: add...
3401
  		return -EINVAL;
7b732a750   Peter Zijlstra   perf_counter: new...
3402
3403
  	if (vma->vm_pgoff != 0)
  		return -EINVAL;
37d818283   Paul Mackerras   perf_counter: add...
3404

cdd6c482c   Ingo Molnar   perf: Do the big ...
3405
3406
  	WARN_ON_ONCE(event->ctx->parent_ctx);
  	mutex_lock(&event->mmap_mutex);
76369139c   Frederic Weisbecker   perf: Split up bu...
3407
3408
3409
  	if (event->rb) {
  		if (event->rb->nr_pages == nr_pages)
  			atomic_inc(&event->rb->refcount);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3410
  		else
ebb3c4c4c   Peter Zijlstra   perf_counter: fix...
3411
3412
3413
  			ret = -EINVAL;
  		goto unlock;
  	}
789f90fcf   Peter Zijlstra   perf_counter: per...
3414
  	user_extra = nr_pages + 1;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3415
  	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f8   Ingo Molnar   perf_counter: Inc...
3416
3417
3418
3419
3420
  
  	/*
  	 * Increase the limit linearly with more CPUs:
  	 */
  	user_lock_limit *= num_online_cpus();
789f90fcf   Peter Zijlstra   perf_counter: per...
3421
  	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78b   Peter Zijlstra   perf_counter: pro...
3422

789f90fcf   Peter Zijlstra   perf_counter: per...
3423
3424
3425
  	extra = 0;
  	if (user_locked > user_lock_limit)
  		extra = user_locked - user_lock_limit;
7b732a750   Peter Zijlstra   perf_counter: new...
3426

78d7d407b   Jiri Slaby   kernel core: use ...
3427
  	lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a750   Peter Zijlstra   perf_counter: new...
3428
  	lock_limit >>= PAGE_SHIFT;
789f90fcf   Peter Zijlstra   perf_counter: per...
3429
  	locked = vma->vm_mm->locked_vm + extra;
7b732a750   Peter Zijlstra   perf_counter: new...
3430

459ec28ab   Ingo Molnar   perf_counter: All...
3431
3432
  	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
  		!capable(CAP_IPC_LOCK)) {
ebb3c4c4c   Peter Zijlstra   perf_counter: fix...
3433
3434
3435
  		ret = -EPERM;
  		goto unlock;
  	}
7b732a750   Peter Zijlstra   perf_counter: new...
3436

76369139c   Frederic Weisbecker   perf: Split up bu...
3437
  	WARN_ON(event->rb);
906010b21   Peter Zijlstra   perf_event: Provi...
3438

d57e34fdd   Peter Zijlstra   perf: Simplify th...
3439
  	if (vma->vm_flags & VM_WRITE)
76369139c   Frederic Weisbecker   perf: Split up bu...
3440
  		flags |= RING_BUFFER_WRITABLE;
d57e34fdd   Peter Zijlstra   perf: Simplify th...
3441

4ec8363df   Vince Weaver   perf_events: Fix ...
3442
3443
3444
  	rb = rb_alloc(nr_pages, 
  		event->attr.watermark ? event->attr.wakeup_watermark : 0,
  		event->cpu, flags);
76369139c   Frederic Weisbecker   perf: Split up bu...
3445
  	if (!rb) {
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3446
  		ret = -ENOMEM;
ebb3c4c4c   Peter Zijlstra   perf_counter: fix...
3447
  		goto unlock;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3448
  	}
76369139c   Frederic Weisbecker   perf: Split up bu...
3449
  	rcu_assign_pointer(event->rb, rb);
43a21ea81   Peter Zijlstra   perf_counter: Add...
3450

ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3451
3452
3453
3454
  	atomic_long_add(user_extra, &user->locked_vm);
  	event->mmap_locked = extra;
  	event->mmap_user = get_current_user();
  	vma->vm_mm->locked_vm += event->mmap_locked;
ebb3c4c4c   Peter Zijlstra   perf_counter: fix...
3455
  unlock:
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
3456
3457
  	if (!ret)
  		atomic_inc(&event->mmap_count);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3458
  	mutex_unlock(&event->mmap_mutex);
37d818283   Paul Mackerras   perf_counter: add...
3459

37d818283   Paul Mackerras   perf_counter: add...
3460
3461
  	vma->vm_flags |= VM_RESERVED;
  	vma->vm_ops = &perf_mmap_vmops;
7b732a750   Peter Zijlstra   perf_counter: new...
3462
3463
  
  	return ret;
37d818283   Paul Mackerras   perf_counter: add...
3464
  }
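
  /*
   * Illustrative userspace sketch (not part of this file): as enforced above,
   * a mapping must start at pgoff 0 and cover the control page plus a
   * power-of-two number of data pages.  perf_fd and n are assumed inputs.
   *
   *	size_t page_size = sysconf(_SC_PAGESIZE);
   *	size_t len = (1 + (1UL << n)) * page_size;
   *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
   *			  MAP_SHARED, perf_fd, 0);
   */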
3c446b3d3   Peter Zijlstra   perf_counter: SIG...
3465
3466
  static int perf_fasync(int fd, struct file *filp, int on)
  {
3c446b3d3   Peter Zijlstra   perf_counter: SIG...
3467
  	struct inode *inode = filp->f_path.dentry->d_inode;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3468
  	struct perf_event *event = filp->private_data;
3c446b3d3   Peter Zijlstra   perf_counter: SIG...
3469
3470
3471
  	int retval;
  
  	mutex_lock(&inode->i_mutex);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3472
  	retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d3   Peter Zijlstra   perf_counter: SIG...
3473
3474
3475
3476
3477
3478
3479
  	mutex_unlock(&inode->i_mutex);
  
  	if (retval < 0)
  		return retval;
  
  	return 0;
  }
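
  /*
   * Illustrative userspace sketch (not part of this file): the fasync hook
   * above is armed with the usual fcntl() sequence, after which ring-buffer
   * wakeups are delivered as SIGIO.  perf_fd is assumed.
   *
   *	fcntl(perf_fd, F_SETOWN, getpid());
   *	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
   */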
0793a61d4   Thomas Gleixner   performance count...
3480
  static const struct file_operations perf_fops = {
3326c1cee   Arnd Bergmann   perf_event: Make ...
3481
  	.llseek			= no_llseek,
0793a61d4   Thomas Gleixner   performance count...
3482
3483
3484
  	.release		= perf_release,
  	.read			= perf_read,
  	.poll			= perf_poll,
d859e29fe   Paul Mackerras   perf_counter: Add...
3485
3486
  	.unlocked_ioctl		= perf_ioctl,
  	.compat_ioctl		= perf_ioctl,
37d818283   Paul Mackerras   perf_counter: add...
3487
  	.mmap			= perf_mmap,
3c446b3d3   Peter Zijlstra   perf_counter: SIG...
3488
  	.fasync			= perf_fasync,
0793a61d4   Thomas Gleixner   performance count...
3489
  };
15dbf27cc   Peter Zijlstra   perf_counter: sof...
3490
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
3491
   * Perf event wakeup
925d519ab   Peter Zijlstra   perf_counter: uni...
3492
3493
3494
3495
   *
   * If there's data, ensure we set the poll() state and publish everything
   * to user-space before waking everybody up.
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
3496
  void perf_event_wakeup(struct perf_event *event)
925d519ab   Peter Zijlstra   perf_counter: uni...
3497
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3498
  	wake_up_all(&event->waitq);
4c9e25428   Peter Zijlstra   perf_counter: cha...
3499

cdd6c482c   Ingo Molnar   perf: Do the big ...
3500
3501
3502
  	if (event->pending_kill) {
  		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
  		event->pending_kill = 0;
4c9e25428   Peter Zijlstra   perf_counter: cha...
3503
  	}
925d519ab   Peter Zijlstra   perf_counter: uni...
3504
  }
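
  /*
   * Illustrative userspace sketch (not part of this file): the wakeup above
   * is what a blocking reader observes as poll() readiness on the event fd.
   * perf_fd is assumed to have an mmap()ed buffer and consume_records() is a
   * hypothetical consumer of that buffer.
   *
   *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
   *
   *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
   *		consume_records();
   */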
e360adbe2   Peter Zijlstra   irq_work: Add gen...
3505
  static void perf_pending_event(struct irq_work *entry)
79f146415   Peter Zijlstra   perf_counter: cou...
3506
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3507
3508
  	struct perf_event *event = container_of(entry,
  			struct perf_event, pending);
79f146415   Peter Zijlstra   perf_counter: cou...
3509

cdd6c482c   Ingo Molnar   perf: Do the big ...
3510
3511
3512
  	if (event->pending_disable) {
  		event->pending_disable = 0;
  		__perf_event_disable(event);
79f146415   Peter Zijlstra   perf_counter: cou...
3513
  	}
cdd6c482c   Ingo Molnar   perf: Do the big ...
3514
3515
3516
  	if (event->pending_wakeup) {
  		event->pending_wakeup = 0;
  		perf_event_wakeup(event);
79f146415   Peter Zijlstra   perf_counter: cou...
3517
3518
  	}
  }
394ee0762   Peter Zijlstra   perf_counter: pro...
3519
  /*
39447b386   Zhang, Yanmin   perf: Enhance per...
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
   * We assume there is only KVM supporting the callbacks.
   * Later on, we might change it to a list if there is
   * another virtualization implementation supporting the callbacks.
   */
  struct perf_guest_info_callbacks *perf_guest_cbs;
  
  int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  {
  	perf_guest_cbs = cbs;
  	return 0;
  }
  EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
  
  int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  {
  	perf_guest_cbs = NULL;
  	return 0;
  }
  EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
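
  /*
   * Illustrative sketch (not part of this file): a hypervisor module would
   * register its callbacks roughly as below.  The kvm_* helpers are
   * placeholders; only the structure fields come from <linux/perf_event.h>.
   *
   *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
   *		.is_in_guest	= kvm_is_in_guest,
   *		.is_user_mode	= kvm_is_user_mode,
   *		.get_guest_ip	= kvm_get_guest_ip,
   *	};
   *
   *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
   *	...
   *	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
   */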
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3539
3540
3541
  static void __perf_event_header__init_id(struct perf_event_header *header,
  					 struct perf_sample_data *data,
  					 struct perf_event *event)
6844c09d8   Arnaldo Carvalho de Melo   perf events: Sepa...
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
  {
  	u64 sample_type = event->attr.sample_type;
  
  	data->type = sample_type;
  	header->size += event->id_header_size;
  
  	if (sample_type & PERF_SAMPLE_TID) {
  		/* namespace issues */
  		data->tid_entry.pid = perf_event_pid(event, current);
  		data->tid_entry.tid = perf_event_tid(event, current);
  	}
  
  	if (sample_type & PERF_SAMPLE_TIME)
  		data->time = perf_clock();
  
  	if (sample_type & PERF_SAMPLE_ID)
  		data->id = primary_event_id(event);
  
  	if (sample_type & PERF_SAMPLE_STREAM_ID)
  		data->stream_id = event->id;
  
  	if (sample_type & PERF_SAMPLE_CPU) {
  		data->cpu_entry.cpu	 = raw_smp_processor_id();
  		data->cpu_entry.reserved = 0;
  	}
  }
76369139c   Frederic Weisbecker   perf: Split up bu...
3568
3569
3570
  void perf_event_header__init_id(struct perf_event_header *header,
  				struct perf_sample_data *data,
  				struct perf_event *event)
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
  {
  	if (event->attr.sample_id_all)
  		__perf_event_header__init_id(header, data, event);
  }
  
  static void __perf_event__output_id_sample(struct perf_output_handle *handle,
  					   struct perf_sample_data *data)
  {
  	u64 sample_type = data->type;
  
  	if (sample_type & PERF_SAMPLE_TID)
  		perf_output_put(handle, data->tid_entry);
  
  	if (sample_type & PERF_SAMPLE_TIME)
  		perf_output_put(handle, data->time);
  
  	if (sample_type & PERF_SAMPLE_ID)
  		perf_output_put(handle, data->id);
  
  	if (sample_type & PERF_SAMPLE_STREAM_ID)
  		perf_output_put(handle, data->stream_id);
  
  	if (sample_type & PERF_SAMPLE_CPU)
  		perf_output_put(handle, data->cpu_entry);
  }
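
  /*
   * For reference, the helpers above size and emit the optional sample_id
   * trailer that is appended to non-sample records when attr.sample_id_all
   * is set, in this order:
   *
   *	{ u32 pid, tid;  }	if PERF_SAMPLE_TID
   *	{ u64 time;      }	if PERF_SAMPLE_TIME
   *	{ u64 id;        }	if PERF_SAMPLE_ID
   *	{ u64 stream_id; }	if PERF_SAMPLE_STREAM_ID
   *	{ u32 cpu, res;  }	if PERF_SAMPLE_CPU
   */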
76369139c   Frederic Weisbecker   perf: Split up bu...
3596
3597
3598
  void perf_event__output_id_sample(struct perf_event *event,
  				  struct perf_output_handle *handle,
  				  struct perf_sample_data *sample)
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3599
3600
3601
3602
  {
  	if (event->attr.sample_id_all)
  		__perf_event__output_id_sample(handle, sample);
  }
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3603
  static void perf_output_read_one(struct perf_output_handle *handle,
eed01528a   Stephane Eranian   perf_events: Fix ...
3604
3605
  				 struct perf_event *event,
  				 u64 enabled, u64 running)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3606
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3607
  	u64 read_format = event->attr.read_format;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3608
3609
  	u64 values[4];
  	int n = 0;
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
3610
  	values[n++] = perf_event_count(event);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3611
  	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528a   Stephane Eranian   perf_events: Fix ...
3612
  		values[n++] = enabled +
cdd6c482c   Ingo Molnar   perf: Do the big ...
3613
  			atomic64_read(&event->child_total_time_enabled);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3614
3615
  	}
  	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528a   Stephane Eranian   perf_events: Fix ...
3616
  		values[n++] = running +
cdd6c482c   Ingo Molnar   perf: Do the big ...
3617
  			atomic64_read(&event->child_total_time_running);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3618
3619
  	}
  	if (read_format & PERF_FORMAT_ID)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3620
  		values[n++] = primary_event_id(event);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3621

76369139c   Frederic Weisbecker   perf: Split up bu...
3622
  	__output_copy(handle, values, n * sizeof(u64));
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3623
3624
3625
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
3626
   * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3627
3628
   */
  static void perf_output_read_group(struct perf_output_handle *handle,
eed01528a   Stephane Eranian   perf_events: Fix ...
3629
3630
  			    struct perf_event *event,
  			    u64 enabled, u64 running)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3631
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3632
3633
  	struct perf_event *leader = event->group_leader, *sub;
  	u64 read_format = event->attr.read_format;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3634
3635
3636
3637
3638
3639
  	u64 values[5];
  	int n = 0;
  
  	values[n++] = 1 + leader->nr_siblings;
  
  	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528a   Stephane Eranian   perf_events: Fix ...
3640
  		values[n++] = enabled;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3641
3642
  
  	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528a   Stephane Eranian   perf_events: Fix ...
3643
  		values[n++] = running;
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3644

cdd6c482c   Ingo Molnar   perf: Do the big ...
3645
  	if (leader != event)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3646
  		leader->pmu->read(leader);
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
3647
  	values[n++] = perf_event_count(leader);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3648
  	if (read_format & PERF_FORMAT_ID)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3649
  		values[n++] = primary_event_id(leader);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3650

76369139c   Frederic Weisbecker   perf: Split up bu...
3651
  	__output_copy(handle, values, n * sizeof(u64));
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3652

65abc8653   Ingo Molnar   perf_counter: Ren...
3653
  	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3654
  		n = 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3655
  		if (sub != event)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3656
  			sub->pmu->read(sub);
b5e58793c   Peter Zijlstra   perf: Add perf_ev...
3657
  		values[n++] = perf_event_count(sub);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3658
  		if (read_format & PERF_FORMAT_ID)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3659
  			values[n++] = primary_event_id(sub);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3660

76369139c   Frederic Weisbecker   perf: Split up bu...
3661
  		__output_copy(handle, values, n * sizeof(u64));
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3662
3663
  	}
  }
eed01528a   Stephane Eranian   perf_events: Fix ...
3664
3665
  #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
  				 PERF_FORMAT_TOTAL_TIME_RUNNING)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3666
  static void perf_output_read(struct perf_output_handle *handle,
cdd6c482c   Ingo Molnar   perf: Do the big ...
3667
  			     struct perf_event *event)
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3668
  {
c47942959   Eric B Munson   events: Move lock...
3669
  	u64 enabled = 0, running = 0;
eed01528a   Stephane Eranian   perf_events: Fix ...
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
  	u64 read_format = event->attr.read_format;
  
  	/*
  	 * compute total_time_enabled, total_time_running
  	 * based on snapshot values taken when the event
  	 * was last scheduled in.
  	 *
  	 * we cannot simply call update_context_time()
  	 * because of locking issues, as we are called in
  	 * NMI context
  	 */
c47942959   Eric B Munson   events: Move lock...
3681
3682
  	if (read_format & PERF_FORMAT_TOTAL_TIMES)
  		calc_timer_values(event, &enabled, &running);
eed01528a   Stephane Eranian   perf_events: Fix ...
3683

cdd6c482c   Ingo Molnar   perf: Do the big ...
3684
  	if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528a   Stephane Eranian   perf_events: Fix ...
3685
  		perf_output_read_group(handle, event, enabled, running);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3686
  	else
eed01528a   Stephane Eranian   perf_events: Fix ...
3687
  		perf_output_read_one(handle, event, enabled, running);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3688
  }
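
  /*
   * For reference, the layout written by the readout helpers above (it is
   * the same read_format layout a read() on the event fd returns):
   *
   * without PERF_FORMAT_GROUP:
   *	{ u64 value;
   *	  { u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
   *	  { u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
   *	  { u64 id;           }	if PERF_FORMAT_ID
   *	}
   *
   * with PERF_FORMAT_GROUP:
   *	{ u64 nr;
   *	  { u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
   *	  { u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
   *	  { u64 value; { u64 id; } } cntr[nr];
   *	}
   */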
5622f295b   Markus Metzger   x86, perf_counter...
3689
3690
3691
  void perf_output_sample(struct perf_output_handle *handle,
  			struct perf_event_header *header,
  			struct perf_sample_data *data,
cdd6c482c   Ingo Molnar   perf: Do the big ...
3692
  			struct perf_event *event)
5622f295b   Markus Metzger   x86, perf_counter...
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
  {
  	u64 sample_type = data->type;
  
  	perf_output_put(handle, *header);
  
  	if (sample_type & PERF_SAMPLE_IP)
  		perf_output_put(handle, data->ip);
  
  	if (sample_type & PERF_SAMPLE_TID)
  		perf_output_put(handle, data->tid_entry);
  
  	if (sample_type & PERF_SAMPLE_TIME)
  		perf_output_put(handle, data->time);
  
  	if (sample_type & PERF_SAMPLE_ADDR)
  		perf_output_put(handle, data->addr);
  
  	if (sample_type & PERF_SAMPLE_ID)
  		perf_output_put(handle, data->id);
  
  	if (sample_type & PERF_SAMPLE_STREAM_ID)
  		perf_output_put(handle, data->stream_id);
  
  	if (sample_type & PERF_SAMPLE_CPU)
  		perf_output_put(handle, data->cpu_entry);
  
  	if (sample_type & PERF_SAMPLE_PERIOD)
  		perf_output_put(handle, data->period);
  
  	if (sample_type & PERF_SAMPLE_READ)
cdd6c482c   Ingo Molnar   perf: Do the big ...
3723
  		perf_output_read(handle, event);
5622f295b   Markus Metzger   x86, perf_counter...
3724
3725
3726
3727
3728
3729
3730
3731
3732
  
  	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  		if (data->callchain) {
  			int size = 1;
  
  			if (data->callchain)
  				size += data->callchain->nr;
  
  			size *= sizeof(u64);
76369139c   Frederic Weisbecker   perf: Split up bu...
3733
  			__output_copy(handle, data->callchain, size);
5622f295b   Markus Metzger   x86, perf_counter...
3734
3735
3736
3737
3738
3739
3740
3741
3742
  		} else {
  			u64 nr = 0;
  			perf_output_put(handle, nr);
  		}
  	}
  
  	if (sample_type & PERF_SAMPLE_RAW) {
  		if (data->raw) {
  			perf_output_put(handle, data->raw->size);
76369139c   Frederic Weisbecker   perf: Split up bu...
3743
3744
  			__output_copy(handle, data->raw->data,
  					   data->raw->size);
5622f295b   Markus Metzger   x86, perf_counter...
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
  		} else {
  			struct {
  				u32	size;
  				u32	data;
  			} raw = {
  				.size = sizeof(u32),
  				.data = 0,
  			};
  			perf_output_put(handle, raw);
  		}
  	}
a7ac67ea0   Peter Zijlstra   perf: Remove the ...
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
  
  	if (!event->attr.watermark) {
  		int wakeup_events = event->attr.wakeup_events;
  
  		if (wakeup_events) {
  			struct ring_buffer *rb = handle->rb;
  			int events = local_inc_return(&rb->events);
  
  			if (events >= wakeup_events) {
  				local_sub(wakeup_events, &rb->events);
  				local_inc(&rb->wakeup);
  			}
  		}
  	}
5622f295b   Markus Metzger   x86, perf_counter...
3770
3771
3772
3773
  }
  
  void perf_prepare_sample(struct perf_event_header *header,
  			 struct perf_sample_data *data,
cdd6c482c   Ingo Molnar   perf: Do the big ...
3774
  			 struct perf_event *event,
5622f295b   Markus Metzger   x86, perf_counter...
3775
  			 struct pt_regs *regs)
7b732a750   Peter Zijlstra   perf_counter: new...
3776
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3777
  	u64 sample_type = event->attr.sample_type;
7b732a750   Peter Zijlstra   perf_counter: new...
3778

cdd6c482c   Ingo Molnar   perf: Do the big ...
3779
  	header->type = PERF_RECORD_SAMPLE;
c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
3780
  	header->size = sizeof(*header) + event->header_size;
5622f295b   Markus Metzger   x86, perf_counter...
3781
3782
3783
  
  	header->misc = 0;
  	header->misc |= perf_misc_flags(regs);
6fab01927   Peter Zijlstra   perf_counter: pro...
3784

c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3785
  	__perf_event_header__init_id(header, data, event);
6844c09d8   Arnaldo Carvalho de Melo   perf events: Sepa...
3786

c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
3787
  	if (sample_type & PERF_SAMPLE_IP)
5622f295b   Markus Metzger   x86, perf_counter...
3788
  		data->ip = perf_instruction_pointer(regs);
b23f3325e   Peter Zijlstra   perf_counter: Ren...
3789
  	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295b   Markus Metzger   x86, perf_counter...
3790
  		int size = 1;
394ee0762   Peter Zijlstra   perf_counter: pro...
3791

5622f295b   Markus Metzger   x86, perf_counter...
3792
3793
3794
3795
3796
3797
  		data->callchain = perf_callchain(regs);
  
  		if (data->callchain)
  			size += data->callchain->nr;
  
  		header->size += size * sizeof(u64);
394ee0762   Peter Zijlstra   perf_counter: pro...
3798
  	}
3a43ce68a   Frederic Weisbecker   perf_counter: Fix...
3799
  	if (sample_type & PERF_SAMPLE_RAW) {
a044560c3   Peter Zijlstra   perf_counter: Cor...
3800
3801
3802
3803
3804
3805
3806
3807
  		int size = sizeof(u32);
  
  		if (data->raw)
  			size += data->raw->size;
  		else
  			size += sizeof(u32);
  
  		WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295b   Markus Metzger   x86, perf_counter...
3808
  		header->size += size;
7f453c24b   Peter Zijlstra   perf_counter: PER...
3809
  	}
5622f295b   Markus Metzger   x86, perf_counter...
3810
  }
7f453c24b   Peter Zijlstra   perf_counter: PER...
3811

a8b0ca17b   Peter Zijlstra   perf: Remove the ...
3812
  static void perf_event_output(struct perf_event *event,
5622f295b   Markus Metzger   x86, perf_counter...
3813
3814
3815
3816
3817
  				struct perf_sample_data *data,
  				struct pt_regs *regs)
  {
  	struct perf_output_handle handle;
  	struct perf_event_header header;
689802b2d   Peter Zijlstra   perf_counter: Add...
3818

927c7a9e9   Frederic Weisbecker   perf: Fix race in...
3819
3820
  	/* protect the callchain buffers */
  	rcu_read_lock();
cdd6c482c   Ingo Molnar   perf: Do the big ...
3821
  	perf_prepare_sample(&header, data, event, regs);
5c1481943   Peter Zijlstra   perf_counter: out...
3822

a7ac67ea0   Peter Zijlstra   perf: Remove the ...
3823
  	if (perf_output_begin(&handle, event, header.size))
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
3824
  		goto exit;
0322cd6ec   Peter Zijlstra   perf_counter: uni...
3825

cdd6c482c   Ingo Molnar   perf: Do the big ...
3826
  	perf_output_sample(&handle, &header, data, event);
f413cdb80   Frederic Weisbecker   perf_counter: Fix...
3827

8a057d849   Peter Zijlstra   perf_counter: mov...
3828
  	perf_output_end(&handle);
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
3829
3830
3831
  
  exit:
  	rcu_read_unlock();
0322cd6ec   Peter Zijlstra   perf_counter: uni...
3832
3833
3834
  }
  
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
3835
   * read event_id
38b200d67   Peter Zijlstra   perf_counter: Add...
3836
3837
3838
3839
3840
3841
3842
   */
  
  struct perf_read_event {
  	struct perf_event_header	header;
  
  	u32				pid;
  	u32				tid;
38b200d67   Peter Zijlstra   perf_counter: Add...
3843
3844
3845
  };
  
  static void
cdd6c482c   Ingo Molnar   perf: Do the big ...
3846
  perf_event_read_event(struct perf_event *event,
38b200d67   Peter Zijlstra   perf_counter: Add...
3847
3848
3849
  			struct task_struct *task)
  {
  	struct perf_output_handle handle;
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3850
  	struct perf_sample_data sample;
dfc65094d   Ingo Molnar   perf_counter: Ren...
3851
  	struct perf_read_event read_event = {
38b200d67   Peter Zijlstra   perf_counter: Add...
3852
  		.header = {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3853
  			.type = PERF_RECORD_READ,
38b200d67   Peter Zijlstra   perf_counter: Add...
3854
  			.misc = 0,
c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
3855
  			.size = sizeof(read_event) + event->read_size,
38b200d67   Peter Zijlstra   perf_counter: Add...
3856
  		},
cdd6c482c   Ingo Molnar   perf: Do the big ...
3857
3858
  		.pid = perf_event_pid(event, task),
  		.tid = perf_event_tid(event, task),
38b200d67   Peter Zijlstra   perf_counter: Add...
3859
  	};
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3860
  	int ret;
38b200d67   Peter Zijlstra   perf_counter: Add...
3861

c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3862
  	perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea0   Peter Zijlstra   perf: Remove the ...
3863
  	ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d67   Peter Zijlstra   perf_counter: Add...
3864
3865
  	if (ret)
  		return;
dfc65094d   Ingo Molnar   perf_counter: Ren...
3866
  	perf_output_put(&handle, read_event);
cdd6c482c   Ingo Molnar   perf: Do the big ...
3867
  	perf_output_read(&handle, event);
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3868
  	perf_event__output_id_sample(event, &handle, &sample);
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
3869

38b200d67   Peter Zijlstra   perf_counter: Add...
3870
3871
3872
3873
  	perf_output_end(&handle);
  }
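
  /*
   * For reference, the PERF_RECORD_READ record emitted above has the form:
   *
   *	struct {
   *		struct perf_event_header	header;
   *		u32				pid, tid;
   *		struct read_format		values;
   *		struct sample_id		sample_id;	if sample_id_all
   *	};
   */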
  
  /*
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3874
3875
   * task tracking -- fork/exit
   *
3af9e8592   Eric B Munson   perf: Add non-exe...
3876
   * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
60313ebed   Peter Zijlstra   perf_counter: Add...
3877
   */
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3878
  struct perf_task_event {
3a80b4a35   Peter Zijlstra   perf_counter: Fix...
3879
  	struct task_struct		*task;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3880
  	struct perf_event_context	*task_ctx;
60313ebed   Peter Zijlstra   perf_counter: Add...
3881
3882
3883
3884
3885
3886
  
  	struct {
  		struct perf_event_header	header;
  
  		u32				pid;
  		u32				ppid;
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3887
3888
  		u32				tid;
  		u32				ptid;
393b2ad8c   Arjan van de Ven   perf: Add a times...
3889
  		u64				time;
cdd6c482c   Ingo Molnar   perf: Do the big ...
3890
  	} event_id;
60313ebed   Peter Zijlstra   perf_counter: Add...
3891
  };
cdd6c482c   Ingo Molnar   perf: Do the big ...
3892
  static void perf_event_task_output(struct perf_event *event,
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3893
  				     struct perf_task_event *task_event)
60313ebed   Peter Zijlstra   perf_counter: Add...
3894
3895
  {
  	struct perf_output_handle handle;
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3896
  	struct perf_sample_data	sample;
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3897
  	struct task_struct *task = task_event->task;
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3898
  	int ret, size = task_event->event_id.header.size;
8bb39f9aa   Mike Galbraith   perf: Fix 'perf s...
3899

c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3900
  	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebed   Peter Zijlstra   perf_counter: Add...
3901

c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3902
  	ret = perf_output_begin(&handle, event,
a7ac67ea0   Peter Zijlstra   perf: Remove the ...
3903
  				task_event->event_id.header.size);
ef60777c9   Peter Zijlstra   perf: Optimize th...
3904
  	if (ret)
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3905
  		goto out;
60313ebed   Peter Zijlstra   perf_counter: Add...
3906

cdd6c482c   Ingo Molnar   perf: Do the big ...
3907
3908
  	task_event->event_id.pid = perf_event_pid(event, task);
  	task_event->event_id.ppid = perf_event_pid(event, current);
60313ebed   Peter Zijlstra   perf_counter: Add...
3909

cdd6c482c   Ingo Molnar   perf: Do the big ...
3910
3911
  	task_event->event_id.tid = perf_event_tid(event, task);
  	task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3912

cdd6c482c   Ingo Molnar   perf: Do the big ...
3913
  	perf_output_put(&handle, task_event->event_id);
393b2ad8c   Arjan van de Ven   perf: Add a times...
3914

c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3915
  	perf_event__output_id_sample(event, &handle, &sample);
60313ebed   Peter Zijlstra   perf_counter: Add...
3916
  	perf_output_end(&handle);
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
3917
3918
  out:
  	task_event->event_id.header.size = size;
60313ebed   Peter Zijlstra   perf_counter: Add...
3919
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3920
  static int perf_event_task_match(struct perf_event *event)
60313ebed   Peter Zijlstra   perf_counter: Add...
3921
  {
6f93d0a7c   Peter Zijlstra   perf_events: Fix ...
3922
  	if (event->state < PERF_EVENT_STATE_INACTIVE)
22e190851   Peter Zijlstra   perf: Honour even...
3923
  		return 0;
5632ab12e   Stephane Eranian   perf_events: Gene...
3924
  	if (!event_filter_match(event))
5d27c23df   Peter Zijlstra   perf events: Dont...
3925
  		return 0;
3af9e8592   Eric B Munson   perf: Add non-exe...
3926
3927
  	if (event->attr.comm || event->attr.mmap ||
  	    event->attr.mmap_data || event->attr.task)
60313ebed   Peter Zijlstra   perf_counter: Add...
3928
3929
3930
3931
  		return 1;
  
  	return 0;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3932
  static void perf_event_task_ctx(struct perf_event_context *ctx,
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3933
  				  struct perf_task_event *task_event)
60313ebed   Peter Zijlstra   perf_counter: Add...
3934
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3935
  	struct perf_event *event;
60313ebed   Peter Zijlstra   perf_counter: Add...
3936

cdd6c482c   Ingo Molnar   perf: Do the big ...
3937
3938
3939
  	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  		if (perf_event_task_match(event))
  			perf_event_task_output(event, task_event);
60313ebed   Peter Zijlstra   perf_counter: Add...
3940
  	}
60313ebed   Peter Zijlstra   perf_counter: Add...
3941
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3942
  static void perf_event_task_event(struct perf_task_event *task_event)
60313ebed   Peter Zijlstra   perf_counter: Add...
3943
3944
  {
  	struct perf_cpu_context *cpuctx;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
3945
  	struct perf_event_context *ctx;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
3946
  	struct pmu *pmu;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
3947
  	int ctxn;
60313ebed   Peter Zijlstra   perf_counter: Add...
3948

d6ff86cfb   Peter Zijlstra   perf: Optimize pe...
3949
  	rcu_read_lock();
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
3950
  	list_for_each_entry_rcu(pmu, &pmus, entry) {
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
3951
  		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
516769575   Peter Zijlstra   perf: Fix duplica...
3952
3953
  		if (cpuctx->active_pmu != pmu)
  			goto next;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
3954
  		perf_event_task_ctx(&cpuctx->ctx, task_event);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
3955
3956
3957
3958
3959
  
  		ctx = task_event->task_ctx;
  		if (!ctx) {
  			ctxn = pmu->task_ctx_nr;
  			if (ctxn < 0)
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
3960
  				goto next;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
3961
3962
3963
3964
  			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  		}
  		if (ctx)
  			perf_event_task_ctx(ctx, task_event);
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
3965
3966
  next:
  		put_cpu_ptr(pmu->pmu_cpu_context);
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
3967
  	}
60313ebed   Peter Zijlstra   perf_counter: Add...
3968
3969
  	rcu_read_unlock();
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3970
3971
  static void perf_event_task(struct task_struct *task,
  			      struct perf_event_context *task_ctx,
3a80b4a35   Peter Zijlstra   perf_counter: Fix...
3972
  			      int new)
60313ebed   Peter Zijlstra   perf_counter: Add...
3973
  {
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3974
  	struct perf_task_event task_event;
60313ebed   Peter Zijlstra   perf_counter: Add...
3975

cdd6c482c   Ingo Molnar   perf: Do the big ...
3976
3977
3978
  	if (!atomic_read(&nr_comm_events) &&
  	    !atomic_read(&nr_mmap_events) &&
  	    !atomic_read(&nr_task_events))
60313ebed   Peter Zijlstra   perf_counter: Add...
3979
  		return;
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3980
  	task_event = (struct perf_task_event){
3a80b4a35   Peter Zijlstra   perf_counter: Fix...
3981
3982
  		.task	  = task,
  		.task_ctx = task_ctx,
cdd6c482c   Ingo Molnar   perf: Do the big ...
3983
  		.event_id    = {
60313ebed   Peter Zijlstra   perf_counter: Add...
3984
  			.header = {
cdd6c482c   Ingo Molnar   perf: Do the big ...
3985
  				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db0   Peter Zijlstra   perf_counter: Plu...
3986
  				.misc = 0,
cdd6c482c   Ingo Molnar   perf: Do the big ...
3987
  				.size = sizeof(task_event.event_id),
60313ebed   Peter Zijlstra   perf_counter: Add...
3988
  			},
573402db0   Peter Zijlstra   perf_counter: Plu...
3989
3990
  			/* .pid  */
  			/* .ppid */
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3991
3992
  			/* .tid  */
  			/* .ptid */
6f93d0a7c   Peter Zijlstra   perf_events: Fix ...
3993
  			.time = perf_clock(),
60313ebed   Peter Zijlstra   perf_counter: Add...
3994
3995
  		},
  	};
cdd6c482c   Ingo Molnar   perf: Do the big ...
3996
  	perf_event_task_event(&task_event);
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3997
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
3998
  void perf_event_fork(struct task_struct *task)
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
3999
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
4000
  	perf_event_task(task, NULL, 1);
60313ebed   Peter Zijlstra   perf_counter: Add...
4001
4002
4003
  }
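
  /*
   * For reference, the PERF_RECORD_FORK and PERF_RECORD_EXIT records built
   * by perf_event_task() above have the form:
   *
   *	struct {
   *		struct perf_event_header	header;
   *		u32				pid, ppid;
   *		u32				tid, ptid;
   *		u64				time;
   *		struct sample_id		sample_id;	if sample_id_all
   *	};
   */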
  
  /*
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4004
4005
4006
4007
   * comm tracking
   */
  
  struct perf_comm_event {
22a4f650d   Ingo Molnar   perf_counter: Tid...
4008
4009
  	struct task_struct	*task;
  	char			*comm;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4010
4011
4012
4013
4014
4015
4016
  	int			comm_size;
  
  	struct {
  		struct perf_event_header	header;
  
  		u32				pid;
  		u32				tid;
cdd6c482c   Ingo Molnar   perf: Do the big ...
4017
  	} event_id;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4018
  };
cdd6c482c   Ingo Molnar   perf: Do the big ...
4019
  static void perf_event_comm_output(struct perf_event *event,
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4020
4021
4022
  				     struct perf_comm_event *comm_event)
  {
  	struct perf_output_handle handle;
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
4023
  	struct perf_sample_data sample;
cdd6c482c   Ingo Molnar   perf: Do the big ...
4024
  	int size = comm_event->event_id.header.size;
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
4025
4026
4027
4028
  	int ret;
  
  	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
  	ret = perf_output_begin(&handle, event,
a7ac67ea0   Peter Zijlstra   perf: Remove the ...
4029
  				comm_event->event_id.header.size);
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4030
4031
  
  	if (ret)
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
4032
  		goto out;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4033

cdd6c482c   Ingo Molnar   perf: Do the big ...
4034
4035
  	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
  	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf8   Peter Zijlstra   perf_counter: Use...
4036

cdd6c482c   Ingo Molnar   perf: Do the big ...
4037
  	perf_output_put(&handle, comm_event->event_id);
76369139c   Frederic Weisbecker   perf: Split up bu...
4038
  	__output_copy(&handle, comm_event->comm,
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4039
  				   comm_event->comm_size);
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
4040
4041
  
  	perf_event__output_id_sample(event, &handle, &sample);
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4042
  	perf_output_end(&handle);
c980d1091   Arnaldo Carvalho de Melo   perf events: Make...
4043
4044
  out:
  	comm_event->event_id.header.size = size;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4045
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
4046
  static int perf_event_comm_match(struct perf_event *event)
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4047
  {
6f93d0a7c   Peter Zijlstra   perf_events: Fix ...
4048
  	if (event->state < PERF_EVENT_STATE_INACTIVE)
22e190851   Peter Zijlstra   perf: Honour even...
4049
  		return 0;
5632ab12e   Stephane Eranian   perf_events: Gene...
4050
  	if (!event_filter_match(event))
5d27c23df   Peter Zijlstra   perf events: Dont...
4051
  		return 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
4052
  	if (event->attr.comm)
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4053
4054
4055
4056
  		return 1;
  
  	return 0;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
4057
  static void perf_event_comm_ctx(struct perf_event_context *ctx,
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4058
4059
  				  struct perf_comm_event *comm_event)
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
4060
  	struct perf_event *event;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4061

cdd6c482c   Ingo Molnar   perf: Do the big ...
4062
4063
4064
  	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  		if (perf_event_comm_match(event))
  			perf_event_comm_output(event, comm_event);
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4065
  	}
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4066
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
4067
  static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4068
4069
  {
  	struct perf_cpu_context *cpuctx;
cdd6c482c   Ingo Molnar   perf: Do the big ...
4070
  	struct perf_event_context *ctx;
413ee3b48   Anton Blanchard   perf_counter: Mak...
4071
  	char comm[TASK_COMM_LEN];
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4072
  	unsigned int size;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
4073
  	struct pmu *pmu;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
4074
  	int ctxn;
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4075

413ee3b48   Anton Blanchard   perf_counter: Mak...
4076
  	memset(comm, 0, sizeof(comm));
96b02d78a   Márton Németh   perf_event: Remov...
4077
  	strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee06   Ingo Molnar   perf_counter: fix...
4078
  	size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4079
4080
4081
  
  	comm_event->comm = comm;
  	comm_event->comm_size = size;
cdd6c482c   Ingo Molnar   perf: Do the big ...
4082
  	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
f6595f3a9   Peter Zijlstra   perf: Optimize pe...
4083
  	rcu_read_lock();
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
4084
  	list_for_each_entry_rcu(pmu, &pmus, entry) {
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
4085
  		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
516769575   Peter Zijlstra   perf: Fix duplica...
4086
4087
  		if (cpuctx->active_pmu != pmu)
  			goto next;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
4088
  		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
4089
4090
4091
  
  		ctxn = pmu->task_ctx_nr;
  		if (ctxn < 0)
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
4092
  			goto next;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
4093
4094
4095
4096
  
  		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  		if (ctx)
  			perf_event_comm_ctx(ctx, comm_event);
41945f6cc   Peter Zijlstra   perf: Avoid RCU v...
4097
4098
  next:
  		put_cpu_ptr(pmu->pmu_cpu_context);
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
4099
  	}
665c2142a   Peter Zijlstra   perf_counter: Cle...
4100
  	rcu_read_unlock();
8d1b2d936   Peter Zijlstra   perf_counter: tra...
4101
  }
void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the rb backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}
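
/*
 * Editor's note (illustrative summary, not from the original source): the
 * name resolution above falls through in a fixed order -- a file-backed
 * mapping gets its d_path() ("//enomem" / "//toolong" on failure), an
 * anonymous mapping is labelled by arch_vma_name(), "[vdso]", "[heap]",
 * "[stack]" or finally "//anon".  Whichever name wins is then u64-aligned
 * and appended to the PERF_RECORD_MMAP header, exactly like the comm string.
 */
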
void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
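
/*
 * Editor's note (hypothetical example, not from the original source): for
 * a file mapping starting at vm_start = 0x7f0000000000 with vm_pgoff =
 * 0x200, the record built above carries start = 0x7f0000000000,
 * len = vm_end - vm_start and pgoff = 0x200 << PAGE_SHIFT = 0x200000
 * (assuming 4k pages), which is what lets tools map samples back to file
 * offsets in the DSO.
 */
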
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
		if (throttle) {
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	} else
		hwc->interrupts++;

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */
	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}
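
/*
 * Editor's note (illustrative, not from the original source): the throttle
 * branch above means that once an event takes more than
 * max_samples_per_tick interrupts within one tick, hwc->interrupts is
 * pinned at MAX_INTERRUPTS, a PERF_RECORD_THROTTLE record is logged, and
 * further samples are suppressed until the tick handler unthrottles the
 * event and logs the matching PERF_RECORD_UNTHROTTLE.
 */
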
int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;
again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
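
/*
 * Editor's note (worked example, not from the original source): with
 * sample_period = 100 and period_left having climbed to, say, 30 once the
 * caller's local64_add_negative() went non-negative, the code above
 * computes nr = (100 + 30) / 100 = 1 overflow and rewinds period_left to
 * 30 - 100 = -70, i.e. back into the [-sample_period, 0] window described
 * in the comment before this function.
 */
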
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}
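
/*
 * Editor's note (illustrative, not from the original source): the
 * "nr == 1 && sample_period == 1" test above is the fast path for software
 * events created with a period of one -- every occurrence is an overflow,
 * so the period bookkeeping in perf_swevent_set_period() is skipped and a
 * single sample is emitted directly per event.
 */
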
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}
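
/*
 * Editor's note (illustrative, not from the original source): the hash key
 * packs the 32-bit config into the low word and the event type into the
 * high word, so two different event types with the same numeric config
 * still produce distinct keys before hash_64() folds them into one of the
 * 2^SWEVENT_HLIST_BITS buckets.
 */
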
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release, which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
	preempt_enable_notrace();
}
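
/*
 * Editor's note (illustrative usage sketch, not part of this file): callers
 * do not normally invoke __perf_sw_event() directly; they go through the
 * perf_sw_event() wrapper, roughly the way fault handlers do:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The wrapper is expected to test the perf_swevent_enabled[] jump label
 * first, so the call is close to free when no such event exists.
 */
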
static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}
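
/*
 * Editor's note (worked example, not from the original source): with
 * attr.freq = 1 and attr.sample_freq = 1000, the static mapping above sets
 * sample_period = NSEC_PER_SEC / 1000 = 1,000,000 ns, i.e. the hrtimer
 * fires every millisecond, and attr.freq is cleared so the dynamic
 * period-adjust feedback in __perf_event_overflow() never runs for this
 * event.
 */
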
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}
  
  static int task_clock_event_init(struct perf_event *event)
6fb2915df   Li Zefan   tracing/profile: ...
5145
  {
b0a873ebb   Peter Zijlstra   perf: Register PM...
5146
5147
5148
5149
5150
  	if (event->attr.type != PERF_TYPE_SOFTWARE)
  		return -ENOENT;
  
  	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
  		return -ENOENT;
ba3dd36c6   Peter Zijlstra   perf: Optimize hr...
5151
  	perf_swevent_init_hrtimer(event);
b0a873ebb   Peter Zijlstra   perf: Register PM...
5152
  	return 0;
6fb2915df   Li Zefan   tracing/profile: ...
5153
  }
b0a873ebb   Peter Zijlstra   perf: Register PM...
5154
  static struct pmu perf_task_clock = {
89a1e1873   Peter Zijlstra   perf: Provide a s...
5155
  	.task_ctx_nr	= perf_sw_context,
b0a873ebb   Peter Zijlstra   perf: Register PM...
5156
  	.event_init	= task_clock_event_init,
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
5157
5158
5159
5160
  	.add		= task_clock_event_add,
  	.del		= task_clock_event_del,
  	.start		= task_clock_event_start,
  	.stop		= task_clock_event_stop,
b0a873ebb   Peter Zijlstra   perf: Register PM...
5161
5162
  	.read		= task_clock_event_read,
  };
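/*
 * Illustrative user-space sketch (not part of this file): the two software
 * clock PMUs above are selected through sys_perf_event_open() with
 * PERF_TYPE_SOFTWARE and a config of PERF_COUNT_SW_CPU_CLOCK or
 * PERF_COUNT_SW_TASK_CLOCK; read() on the returned fd yields the accumulated
 * time in nanoseconds.  Declarations, headers and error handling are
 * omitted; the identifiers attr, fd and count are assumptions for
 * illustration only.
 */
#if 0	/* example only, not kernel code */
	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;	/* or PERF_COUNT_SW_CPU_CLOCK */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	/* ... workload ... */
	read(fd, &count, sizeof(count));	/* u64 nanoseconds */
#endif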
6fb2915df   Li Zefan   tracing/profile: ...
5163

ad5133b70   Peter Zijlstra   perf: Default PMU...
5164
  static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f4   Peter Zijlstra   perf_counter: hoo...
5165
  {
e077df4f4   Peter Zijlstra   perf_counter: hoo...
5166
  }
6fb2915df   Li Zefan   tracing/profile: ...
5167

ad5133b70   Peter Zijlstra   perf: Default PMU...
5168
  static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915df   Li Zefan   tracing/profile: ...
5169
  {
ad5133b70   Peter Zijlstra   perf: Default PMU...
5170
  	return 0;
6fb2915df   Li Zefan   tracing/profile: ...
5171
  }
ad5133b70   Peter Zijlstra   perf: Default PMU...
5172
  static void perf_pmu_start_txn(struct pmu *pmu)
6fb2915df   Li Zefan   tracing/profile: ...
5173
  {
ad5133b70   Peter Zijlstra   perf: Default PMU...
5174
  	perf_pmu_disable(pmu);
6fb2915df   Li Zefan   tracing/profile: ...
5175
  }
ad5133b70   Peter Zijlstra   perf: Default PMU...
5176
5177
5178
5179
5180
  static int perf_pmu_commit_txn(struct pmu *pmu)
  {
  	perf_pmu_enable(pmu);
  	return 0;
  }
e077df4f4   Peter Zijlstra   perf_counter: hoo...
5181

ad5133b70   Peter Zijlstra   perf: Default PMU...
5182
  static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5183
  {
ad5133b70   Peter Zijlstra   perf: Default PMU...
5184
  	perf_pmu_enable(pmu);
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5185
  }
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5186
5187
5188
5189
5190
  /*
   * Ensures all contexts with the same task_ctx_nr have the same
   * pmu_cpu_context too.
   */
  static void *find_pmu_context(int ctxn)
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5191
  {
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5192
  	struct pmu *pmu;
b326e9560   Frederic Weisbecker   hw-breakpoints: U...
5193

8dc85d547   Peter Zijlstra   perf: Multiple ta...
5194
5195
  	if (ctxn < 0)
  		return NULL;
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5196

8dc85d547   Peter Zijlstra   perf: Multiple ta...
5197
5198
5199
5200
  	list_for_each_entry(pmu, &pmus, entry) {
  		if (pmu->task_ctx_nr == ctxn)
  			return pmu->pmu_cpu_context;
  	}
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5201

8dc85d547   Peter Zijlstra   perf: Multiple ta...
5202
  	return NULL;
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5203
  }
516769575   Peter Zijlstra   perf: Fix duplica...
5204
  static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5205
  {
516769575   Peter Zijlstra   perf: Fix duplica...
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
  	int cpu;
  
  	for_each_possible_cpu(cpu) {
  		struct perf_cpu_context *cpuctx;
  
  		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  
  		if (cpuctx->active_pmu == old_pmu)
  			cpuctx->active_pmu = pmu;
  	}
  }
  
  static void free_pmu_context(struct pmu *pmu)
  {
  	struct pmu *i;
f5ffe02e5   Frederic Weisbecker   perf: Add kernel ...
5221

8dc85d547   Peter Zijlstra   perf: Multiple ta...
5222
  	mutex_lock(&pmus_lock);
0475f9ea8   Paul Mackerras   perf_counters: al...
5223
  	/*
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5224
  	 * Like a real lame refcount.
0475f9ea8   Paul Mackerras   perf_counters: al...
5225
  	 */
516769575   Peter Zijlstra   perf: Fix duplica...
5226
5227
5228
  	list_for_each_entry(i, &pmus, entry) {
  		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
  			update_pmu_context(i, pmu);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5229
  			goto out;
516769575   Peter Zijlstra   perf: Fix duplica...
5230
  		}
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5231
  	}
d6d020e99   Peter Zijlstra   perf_counter: hrt...
5232

516769575   Peter Zijlstra   perf: Fix duplica...
5233
  	free_percpu(pmu->pmu_cpu_context);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5234
5235
  out:
  	mutex_unlock(&pmus_lock);
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5236
  }
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5237
  static struct idr pmu_idr;
d6d020e99   Peter Zijlstra   perf_counter: hrt...
5238

abe434005   Peter Zijlstra   perf: Sysfs enume...
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
  static ssize_t
  type_show(struct device *dev, struct device_attribute *attr, char *page)
  {
  	struct pmu *pmu = dev_get_drvdata(dev);
  
  	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
  }
  
  static struct device_attribute pmu_dev_attrs[] = {
         __ATTR_RO(type),
         __ATTR_NULL,
  };
  
  static int pmu_bus_running;
  static struct bus_type pmu_bus = {
  	.name		= "event_source",
  	.dev_attrs	= pmu_dev_attrs,
  };
  
  static void pmu_dev_release(struct device *dev)
  {
  	kfree(dev);
  }
  
  static int pmu_dev_alloc(struct pmu *pmu)
  {
  	int ret = -ENOMEM;
  
  	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  	if (!pmu->dev)
  		goto out;
  
  	device_initialize(pmu->dev);
  	ret = dev_set_name(pmu->dev, "%s", pmu->name);
  	if (ret)
  		goto free_dev;
  
  	dev_set_drvdata(pmu->dev, pmu);
  	pmu->dev->bus = &pmu_bus;
  	pmu->dev->release = pmu_dev_release;
  	ret = device_add(pmu->dev);
  	if (ret)
  		goto free_dev;
  
  out:
  	return ret;
  
  free_dev:
  	put_device(pmu->dev);
  	goto out;
  }
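/*
 * Illustrative user-space sketch (not part of this file): the "type"
 * attribute exposed above is how tools discover a PMU's (possibly dynamic)
 * type id; the value read from sysfs goes straight into
 * perf_event_attr.type.  The PMU name "cpu" and the variable names are
 * assumptions for illustration only.
 */
#if 0	/* example only, not kernel code */
	FILE *f = fopen("/sys/bus/event_source/devices/cpu/type", "r");
	int type = -1;

	if (f) {
		fscanf(f, "%d", &type);
		fclose(f);
	}
	/* attr.type = type; */
#endif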
547e9fd7d   Peter Zijlstra   perf: Annotate cp...
5291
  static struct lock_class_key cpuctx_mutex;
facc43071   Peter Zijlstra   perf: Optimize ev...
5292
  static struct lock_class_key cpuctx_lock;
547e9fd7d   Peter Zijlstra   perf: Annotate cp...
5293

2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5294
  int perf_pmu_register(struct pmu *pmu, char *name, int type)
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5295
  {
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5296
  	int cpu, ret;
24f1e32c6   Frederic Weisbecker   hw-breakpoints: R...
5297

b0a873ebb   Peter Zijlstra   perf: Register PM...
5298
  	mutex_lock(&pmus_lock);
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
5299
5300
5301
5302
  	ret = -ENOMEM;
  	pmu->pmu_disable_count = alloc_percpu(int);
  	if (!pmu->pmu_disable_count)
  		goto unlock;
f29ac756a   Peter Zijlstra   perf_counter: Opt...
5303

2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
  	pmu->type = -1;
  	if (!name)
  		goto skip_type;
  	pmu->name = name;
  
  	if (type < 0) {
  		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
  		if (!err)
  			goto free_pdc;
  
  		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
  		if (err) {
  			ret = err;
  			goto free_pdc;
  		}
  	}
  	pmu->type = type;
abe434005   Peter Zijlstra   perf: Sysfs enume...
5321
5322
5323
5324
5325
  	if (pmu_bus_running) {
  		ret = pmu_dev_alloc(pmu);
  		if (ret)
  			goto free_idr;
  	}
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5326
  skip_type:
8dc85d547   Peter Zijlstra   perf: Multiple ta...
5327
5328
5329
  	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
  	if (pmu->pmu_cpu_context)
  		goto got_cpu_context;
f29ac756a   Peter Zijlstra   perf_counter: Opt...
5330

108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5331
5332
  	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
  	if (!pmu->pmu_cpu_context)
abe434005   Peter Zijlstra   perf: Sysfs enume...
5333
  		goto free_dev;
f344011cc   Peter Zijlstra   perf_counter: Opt...
5334

108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5335
5336
5337
5338
  	for_each_possible_cpu(cpu) {
  		struct perf_cpu_context *cpuctx;
  
  		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb1844798   Peter Zijlstra   perf: Clean up pe...
5339
  		__perf_event_init_context(&cpuctx->ctx);
547e9fd7d   Peter Zijlstra   perf: Annotate cp...
5340
  		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc43071   Peter Zijlstra   perf: Optimize ev...
5341
  		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
b04243ef7   Peter Zijlstra   perf: Complete so...
5342
  		cpuctx->ctx.type = cpu_context;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5343
  		cpuctx->ctx.pmu = pmu;
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
5344
5345
  		cpuctx->jiffies_interval = 1;
  		INIT_LIST_HEAD(&cpuctx->rotation_list);
516769575   Peter Zijlstra   perf: Fix duplica...
5346
  		cpuctx->active_pmu = pmu;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5347
  	}
76e1d9047   Frederic Weisbecker   perf: Store activ...
5348

8dc85d547   Peter Zijlstra   perf: Multiple ta...
5349
  got_cpu_context:
ad5133b70   Peter Zijlstra   perf: Default PMU...
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
  	if (!pmu->start_txn) {
  		if (pmu->pmu_enable) {
  			/*
  			 * If we have pmu_enable/pmu_disable calls, install
  			 * transaction stubs that use that to try and batch
  			 * hardware accesses.
  			 */
  			pmu->start_txn  = perf_pmu_start_txn;
  			pmu->commit_txn = perf_pmu_commit_txn;
  			pmu->cancel_txn = perf_pmu_cancel_txn;
  		} else {
  			pmu->start_txn  = perf_pmu_nop_void;
  			pmu->commit_txn = perf_pmu_nop_int;
  			pmu->cancel_txn = perf_pmu_nop_void;
f344011cc   Peter Zijlstra   perf_counter: Opt...
5364
  		}
5c92d1241   Ingo Molnar   perf counters: im...
5365
  	}
15dbf27cc   Peter Zijlstra   perf_counter: sof...
5366

ad5133b70   Peter Zijlstra   perf: Default PMU...
5367
5368
5369
5370
  	if (!pmu->pmu_enable) {
  		pmu->pmu_enable  = perf_pmu_nop_void;
  		pmu->pmu_disable = perf_pmu_nop_void;
  	}
b0a873ebb   Peter Zijlstra   perf: Register PM...
5371
  	list_add_rcu(&pmu->entry, &pmus);
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
5372
5373
  	ret = 0;
  unlock:
b0a873ebb   Peter Zijlstra   perf: Register PM...
5374
  	mutex_unlock(&pmus_lock);
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
5375
  	return ret;
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5376

abe434005   Peter Zijlstra   perf: Sysfs enume...
5377
5378
5379
  free_dev:
  	device_del(pmu->dev);
  	put_device(pmu->dev);
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5380
5381
5382
  free_idr:
  	if (pmu->type >= PERF_TYPE_MAX)
  		idr_remove(&pmu_idr, pmu->type);
108b02cfc   Peter Zijlstra   perf: Per-pmu-per...
5383
5384
5385
  free_pdc:
  	free_percpu(pmu->pmu_disable_count);
  	goto unlock;
f29ac756a   Peter Zijlstra   perf_counter: Opt...
5386
  }
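/*
 * Illustrative sketch (not part of this file): a minimal software-style PMU
 * registered from a module.  The "demo" name and the demo_event_* callbacks
 * are assumptions for illustration; only event_init, add, del, start, stop
 * and read are supplied, so the txn and pmu_enable/pmu_disable hooks fall
 * back to the nop/disable-based defaults installed above.  Passing
 * type == -1 asks perf_pmu_register() to allocate a dynamic id from pmu_idr,
 * exported via /sys/bus/event_source/devices/demo/type.
 */
#if 0	/* example only */
static struct pmu demo_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= demo_event_init,
	.add		= demo_event_add,
	.del		= demo_event_del,
	.start		= demo_event_start,
	.stop		= demo_event_stop,
	.read		= demo_event_read,
};

static int __init demo_pmu_module_init(void)
{
	return perf_pmu_register(&demo_pmu, "demo", -1);
}
module_init(demo_pmu_module_init);

static void __exit demo_pmu_module_exit(void)
{
	perf_pmu_unregister(&demo_pmu);
}
module_exit(demo_pmu_module_exit);
#endif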
b0a873ebb   Peter Zijlstra   perf: Register PM...
5387
  void perf_pmu_unregister(struct pmu *pmu)
5c92d1241   Ingo Molnar   perf counters: im...
5388
  {
b0a873ebb   Peter Zijlstra   perf: Register PM...
5389
5390
5391
  	mutex_lock(&pmus_lock);
  	list_del_rcu(&pmu->entry);
  	mutex_unlock(&pmus_lock);
5c92d1241   Ingo Molnar   perf counters: im...
5392

0475f9ea8   Paul Mackerras   perf_counters: al...
5393
  	/*
cde8e8849   Peter Zijlstra   perf: Sanitize th...
5394
5395
  	 * We dereference the pmu list under both SRCU and regular RCU, so
  	 * synchronize against both of those.
0475f9ea8   Paul Mackerras   perf_counters: al...
5396
  	 */
b0a873ebb   Peter Zijlstra   perf: Register PM...
5397
  	synchronize_srcu(&pmus_srcu);
cde8e8849   Peter Zijlstra   perf: Sanitize th...
5398
  	synchronize_rcu();
d6d020e99   Peter Zijlstra   perf_counter: hrt...
5399

33696fc0d   Peter Zijlstra   perf: Per PMU dis...
5400
  	free_percpu(pmu->pmu_disable_count);
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5401
5402
  	if (pmu->type >= PERF_TYPE_MAX)
  		idr_remove(&pmu_idr, pmu->type);
abe434005   Peter Zijlstra   perf: Sysfs enume...
5403
5404
  	device_del(pmu->dev);
  	put_device(pmu->dev);
516769575   Peter Zijlstra   perf: Fix duplica...
5405
  	free_pmu_context(pmu);
b0a873ebb   Peter Zijlstra   perf: Register PM...
5406
  }
d6d020e99   Peter Zijlstra   perf_counter: hrt...
5407

b0a873ebb   Peter Zijlstra   perf: Register PM...
5408
5409
5410
5411
  struct pmu *perf_init_event(struct perf_event *event)
  {
  	struct pmu *pmu = NULL;
  	int idx;
940c5b297   Lin Ming   perf: Fix the mis...
5412
  	int ret;
b0a873ebb   Peter Zijlstra   perf: Register PM...
5413
5414
  
  	idx = srcu_read_lock(&pmus_srcu);
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5415
5416
5417
5418
  
  	rcu_read_lock();
  	pmu = idr_find(&pmu_idr, event->attr.type);
  	rcu_read_unlock();
940c5b297   Lin Ming   perf: Fix the mis...
5419
5420
5421
5422
  	if (pmu) {
  		ret = pmu->event_init(event);
  		if (ret)
  			pmu = ERR_PTR(ret);
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5423
  		goto unlock;
940c5b297   Lin Ming   perf: Fix the mis...
5424
  	}
2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
5425

b0a873ebb   Peter Zijlstra   perf: Register PM...
5426
  	list_for_each_entry_rcu(pmu, &pmus, entry) {
940c5b297   Lin Ming   perf: Fix the mis...
5427
  		ret = pmu->event_init(event);
b0a873ebb   Peter Zijlstra   perf: Register PM...
5428
  		if (!ret)
e5f4d3394   Peter Zijlstra   perf: Fix perf_in...
5429
  			goto unlock;
76e1d9047   Frederic Weisbecker   perf: Store activ...
5430

b0a873ebb   Peter Zijlstra   perf: Register PM...
5431
5432
  		if (ret != -ENOENT) {
  			pmu = ERR_PTR(ret);
e5f4d3394   Peter Zijlstra   perf: Fix perf_in...
5433
  			goto unlock;
f344011cc   Peter Zijlstra   perf_counter: Opt...
5434
  		}
5c92d1241   Ingo Molnar   perf counters: im...
5435
  	}
e5f4d3394   Peter Zijlstra   perf: Fix perf_in...
5436
5437
  	pmu = ERR_PTR(-ENOENT);
  unlock:
b0a873ebb   Peter Zijlstra   perf: Register PM...
5438
  	srcu_read_unlock(&pmus_srcu, idx);
15dbf27cc   Peter Zijlstra   perf_counter: sof...
5439

4aeb0b423   Robert Richter   perfcounters: ren...
5440
  	return pmu;
5c92d1241   Ingo Molnar   perf counters: im...
5441
  }
0793a61d4   Thomas Gleixner   performance count...
5442
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
5443
   * Allocate and initialize an event structure
0793a61d4   Thomas Gleixner   performance count...
5444
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
5445
  static struct perf_event *
c3f00c702   Peter Zijlstra   perf: Separate fi...
5446
  perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff869   Peter Zijlstra   perf, hw_breakpoi...
5447
5448
5449
  		 struct task_struct *task,
  		 struct perf_event *group_leader,
  		 struct perf_event *parent_event,
4dc0da869   Avi Kivity   perf: Add context...
5450
5451
  		 perf_overflow_handler_t overflow_handler,
  		 void *context)
0793a61d4   Thomas Gleixner   performance count...
5452
  {
51b0fe395   Peter Zijlstra   perf: Deconstify ...
5453
  	struct pmu *pmu;
cdd6c482c   Ingo Molnar   perf: Do the big ...
5454
5455
  	struct perf_event *event;
  	struct hw_perf_event *hwc;
d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5456
  	long err;
0793a61d4   Thomas Gleixner   performance count...
5457

66832eb4b   Oleg Nesterov   perf: Validate cp...
5458
5459
5460
5461
  	if ((unsigned)cpu >= nr_cpu_ids) {
  		if (!task || cpu != -1)
  			return ERR_PTR(-EINVAL);
  	}
c3f00c702   Peter Zijlstra   perf: Separate fi...
5462
  	event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482c   Ingo Molnar   perf: Do the big ...
5463
  	if (!event)
d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5464
  		return ERR_PTR(-ENOMEM);
0793a61d4   Thomas Gleixner   performance count...
5465

04289bb98   Ingo Molnar   perf counters: ad...
5466
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
5467
  	 * Single events are their own group leaders, with an
04289bb98   Ingo Molnar   perf counters: ad...
5468
5469
5470
  	 * empty sibling list:
  	 */
  	if (!group_leader)
cdd6c482c   Ingo Molnar   perf: Do the big ...
5471
  		group_leader = event;
04289bb98   Ingo Molnar   perf counters: ad...
5472

cdd6c482c   Ingo Molnar   perf: Do the big ...
5473
5474
  	mutex_init(&event->child_mutex);
  	INIT_LIST_HEAD(&event->child_list);
fccc714b3   Peter Zijlstra   perf_counter: San...
5475

cdd6c482c   Ingo Molnar   perf: Do the big ...
5476
5477
5478
5479
  	INIT_LIST_HEAD(&event->group_entry);
  	INIT_LIST_HEAD(&event->event_entry);
  	INIT_LIST_HEAD(&event->sibling_list);
  	init_waitqueue_head(&event->waitq);
e360adbe2   Peter Zijlstra   irq_work: Add gen...
5480
  	init_irq_work(&event->pending, perf_pending_event);
0793a61d4   Thomas Gleixner   performance count...
5481

cdd6c482c   Ingo Molnar   perf: Do the big ...
5482
  	mutex_init(&event->mmap_mutex);
7b732a750   Peter Zijlstra   perf_counter: new...
5483

cdd6c482c   Ingo Molnar   perf: Do the big ...
5484
5485
5486
5487
  	event->cpu		= cpu;
  	event->attr		= *attr;
  	event->group_leader	= group_leader;
  	event->pmu		= NULL;
cdd6c482c   Ingo Molnar   perf: Do the big ...
5488
  	event->oncpu		= -1;
a96bbc164   Peter Zijlstra   perf_counter: Fix...
5489

cdd6c482c   Ingo Molnar   perf: Do the big ...
5490
  	event->parent		= parent_event;
b84fbc9fb   Peter Zijlstra   perf_counter: Pus...
5491

cdd6c482c   Ingo Molnar   perf: Do the big ...
5492
5493
  	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
  	event->id		= atomic64_inc_return(&perf_event_id);
a96bbc164   Peter Zijlstra   perf_counter: Fix...
5494

cdd6c482c   Ingo Molnar   perf: Do the big ...
5495
  	event->state		= PERF_EVENT_STATE_INACTIVE;
329d876d6   Ingo Molnar   perf_counter: Ini...
5496

d580ff869   Peter Zijlstra   perf, hw_breakpoi...
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
  	if (task) {
  		event->attach_state = PERF_ATTACH_TASK;
  #ifdef CONFIG_HAVE_HW_BREAKPOINT
  		/*
  		 * hw_breakpoint is a bit difficult here..
  		 */
  		if (attr->type == PERF_TYPE_BREAKPOINT)
  			event->hw.bp_target = task;
  #endif
  	}
4dc0da869   Avi Kivity   perf: Add context...
5507
  	if (!overflow_handler && parent_event) {
b326e9560   Frederic Weisbecker   hw-breakpoints: U...
5508
  		overflow_handler = parent_event->overflow_handler;
4dc0da869   Avi Kivity   perf: Add context...
5509
5510
  		context = parent_event->overflow_handler_context;
  	}
66832eb4b   Oleg Nesterov   perf: Validate cp...
5511

b326e9560   Frederic Weisbecker   hw-breakpoints: U...
5512
  	event->overflow_handler	= overflow_handler;
4dc0da869   Avi Kivity   perf: Add context...
5513
  	event->overflow_handler_context = context;
97eaf5300   Frederic Weisbecker   perf/core: Add a ...
5514

0d48696f8   Peter Zijlstra   perf_counter: Ren...
5515
  	if (attr->disabled)
cdd6c482c   Ingo Molnar   perf: Do the big ...
5516
  		event->state = PERF_EVENT_STATE_OFF;
a86ed5085   Ingo Molnar   perfcounters: use...
5517

4aeb0b423   Robert Richter   perfcounters: ren...
5518
  	pmu = NULL;
b8e83514b   Peter Zijlstra   perf_counter: rev...
5519

cdd6c482c   Ingo Molnar   perf: Do the big ...
5520
  	hwc = &event->hw;
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
5521
  	hwc->sample_period = attr->sample_period;
0d48696f8   Peter Zijlstra   perf_counter: Ren...
5522
  	if (attr->freq && attr->sample_freq)
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
5523
  		hwc->sample_period = 1;
eced1dfcf   Peter Zijlstra   perf_counter: Fix...
5524
  	hwc->last_period = hwc->sample_period;
bd2b5b128   Peter Zijlstra   perf_counter: Mor...
5525

e78505958   Peter Zijlstra   perf: Convert per...
5526
  	local64_set(&hwc->period_left, hwc->sample_period);
60db5e09c   Peter Zijlstra   perf_counter: fre...
5527

2023b3592   Peter Zijlstra   perf_counter: inh...
5528
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
5529
  	 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b3592   Peter Zijlstra   perf_counter: inh...
5530
  	 */
3dab77fb1   Peter Zijlstra   perf: Rework/fix ...
5531
  	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
2023b3592   Peter Zijlstra   perf_counter: inh...
5532
  		goto done;
b0a873ebb   Peter Zijlstra   perf: Register PM...
5533
  	pmu = perf_init_event(event);
974802eaa   Peter Zijlstra   perf_counter: Add...
5534

d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5535
5536
  done:
  	err = 0;
4aeb0b423   Robert Richter   perfcounters: ren...
5537
  	if (!pmu)
d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5538
  		err = -EINVAL;
4aeb0b423   Robert Richter   perfcounters: ren...
5539
5540
  	else if (IS_ERR(pmu))
  		err = PTR_ERR(pmu);
5c92d1241   Ingo Molnar   perf counters: im...
5541

d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5542
  	if (err) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
5543
5544
5545
  		if (event->ns)
  			put_pid_ns(event->ns);
  		kfree(event);
d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5546
  		return ERR_PTR(err);
621a01eac   Ingo Molnar   perf counters: hw...
5547
  	}
d5d2bc0dd   Paul Mackerras   perf_counter: mak...
5548

cdd6c482c   Ingo Molnar   perf: Do the big ...
5549
  	event->pmu = pmu;
0793a61d4   Thomas Gleixner   performance count...
5550

cdd6c482c   Ingo Molnar   perf: Do the big ...
5551
  	if (!event->parent) {
82cd6def9   Peter Zijlstra   perf: Use jump_la...
5552
  		if (event->attach_state & PERF_ATTACH_TASK)
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5553
  			jump_label_inc(&perf_sched_events);
3af9e8592   Eric B Munson   perf: Add non-exe...
5554
  		if (event->attr.mmap || event->attr.mmap_data)
cdd6c482c   Ingo Molnar   perf: Do the big ...
5555
5556
5557
5558
5559
  			atomic_inc(&nr_mmap_events);
  		if (event->attr.comm)
  			atomic_inc(&nr_comm_events);
  		if (event->attr.task)
  			atomic_inc(&nr_task_events);
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
5560
5561
5562
5563
5564
5565
5566
  		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
  			err = get_callchain_buffers();
  			if (err) {
  				free_event(event);
  				return ERR_PTR(err);
  			}
  		}
f344011cc   Peter Zijlstra   perf_counter: Opt...
5567
  	}
9ee318a78   Peter Zijlstra   perf_counter: opt...
5568

cdd6c482c   Ingo Molnar   perf: Do the big ...
5569
  	return event;
0793a61d4   Thomas Gleixner   performance count...
5570
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
5571
5572
  static int perf_copy_attr(struct perf_event_attr __user *uattr,
  			  struct perf_event_attr *attr)
974802eaa   Peter Zijlstra   perf_counter: Add...
5573
  {
974802eaa   Peter Zijlstra   perf_counter: Add...
5574
  	u32 size;
cdf8073d6   Ian Schram   perf_counter: Fix...
5575
  	int ret;
974802eaa   Peter Zijlstra   perf_counter: Add...
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
  
  	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
  		return -EFAULT;
  
  	/*
  	 * zero the full structure, so that a short copy will be nice.
  	 */
  	memset(attr, 0, sizeof(*attr));
  
  	ret = get_user(size, &uattr->size);
  	if (ret)
  		return ret;
  
  	if (size > PAGE_SIZE)	/* silly large */
  		goto err_size;
  
  	if (!size)		/* abi compat */
  		size = PERF_ATTR_SIZE_VER0;
  
  	if (size < PERF_ATTR_SIZE_VER0)
  		goto err_size;
  
  	/*
  	 * If we're handed a bigger struct than we know of,
cdf8073d6   Ian Schram   perf_counter: Fix...
5600
5601
5602
  	 * ensure all the unknown bits are 0 - i.e. new
  	 * user-space does not rely on any kernel feature
  	 * extensions we don't know about yet.
974802eaa   Peter Zijlstra   perf_counter: Add...
5603
5604
  	 */
  	if (size > sizeof(*attr)) {
cdf8073d6   Ian Schram   perf_counter: Fix...
5605
5606
5607
  		unsigned char __user *addr;
  		unsigned char __user *end;
  		unsigned char val;
974802eaa   Peter Zijlstra   perf_counter: Add...
5608

cdf8073d6   Ian Schram   perf_counter: Fix...
5609
5610
  		addr = (void __user *)uattr + sizeof(*attr);
  		end  = (void __user *)uattr + size;
974802eaa   Peter Zijlstra   perf_counter: Add...
5611

cdf8073d6   Ian Schram   perf_counter: Fix...
5612
  		for (; addr < end; addr++) {
974802eaa   Peter Zijlstra   perf_counter: Add...
5613
5614
5615
5616
5617
5618
  			ret = get_user(val, addr);
  			if (ret)
  				return ret;
  			if (val)
  				goto err_size;
  		}
b3e62e350   Xiao Guangrong   perf_counter: Fix...
5619
  		size = sizeof(*attr);
974802eaa   Peter Zijlstra   perf_counter: Add...
5620
5621
5622
5623
5624
  	}
  
  	ret = copy_from_user(attr, uattr, size);
  	if (ret)
  		return -EFAULT;
cd757645f   Mahesh Salgaonkar   perf: Make bp_len...
5625
  	if (attr->__reserved_1)
974802eaa   Peter Zijlstra   perf_counter: Add...
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
  		return -EINVAL;
  
  	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
  		return -EINVAL;
  
  	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
  		return -EINVAL;
  
  out:
  	return ret;
  
  err_size:
  	put_user(sizeof(*attr), &uattr->size);
  	ret = -E2BIG;
  	goto out;
  }
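/*
 * Illustrative user-space sketch (not part of this file): the size handshake
 * above means callers zero the attr and set attr.size to the size they were
 * built against.  A bigger-than-known attr is accepted as long as the extra
 * bytes are zero; otherwise the open fails with E2BIG and the kernel writes
 * the size it does understand back into uattr->size.  Headers and most error
 * handling are omitted; variable names are assumptions for illustration.
 */
#if 0	/* example only, not kernel code */
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);	/* the PERF_ATTR_SIZE_VERx known at build time */
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0 && errno == E2BIG) {
		/* attr.size now holds the largest size this kernel accepts */
	}
#endif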
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5642
5643
  static int
  perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c277   Peter Zijlstra   perf_counter: All...
5644
  {
76369139c   Frederic Weisbecker   perf: Split up bu...
5645
  	struct ring_buffer *rb = NULL, *old_rb = NULL;
a4be7c277   Peter Zijlstra   perf_counter: All...
5646
  	int ret = -EINVAL;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5647
  	if (!output_event)
a4be7c277   Peter Zijlstra   perf_counter: All...
5648
  		goto set;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5649
5650
  	/* don't allow circular references */
  	if (event == output_event)
a4be7c277   Peter Zijlstra   perf_counter: All...
5651
  		goto out;
0f139300c   Peter Zijlstra   perf: Ensure that...
5652
5653
5654
5655
5656
5657
5658
  	/*
  	 * Don't allow cross-cpu buffers
  	 */
  	if (output_event->cpu != event->cpu)
  		goto out;
  
  	/*
76369139c   Frederic Weisbecker   perf: Split up bu...
5659
  	 * If it's not a per-cpu rb, it must be the same task.
0f139300c   Peter Zijlstra   perf: Ensure that...
5660
5661
5662
  	 */
  	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
  		goto out;
a4be7c277   Peter Zijlstra   perf_counter: All...
5663
  set:
cdd6c482c   Ingo Molnar   perf: Do the big ...
5664
  	mutex_lock(&event->mmap_mutex);
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5665
5666
5667
  	/* Can't redirect output if we've got an active mmap() */
  	if (atomic_read(&event->mmap_count))
  		goto unlock;
a4be7c277   Peter Zijlstra   perf_counter: All...
5668

ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5669
  	if (output_event) {
76369139c   Frederic Weisbecker   perf: Split up bu...
5670
5671
5672
  		/* get the rb we want to redirect to */
  		rb = ring_buffer_get(output_event);
  		if (!rb)
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5673
  			goto unlock;
a4be7c277   Peter Zijlstra   perf_counter: All...
5674
  	}
76369139c   Frederic Weisbecker   perf: Split up bu...
5675
5676
  	old_rb = event->rb;
  	rcu_assign_pointer(event->rb, rb);
a4be7c277   Peter Zijlstra   perf_counter: All...
5677
  	ret = 0;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5678
5679
  unlock:
  	mutex_unlock(&event->mmap_mutex);
76369139c   Frederic Weisbecker   perf: Split up bu...
5680
5681
  	if (old_rb)
  		ring_buffer_put(old_rb);
a4be7c277   Peter Zijlstra   perf_counter: All...
5682
  out:
a4be7c277   Peter Zijlstra   perf_counter: All...
5683
5684
  	return ret;
  }
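/*
 * Illustrative user-space sketch (not part of this file): besides the
 * PERF_FLAG_FD_OUTPUT open flag, this redirection is reachable through the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl, which makes event_fd emit its samples
 * into the ring buffer already mmap()ed on target_fd, subject to the
 * same-cpu/same-context checks above.  Variable names are assumptions for
 * illustration.
 */
#if 0	/* example only, not kernel code */
	if (ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd) < 0)
		perror("PERF_EVENT_IOC_SET_OUTPUT");
#endif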
0793a61d4   Thomas Gleixner   performance count...
5685
  /**
cdd6c482c   Ingo Molnar   perf: Do the big ...
5686
   * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a3810   Ingo Molnar   perf counters: re...
5687
   *
cdd6c482c   Ingo Molnar   perf: Do the big ...
5688
   * @attr_uptr:	event_id type attributes for monitoring/sampling
0793a61d4   Thomas Gleixner   performance count...
5689
   * @pid:		target pid
9f66a3810   Ingo Molnar   perf counters: re...
5690
   * @cpu:		target cpu
cdd6c482c   Ingo Molnar   perf: Do the big ...
5691
   * @group_fd:		group leader event fd
0793a61d4   Thomas Gleixner   performance count...
5692
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
5693
5694
  SYSCALL_DEFINE5(perf_event_open,
  		struct perf_event_attr __user *, attr_uptr,
2743a5b0f   Paul Mackerras   perfcounters: pro...
5695
  		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d4   Thomas Gleixner   performance count...
5696
  {
b04243ef7   Peter Zijlstra   perf: Complete so...
5697
5698
  	struct perf_event *group_leader = NULL, *output_event = NULL;
  	struct perf_event *event, *sibling;
cdd6c482c   Ingo Molnar   perf: Do the big ...
5699
5700
5701
  	struct perf_event_attr attr;
  	struct perf_event_context *ctx;
  	struct file *event_file = NULL;
04289bb98   Ingo Molnar   perf counters: ad...
5702
  	struct file *group_file = NULL;
38a81da22   Matt Helsley   perf events: Clea...
5703
  	struct task_struct *task = NULL;
89a1e1873   Peter Zijlstra   perf: Provide a s...
5704
  	struct pmu *pmu;
ea635c64e   Al Viro   Fix racy use of a...
5705
  	int event_fd;
b04243ef7   Peter Zijlstra   perf: Complete so...
5706
  	int move_group = 0;
04289bb98   Ingo Molnar   perf counters: ad...
5707
  	int fput_needed = 0;
dc86cabe4   Ingo Molnar   perf_counter: Fix...
5708
  	int err;
0793a61d4   Thomas Gleixner   performance count...
5709

2743a5b0f   Paul Mackerras   perfcounters: pro...
5710
  	/* for future expandability... */
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5711
  	if (flags & ~PERF_FLAG_ALL)
2743a5b0f   Paul Mackerras   perfcounters: pro...
5712
  		return -EINVAL;
dc86cabe4   Ingo Molnar   perf_counter: Fix...
5713
5714
5715
  	err = perf_copy_attr(attr_uptr, &attr);
  	if (err)
  		return err;
eab656ae0   Thomas Gleixner   perf counters: cl...
5716

0764771da   Peter Zijlstra   perf_counter: Mor...
5717
5718
5719
5720
  	if (!attr.exclude_kernel) {
  		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
  			return -EACCES;
  	}
df58ab24b   Peter Zijlstra   perf_counter: Ren...
5721
  	if (attr.freq) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
5722
  		if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24b   Peter Zijlstra   perf_counter: Ren...
5723
5724
  			return -EINVAL;
  	}
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5725
5726
5727
5728
5729
5730
5731
5732
  	/*
  	 * In cgroup mode, the pid argument is used to pass the fd
  	 * opened to the cgroup directory in cgroupfs. The cpu argument
  	 * designates the cpu on which to monitor threads from that
  	 * cgroup.
  	 */
  	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
  		return -EINVAL;
ea635c64e   Al Viro   Fix racy use of a...
5733
5734
5735
  	event_fd = get_unused_fd_flags(O_RDWR);
  	if (event_fd < 0)
  		return event_fd;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5736
5737
5738
5739
  	if (group_fd != -1) {
  		group_leader = perf_fget_light(group_fd, &fput_needed);
  		if (IS_ERR(group_leader)) {
  			err = PTR_ERR(group_leader);
d14b12d7a   Stephane Eranian   perf_events: Fix ...
5740
  			goto err_fd;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5741
5742
5743
5744
5745
5746
5747
  		}
  		group_file = group_leader->filp;
  		if (flags & PERF_FLAG_FD_OUTPUT)
  			output_event = group_leader;
  		if (flags & PERF_FLAG_FD_NO_GROUP)
  			group_leader = NULL;
  	}
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5748
  	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5cb   Peter Zijlstra   perf: Find task b...
5749
5750
5751
5752
5753
5754
  		task = find_lively_task_by_vpid(pid);
  		if (IS_ERR(task)) {
  			err = PTR_ERR(task);
  			goto err_group_fd;
  		}
  	}
4dc0da869   Avi Kivity   perf: Add context...
5755
5756
  	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
  				 NULL, NULL);
d14b12d7a   Stephane Eranian   perf_events: Fix ...
5757
5758
  	if (IS_ERR(event)) {
  		err = PTR_ERR(event);
c6be5a5cb   Peter Zijlstra   perf: Find task b...
5759
  		goto err_task;
d14b12d7a   Stephane Eranian   perf_events: Fix ...
5760
  	}
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5761
5762
5763
5764
  	if (flags & PERF_FLAG_PID_CGROUP) {
  		err = perf_cgroup_connect(pid, event, &attr, group_leader);
  		if (err)
  			goto err_alloc;
08309379b   Peter Zijlstra   perf: Fix cgroup ...
5765
5766
5767
5768
5769
5770
5771
  		/*
  		 * one more event:
  		 * - that has cgroup constraint on event->cpu
  		 * - that may need work on context switch
  		 */
  		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
  		jump_label_inc(&perf_sched_events);
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
5772
  	}
ccff286d8   Ingo Molnar   perf counters: gr...
5773
  	/*
89a1e1873   Peter Zijlstra   perf: Provide a s...
5774
5775
5776
5777
  	 * Special case software events and allow them to be part of
  	 * any hardware group.
  	 */
  	pmu = event->pmu;
b04243ef7   Peter Zijlstra   perf: Complete so...
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
  
  	if (group_leader &&
  	    (is_software_event(event) != is_software_event(group_leader))) {
  		if (is_software_event(event)) {
  			/*
  			 * If event and group_leader are not both a software
  			 * event, and event is, then group leader is not.
  			 *
  			 * Allow the addition of software events to !software
  			 * groups, this is safe because software events never
  			 * fail to schedule.
  			 */
  			pmu = group_leader->pmu;
  		} else if (is_software_event(group_leader) &&
  			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
  			/*
  			 * In case the group is a pure software group, and we
  			 * try to add a hardware event, move the whole group to
  			 * the hardware context.
  			 */
  			move_group = 1;
  		}
  	}
89a1e1873   Peter Zijlstra   perf: Provide a s...
5801
5802
5803
5804
  
  	/*
  	 * Get the target context (task or percpu):
  	 */
38a81da22   Matt Helsley   perf events: Clea...
5805
  	ctx = find_get_context(pmu, task, cpu);
89a1e1873   Peter Zijlstra   perf: Provide a s...
5806
5807
  	if (IS_ERR(ctx)) {
  		err = PTR_ERR(ctx);
c6be5a5cb   Peter Zijlstra   perf: Find task b...
5808
  		goto err_alloc;
89a1e1873   Peter Zijlstra   perf: Provide a s...
5809
  	}
fd1edb3aa   Peter Zijlstra   perf: Fix task_st...
5810
5811
5812
5813
  	if (task) {
  		put_task_struct(task);
  		task = NULL;
  	}
ccff286d8   Ingo Molnar   perf counters: gr...
5814
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
5815
  	 * Look up the group leader (we will attach this event to it):
04289bb98   Ingo Molnar   perf counters: ad...
5816
  	 */
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5817
  	if (group_leader) {
dc86cabe4   Ingo Molnar   perf_counter: Fix...
5818
  		err = -EINVAL;
04289bb98   Ingo Molnar   perf counters: ad...
5819

04289bb98   Ingo Molnar   perf counters: ad...
5820
  		/*
ccff286d8   Ingo Molnar   perf counters: gr...
5821
5822
5823
5824
  		 * Do not allow a recursive hierarchy (this new sibling
  		 * becoming part of another group-sibling):
  		 */
  		if (group_leader->group_leader != group_leader)
c3f00c702   Peter Zijlstra   perf: Separate fi...
5825
  			goto err_context;
ccff286d8   Ingo Molnar   perf counters: gr...
5826
5827
5828
  		/*
  		 * Do not allow to attach to a group in a different
  		 * task or CPU context:
04289bb98   Ingo Molnar   perf counters: ad...
5829
  		 */
b04243ef7   Peter Zijlstra   perf: Complete so...
5830
5831
5832
5833
5834
5835
5836
  		if (move_group) {
  			if (group_leader->ctx->type != ctx->type)
  				goto err_context;
  		} else {
  			if (group_leader->ctx != ctx)
  				goto err_context;
  		}
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
5837
5838
5839
  		/*
  		 * Only a group leader can be exclusive or pinned
  		 */
0d48696f8   Peter Zijlstra   perf_counter: Ren...
5840
  		if (attr.exclusive || attr.pinned)
c3f00c702   Peter Zijlstra   perf: Separate fi...
5841
  			goto err_context;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5842
5843
5844
5845
5846
  	}
  
  	if (output_event) {
  		err = perf_event_set_output(event, output_event);
  		if (err)
c3f00c702   Peter Zijlstra   perf: Separate fi...
5847
  			goto err_context;
ac9721f3f   Peter Zijlstra   perf_events: Fix ...
5848
  	}
0793a61d4   Thomas Gleixner   performance count...
5849

ea635c64e   Al Viro   Fix racy use of a...
5850
5851
5852
  	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
  	if (IS_ERR(event_file)) {
  		err = PTR_ERR(event_file);
c3f00c702   Peter Zijlstra   perf: Separate fi...
5853
  		goto err_context;
ea635c64e   Al Viro   Fix racy use of a...
5854
  	}
9b51f66dc   Ingo Molnar   perfcounters: imp...
5855

b04243ef7   Peter Zijlstra   perf: Complete so...
5856
5857
5858
5859
  	if (move_group) {
  		struct perf_event_context *gctx = group_leader->ctx;
  
  		mutex_lock(&gctx->mutex);
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
5860
  		perf_remove_from_context(group_leader);
b04243ef7   Peter Zijlstra   perf: Complete so...
5861
5862
  		list_for_each_entry(sibling, &group_leader->sibling_list,
  				    group_entry) {
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
5863
  			perf_remove_from_context(sibling);
b04243ef7   Peter Zijlstra   perf: Complete so...
5864
5865
5866
5867
  			put_ctx(gctx);
  		}
  		mutex_unlock(&gctx->mutex);
  		put_ctx(gctx);
ea635c64e   Al Viro   Fix racy use of a...
5868
  	}
9b51f66dc   Ingo Molnar   perfcounters: imp...
5869

cdd6c482c   Ingo Molnar   perf: Do the big ...
5870
  	event->filp = event_file;
ad3a37de8   Paul Mackerras   perf_counter: Don...
5871
  	WARN_ON_ONCE(ctx->parent_ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
5872
  	mutex_lock(&ctx->mutex);
b04243ef7   Peter Zijlstra   perf: Complete so...
5873
5874
5875
5876
5877
5878
5879
5880
5881
5882
  
  	if (move_group) {
  		perf_install_in_context(ctx, group_leader, cpu);
  		get_ctx(ctx);
  		list_for_each_entry(sibling, &group_leader->sibling_list,
  				    group_entry) {
  			perf_install_in_context(ctx, sibling, cpu);
  			get_ctx(ctx);
  		}
  	}
cdd6c482c   Ingo Molnar   perf: Do the big ...
5883
  	perf_install_in_context(ctx, event, cpu);
ad3a37de8   Paul Mackerras   perf_counter: Don...
5884
  	++ctx->generation;
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
5885
  	perf_unpin_context(ctx);
d859e29fe   Paul Mackerras   perf_counter: Add...
5886
  	mutex_unlock(&ctx->mutex);
9b51f66dc   Ingo Molnar   perfcounters: imp...
5887

cdd6c482c   Ingo Molnar   perf: Do the big ...
5888
  	event->owner = current;
8882135bc   Peter Zijlstra   perf: Fix owner-l...
5889

cdd6c482c   Ingo Molnar   perf: Do the big ...
5890
5891
5892
  	mutex_lock(&current->perf_event_mutex);
  	list_add_tail(&event->owner_entry, &current->perf_event_list);
  	mutex_unlock(&current->perf_event_mutex);
082ff5a27   Peter Zijlstra   perf_counter: Cha...
5893

8a49542c0   Peter Zijlstra   perf_events: Fix ...
5894
  	/*
c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
5895
5896
5897
  	 * Precalculate sample_data sizes
  	 */
  	perf_event__header_size(event);
6844c09d8   Arnaldo Carvalho de Melo   perf events: Sepa...
5898
  	perf_event__id_header_size(event);
c320c7b7d   Arnaldo Carvalho de Melo   perf events: Prec...
5899
5900
  
  	/*
8a49542c0   Peter Zijlstra   perf_events: Fix ...
5901
5902
5903
5904
5905
  	 * Drop the reference on the group_event after placing the
  	 * new event on the sibling_list. This ensures destruction
  	 * of the group leader will find the pointer to itself in
  	 * perf_group_detach().
  	 */
ea635c64e   Al Viro   Fix racy use of a...
5906
5907
5908
  	fput_light(group_file, fput_needed);
  	fd_install(event_fd, event_file);
  	return event_fd;
0793a61d4   Thomas Gleixner   performance count...
5909

c3f00c702   Peter Zijlstra   perf: Separate fi...
5910
  err_context:
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
5911
  	perf_unpin_context(ctx);
ea635c64e   Al Viro   Fix racy use of a...
5912
  	put_ctx(ctx);
c6be5a5cb   Peter Zijlstra   perf: Find task b...
5913
  err_alloc:
ea635c64e   Al Viro   Fix racy use of a...
5914
  	free_event(event);
e7d0bc047   Peter Zijlstra   perf: Fix task re...
5915
5916
5917
  err_task:
  	if (task)
  		put_task_struct(task);
89a1e1873   Peter Zijlstra   perf: Provide a s...
5918
  err_group_fd:
dc86cabe4   Ingo Molnar   perf_counter: Fix...
5919
  	fput_light(group_file, fput_needed);
ea635c64e   Al Viro   Fix racy use of a...
5920
5921
  err_fd:
  	put_unused_fd(event_fd);
dc86cabe4   Ingo Molnar   perf_counter: Fix...
5922
  	return err;
0793a61d4   Thomas Gleixner   performance count...
5923
  }
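/*
 * Illustrative user-space sketch (not part of this file): glibc provides no
 * wrapper for this syscall, so tools conventionally wrap syscall(2) as in
 * the perf_event_open(2) man page and use the group_fd argument to build the
 * event groups handled above.  Error handling is omitted; identifiers are
 * assumptions for illustration.
 */
#if 0	/* example only, not kernel code */
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t cycles, instructions;
	int leader_fd, sibling_fd;

	/* group leader: cycles, created disabled so the group starts atomically */
	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	leader_fd = perf_event_open(&attr, 0, -1, -1, 0);

	/* sibling: instructions, attached to the leader via group_fd */
	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling_fd = perf_event_open(&attr, 0, -1, leader_fd, 0);

	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload ... */
	ioctl(leader_fd, PERF_EVENT_IOC_DISABLE, 0);

	read(leader_fd, &cycles, sizeof(cycles));
	read(sibling_fd, &instructions, sizeof(instructions));
	printf("%llu cycles, %llu instructions\n",
	       (unsigned long long)cycles, (unsigned long long)instructions);
	return 0;
}
#endif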
fb0459d75   Arjan van de Ven   perf/core: Provid...
5924
5925
5926
5927
5928
  /**
   * perf_event_create_kernel_counter
   *
   * @attr: attributes of the counter to create
   * @cpu: cpu in which the counter is bound
38a81da22   Matt Helsley   perf events: Clea...
5929
   * @task: task to profile (NULL for percpu)
fb0459d75   Arjan van de Ven   perf/core: Provid...
5930
5931
5932
   */
  struct perf_event *
  perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da22   Matt Helsley   perf events: Clea...
5933
  				 struct task_struct *task,
4dc0da869   Avi Kivity   perf: Add context...
5934
5935
  				 perf_overflow_handler_t overflow_handler,
  				 void *context)
fb0459d75   Arjan van de Ven   perf/core: Provid...
5936
  {
fb0459d75   Arjan van de Ven   perf/core: Provid...
5937
  	struct perf_event_context *ctx;
c3f00c702   Peter Zijlstra   perf: Separate fi...
5938
  	struct perf_event *event;
fb0459d75   Arjan van de Ven   perf/core: Provid...
5939
  	int err;
d859e29fe   Paul Mackerras   perf_counter: Add...
5940

fb0459d75   Arjan van de Ven   perf/core: Provid...
5941
5942
5943
  	/*
  	 * Get the target context (task or percpu):
  	 */
d859e29fe   Paul Mackerras   perf_counter: Add...
5944

4dc0da869   Avi Kivity   perf: Add context...
5945
5946
  	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
  				 overflow_handler, context);
c3f00c702   Peter Zijlstra   perf: Separate fi...
5947
5948
5949
5950
  	if (IS_ERR(event)) {
  		err = PTR_ERR(event);
  		goto err;
  	}
d859e29fe   Paul Mackerras   perf_counter: Add...
5951

38a81da22   Matt Helsley   perf events: Clea...
5952
  	ctx = find_get_context(event->pmu, task, cpu);
c6567f642   Frederic Weisbecker   hw-breakpoints: I...
5953
5954
  	if (IS_ERR(ctx)) {
  		err = PTR_ERR(ctx);
c3f00c702   Peter Zijlstra   perf: Separate fi...
5955
  		goto err_free;
d859e29fe   Paul Mackerras   perf_counter: Add...
5956
  	}
fb0459d75   Arjan van de Ven   perf/core: Provid...
5957
5958
5959
5960
5961
5962
  
  	event->filp = NULL;
  	WARN_ON_ONCE(ctx->parent_ctx);
  	mutex_lock(&ctx->mutex);
  	perf_install_in_context(ctx, event, cpu);
  	++ctx->generation;
fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
5963
  	perf_unpin_context(ctx);
fb0459d75   Arjan van de Ven   perf/core: Provid...
5964
  	mutex_unlock(&ctx->mutex);
fb0459d75   Arjan van de Ven   perf/core: Provid...
5965
  	return event;
c3f00c702   Peter Zijlstra   perf: Separate fi...
5966
5967
5968
  err_free:
  	free_event(event);
  err:
c6567f642   Frederic Weisbecker   hw-breakpoints: I...
5969
  	return ERR_PTR(err);
9b51f66dc   Ingo Molnar   perfcounters: imp...
5970
  }
fb0459d75   Arjan van de Ven   perf/core: Provid...
5971
  EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
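/*
 * Illustrative in-kernel sketch (not part of this file): a per-CPU cycle
 * counter created through the export above.  A NULL task makes it a per-CPU
 * counter and a NULL overflow handler means pure counting, following the
 * function's own parameter conventions; variable names are assumptions for
 * illustration.
 */
#if 0	/* example only */
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.pinned		= 1,
	};
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
						 NULL, NULL, NULL);
	if (IS_ERR(event))
		pr_warn("cycle counter: %ld\n", PTR_ERR(event));
#endif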
9b51f66dc   Ingo Molnar   perfcounters: imp...
5972

cdd6c482c   Ingo Molnar   perf: Do the big ...
5973
  static void sync_child_event(struct perf_event *child_event,
38b200d67   Peter Zijlstra   perf_counter: Add...
5974
  			       struct task_struct *child)
d859e29fe   Paul Mackerras   perf_counter: Add...
5975
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
5976
  	struct perf_event *parent_event = child_event->parent;
8bc209595   Peter Zijlstra   perf_counter: Fix...
5977
  	u64 child_val;
d859e29fe   Paul Mackerras   perf_counter: Add...
5978

cdd6c482c   Ingo Molnar   perf: Do the big ...
5979
5980
  	if (child_event->attr.inherit_stat)
  		perf_event_read_event(child_event, child);
38b200d67   Peter Zijlstra   perf_counter: Add...
5981

b5e58793c   Peter Zijlstra   perf: Add perf_ev...
5982
  	child_val = perf_event_count(child_event);
d859e29fe   Paul Mackerras   perf_counter: Add...
5983
5984
5985
5986
  
  	/*
  	 * Add back the child's count to the parent's count:
  	 */
a6e6dea68   Peter Zijlstra   perf: Add perf_ev...
5987
  	atomic64_add(child_val, &parent_event->child_count);
cdd6c482c   Ingo Molnar   perf: Do the big ...
5988
5989
5990
5991
  	atomic64_add(child_event->total_time_enabled,
  		     &parent_event->child_total_time_enabled);
  	atomic64_add(child_event->total_time_running,
  		     &parent_event->child_total_time_running);
d859e29fe   Paul Mackerras   perf_counter: Add...
5992
5993
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
5994
  	 * Remove this event from the parent's list
d859e29fe   Paul Mackerras   perf_counter: Add...
5995
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
5996
5997
5998
5999
  	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  	mutex_lock(&parent_event->child_mutex);
  	list_del_init(&child_event->child_list);
  	mutex_unlock(&parent_event->child_mutex);
d859e29fe   Paul Mackerras   perf_counter: Add...
6000
6001
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
6002
  	 * Release the parent event, if this was the last
d859e29fe   Paul Mackerras   perf_counter: Add...
6003
6004
  	 * reference to it.
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
6005
  	fput(parent_event->filp);
d859e29fe   Paul Mackerras   perf_counter: Add...
6006
  }
9b51f66dc   Ingo Molnar   perfcounters: imp...
6007
  static void
cdd6c482c   Ingo Molnar   perf: Do the big ...
6008
6009
  __perf_event_exit_task(struct perf_event *child_event,
  			 struct perf_event_context *child_ctx,
38b200d67   Peter Zijlstra   perf_counter: Add...
6010
  			 struct task_struct *child)
9b51f66dc   Ingo Molnar   perfcounters: imp...
6011
  {
38b435b16   Peter Zijlstra   perf: Fix tear-do...
6012
6013
6014
6015
6016
  	if (child_event->parent) {
  		raw_spin_lock_irq(&child_ctx->lock);
  		perf_group_detach(child_event);
  		raw_spin_unlock_irq(&child_ctx->lock);
  	}
9b51f66dc   Ingo Molnar   perfcounters: imp...
6017

fe4b04fa3   Peter Zijlstra   perf: Cure task_o...
6018
  	perf_remove_from_context(child_event);
0cc0c027d   Ingo Molnar   perfcounters: rel...
6019

9b51f66dc   Ingo Molnar   perfcounters: imp...
6020
  	/*
38b435b16   Peter Zijlstra   perf: Fix tear-do...
6021
  	 * It can happen that the parent exits first, and has events
9b51f66dc   Ingo Molnar   perfcounters: imp...
6022
  	 * that are still around due to the child reference. These
38b435b16   Peter Zijlstra   perf: Fix tear-do...
6023
  	 * events need to be zapped.
9b51f66dc   Ingo Molnar   perfcounters: imp...
6024
  	 */
38b435b16   Peter Zijlstra   perf: Fix tear-do...
6025
  	if (child_event->parent) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
6026
6027
  		sync_child_event(child_event, child);
  		free_event(child_event);
4bcf349a0   Paul Mackerras   perfcounters: fix...
6028
  	}
9b51f66dc   Ingo Molnar   perfcounters: imp...
6029
  }
8dc85d547   Peter Zijlstra   perf: Multiple ta...
6030
  static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66dc   Ingo Molnar   perfcounters: imp...
6031
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
6032
6033
  	struct perf_event *child_event, *tmp;
  	struct perf_event_context *child_ctx;
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
6034
  	unsigned long flags;
9b51f66dc   Ingo Molnar   perfcounters: imp...
6035

8dc85d547   Peter Zijlstra   perf: Multiple ta...
6036
  	if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
6037
  		perf_event_task(child, NULL, 0);
9b51f66dc   Ingo Molnar   perfcounters: imp...
6038
  		return;
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
6039
  	}
9b51f66dc   Ingo Molnar   perfcounters: imp...
6040

a63eaf34a   Paul Mackerras   perf_counter: Dyn...
6041
  	local_irq_save(flags);
ad3a37de8   Paul Mackerras   perf_counter: Don...
6042
6043
6044
6045
6046
6047
  	/*
  	 * We can't reschedule here because interrupts are disabled,
  	 * and either child is current or it is a task that can't be
  	 * scheduled, so we are now safe from rescheduling changing
  	 * our context.
  	 */
806839b22   Oleg Nesterov   perf: perf_event_...
6048
  	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f76690   Paul Mackerras   perf_counter: Fix...
6049
6050
6051
  
  	/*
  	 * Take the context lock here so that if find_get_context is
cdd6c482c   Ingo Molnar   perf: Do the big ...
6052
  	 * reading child->perf_event_ctxp, we wait until it has
c93f76690   Paul Mackerras   perf_counter: Fix...
6053
6054
  	 * incremented the context's refcount before we do put_ctx below.
  	 */
e625cce1b   Thomas Gleixner   perf_event: Conve...
6055
  	raw_spin_lock(&child_ctx->lock);
04dc2dbbf   Peter Zijlstra   perf: Remove task...
6056
  	task_ctx_sched_out(child_ctx);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
6057
  	child->perf_event_ctxp[ctxn] = NULL;
71a851b4d   Peter Zijlstra   perf_counter: Sto...
6058
6059
6060
  	/*
  	 * If this context is a clone; unclone it so it can't get
  	 * swapped to another process while we're removing all
cdd6c482c   Ingo Molnar   perf: Do the big ...
6061
  	 * the events from it.
71a851b4d   Peter Zijlstra   perf_counter: Sto...
6062
6063
  	 */
  	unclone_ctx(child_ctx);
5e942bb33   Peter Zijlstra   perf_events: Upda...
6064
  	update_context_time(child_ctx);
e625cce1b   Thomas Gleixner   perf_event: Conve...
6065
  	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
6066
6067
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
6068
6069
6070
  	 * Report the task dead after unscheduling the events so that we
  	 * won't get any samples after PERF_RECORD_EXIT. We can however still
  	 * get a few PERF_RECORD_READ events.
9f498cc5b   Peter Zijlstra   perf_counter: Ful...
6071
  	 */
cdd6c482c   Ingo Molnar   perf: Do the big ...
6072
  	perf_event_task(child, child_ctx, 0);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
6073

66fff2248   Peter Zijlstra   perf_counter: Ann...
6074
6075
6076
  	/*
  	 * We can recurse on the same lock type through:
  	 *
cdd6c482c   Ingo Molnar   perf: Do the big ...
6077
6078
6079
  	 *   __perf_event_exit_task()
  	 *     sync_child_event()
  	 *       fput(parent_event->filp)
66fff2248   Peter Zijlstra   perf_counter: Ann...
6080
6081
6082
6083
6084
  	 *         perf_release()
  	 *           mutex_lock(&ctx->mutex)
  	 *
  	 * But since it's the parent context it won't be the same instance.
  	 */
a0507c84b   Peter Zijlstra   perf: Annotate pe...
6085
  	mutex_lock(&child_ctx->mutex);
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
6086

8bc209595   Peter Zijlstra   perf_counter: Fix...
6087
  again:
889ff0150   Frederic Weisbecker   perf/core: Split ...
6088
6089
6090
6091
6092
  	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
  				 group_entry)
  		__perf_event_exit_task(child_event, child_ctx, child);
  
  	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
65abc8653   Ingo Molnar   perf_counter: Ren...
6093
  				 group_entry)
cdd6c482c   Ingo Molnar   perf: Do the big ...
6094
  		__perf_event_exit_task(child_event, child_ctx, child);
8bc209595   Peter Zijlstra   perf_counter: Fix...
6095
6096
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
6097
  	 * If the last event was a group event, it will have appended all
8bc209595   Peter Zijlstra   perf_counter: Fix...
6098
6099
6100
  	 * its siblings to the list, but we obtained 'tmp' before that which
  	 * will still point to the list head terminating the iteration.
  	 */
889ff0150   Frederic Weisbecker   perf/core: Split ...
6101
6102
  	if (!list_empty(&child_ctx->pinned_groups) ||
  	    !list_empty(&child_ctx->flexible_groups))
8bc209595   Peter Zijlstra   perf_counter: Fix...
6103
  		goto again;
a63eaf34a   Paul Mackerras   perf_counter: Dyn...
6104
6105
6106
6107
  
  	mutex_unlock(&child_ctx->mutex);
  
  	put_ctx(child_ctx);
9b51f66dc   Ingo Molnar   perfcounters: imp...
6108
  }
8dc85d547   Peter Zijlstra   perf: Multiple ta...
6109
6110
6111
6112
6113
  /*
   * When a child task exits, feed back event values to parent events.
   */
  void perf_event_exit_task(struct task_struct *child)
  {
8882135bc   Peter Zijlstra   perf: Fix owner-l...
6114
  	struct perf_event *event, *tmp;
8dc85d547   Peter Zijlstra   perf: Multiple ta...
6115
  	int ctxn;
8882135bc   Peter Zijlstra   perf: Fix owner-l...
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
6129
  	mutex_lock(&child->perf_event_mutex);
  	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
  				 owner_entry) {
  		list_del_init(&event->owner_entry);
  
  		/*
  		 * Ensure the list deletion is visible before we clear
  		 * the owner, closes a race against perf_release() where
  		 * we need to serialize on the owner->perf_event_mutex.
  		 */
  		smp_wmb();
  		event->owner = NULL;
  	}
  	mutex_unlock(&child->perf_event_mutex);
8dc85d547   Peter Zijlstra   perf: Multiple ta...
6130
6131
6132
  	for_each_task_context_nr(ctxn)
  		perf_event_exit_task_context(child, ctxn);
  }
889ff0150   Frederic Weisbecker   perf/core: Split ...
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
  static void perf_free_event(struct perf_event *event,
  			    struct perf_event_context *ctx)
  {
  	struct perf_event *parent = event->parent;
  
  	if (WARN_ON_ONCE(!parent))
  		return;
  
  	mutex_lock(&parent->child_mutex);
  	list_del_init(&event->child_list);
  	mutex_unlock(&parent->child_mutex);
  
  	fput(parent->filp);
8a49542c0   Peter Zijlstra   perf_events: Fix ...
6146
  	perf_group_detach(event);
889ff0150   Frederic Weisbecker   perf/core: Split ...
6147
6148
6149
  	list_del_event(event, ctx);
  	free_event(event);
  }
/*
 * Free an unexposed, unused context, as created by inheritance in
 * perf_event_init_task() below; used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
				group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
				!list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
  }
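/*
 * Sanity check for the late task-teardown path: every context should
 * have been detached by now, so all the perf_event_ctxp[] slots are
 * expected to be NULL.
 */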
  void perf_event_delayed_put(struct task_struct *task)
  {
  	int ctxn;
  
  	for_each_task_context_nr(ctxn)
  		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
  }
  /*
 * inherit an event from parent task to child task:
   */
  static struct perf_event *
  inherit_event(struct perf_event *parent_event,
  	      struct task_struct *parent,
  	      struct perf_event_context *parent_ctx,
  	      struct task_struct *child,
  	      struct perf_event *group_leader,
  	      struct perf_event_context *child_ctx)
  {
  	struct perf_event *child_event;
	unsigned long flags;
  
  	/*
  	 * Instead of creating recursive hierarchies of events,
  	 * we link inherited events back to the original parent,
  	 * which has a filp for sure, which we use as the reference
  	 * count:
  	 */
  	if (parent_event->parent)
  		parent_event = parent_event->parent;
  
  	child_event = perf_event_alloc(&parent_event->attr,
  					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
					   NULL, NULL);
  	if (IS_ERR(child_event))
  		return child_event;
  	get_ctx(child_ctx);
  
  	/*
  	 * Make the child state follow the state of the parent event,
  	 * not its attr.disabled bit.  We hold the parent's mutex,
  	 * so we won't race with perf_event_{en, dis}able_family.
  	 */
  	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
  		child_event->state = PERF_EVENT_STATE_INACTIVE;
  	else
  		child_event->state = PERF_EVENT_STATE_OFF;
  
  	if (parent_event->attr.freq) {
  		u64 sample_period = parent_event->hw.sample_period;
  		struct hw_perf_event *hwc = &child_event->hw;
  
  		hwc->sample_period = sample_period;
  		hwc->last_period   = sample_period;
  
  		local64_set(&hwc->period_left, sample_period);
  	}
  
  	child_event->ctx = child_ctx;
  	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;
  
	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
  
  	/*
  	 * Get a reference to the parent filp - we will fput it
  	 * when the child event exits. This is safe to do because
  	 * we are in the parent and we know that the filp still
  	 * exists and has a nonzero count:
  	 */
  	atomic_long_inc(&parent_event->filp->f_count);
  
  	/*
  	 * Link this into the parent event's child list
  	 */
  	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  	mutex_lock(&parent_event->child_mutex);
  	list_add_tail(&child_event->child_list, &parent_event->child_list);
  	mutex_unlock(&parent_event->child_mutex);
  
  	return child_event;
  }
  
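/*
 * Inherit a whole group: clone the group leader into the child
 * context first, then clone each sibling with the new leader as its
 * group leader.
 */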
  static int inherit_group(struct perf_event *parent_event,
  	      struct task_struct *parent,
  	      struct perf_event_context *parent_ctx,
  	      struct task_struct *child,
  	      struct perf_event_context *child_ctx)
  {
  	struct perf_event *leader;
  	struct perf_event *sub;
  	struct perf_event *child_ctr;
  
  	leader = inherit_event(parent_event, parent, parent_ctx,
  				 child, NULL, child_ctx);
  	if (IS_ERR(leader))
  		return PTR_ERR(leader);
  	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
  		child_ctr = inherit_event(sub, parent, parent_ctx,
  					    child, leader, child_ctx);
  		if (IS_ERR(child_ctr))
  			return PTR_ERR(child_ctr);
  	}
  	return 0;
  }
  
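/*
 * Inherit one group from the parent context into the child,
 * allocating the child's context on first use.  *inherited_all is
 * cleared when an event is not marked for inheritance or when
 * inheriting it fails, so the caller knows the child context is not
 * an exact clone of the parent's.
 */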
  static int
  inherit_task_group(struct perf_event *event, struct task_struct *parent,
  		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
  		   int *inherited_all)
  {
  	int ret;
	struct perf_event_context *child_ctx;
  
  	if (!event->attr.inherit) {
  		*inherited_all = 0;
  		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
  	if (!child_ctx) {
  		/*
  		 * This is executed from the parent task context, so
  		 * inherit events that have been marked for cloning.
  		 * First allocate and initialize a context for the
  		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
  	}
  
  	ret = inherit_group(event, parent, parent_ctx,
  			    child, child_ctx);
  
  	if (ret)
  		*inherited_all = 0;
  
  	return ret;
  }
  
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}
  /*
   * Initialize the perf_event context in task_struct
   */
  int perf_event_init_task(struct task_struct *child)
  {
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
  	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
  		ret = perf_event_init_context(child, ctxn);
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
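/*
 * Boot-time setup of the per-cpu software event state: initialize
 * each CPU's swevent hashtable mutex and its context rotation list.
 */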
  static void __init perf_event_init_all_cpus(void)
  {
	struct swevent_htable *swhash;
	int cpu;
  
  	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
  	}
  }
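/*
 * Called when a CPU comes up: if software events already have users,
 * allocate the swevent hashlist for this CPU.
 */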
  static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
  	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
  		WARN_ON(!hlist);
  		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
  }
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
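/*
 * Take this CPU's context for the given pmu off the rotation list so
 * that event rotation no longer touches it; the caller must have
 * interrupts disabled (see the WARN_ON below).
 */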
  static void perf_pmu_rotate_stop(struct pmu *pmu)
{
  	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  
  	WARN_ON(!irqs_disabled());
  
  	list_del_init(&cpuctx->rotation_list);
  }
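/*
 * Runs on the CPU that is going away: stop rotation for its context
 * and remove every pinned and flexible event from it.
 */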
  static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
  		__perf_remove_from_context(event);
  }
  
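/*
 * For each registered pmu, tear down its per-cpu context on the dying
 * CPU by running __perf_event_exit_context() there.
 */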
  static void perf_event_exit_cpu_context(int cpu)
  {
  	struct perf_event_context *ctx;
  	struct pmu *pmu;
  	int idx;
  
  	idx = srcu_read_lock(&pmus_srcu);
  	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
  
  		mutex_lock(&ctx->mutex);
  		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
  		mutex_unlock(&ctx->mutex);
  	}
  	srcu_read_unlock(&pmus_srcu, idx);
  }
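/*
 * CPU hotplug teardown: release the CPU's swevent hashlist and detach
 * all events from its per-pmu contexts.
 */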
  static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
  	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
  }
  #else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
  static int
  perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
  		perf_event_exit_cpu(cpu);
  
  	return NOTIFY_OK;
  }
  
  /*
   * Run the perf reboot notifier at the very last possible moment so that
   * the generic watchdog code runs as long as possible.
   */
  static struct notifier_block perf_reboot_notifier = {
  	.notifier_call = perf_reboot,
  	.priority = INT_MIN,
  };
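/*
 * CPU hotplug notifier: set perf state up when a CPU comes up (or a
 * down fails) and tear it down when a CPU is about to go away.
 */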
  static int __cpuinit
  perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
  {
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
  
  	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
  		break;
  
  	default:
  		break;
  	}
  
  	return NOTIFY_OK;
  }
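/*
 * Core initialization: set up per-cpu state, register the built-in
 * software, cpu-clock, task-clock and tracepoint PMUs, and hook into
 * CPU hotplug and reboot notification.
 */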
  void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
  	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
  	perf_pmu_register(&perf_cpu_clock, NULL, -1);
  	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);
  
  	ret = init_hw_breakpoint();
  	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
  }
  
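/*
 * Register the pmu bus and create sysfs devices for all PMUs that
 * were registered before this initcall ran; once pmu_bus_running is
 * set, later registrations create their devices directly.
 */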
  static int __init perf_event_sysfs_init(void)
  {
  	struct pmu *pmu;
  	int ret;
  
  	mutex_lock(&pmus_lock);
  
  	ret = bus_register(&pmu_bus);
  	if (ret)
  		goto unlock;
  
  	list_for_each_entry(pmu, &pmus, entry) {
  		if (!pmu->name || pmu->type < 0)
  			continue;
  
  		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
  	}
  	pmu_bus_running = 1;
  	ret = 0;
  
  unlock:
  	mutex_unlock(&pmus_lock);
  
  	return ret;
  }
  device_initcall(perf_event_sysfs_init);
  
  #ifdef CONFIG_CGROUP_PERF
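/*
 * cgroup subsystem callbacks: allocate a perf_cgroup together with
 * its per-cpu timing info when a cgroup is created, and free both
 * when it is destroyed.
 */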
  static struct cgroup_subsys_state *perf_cgroup_create(
  	struct cgroup_subsys *ss, struct cgroup *cont)
  {
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
  	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
  	if (!jc->info) {
  		kfree(jc);
  		return ERR_PTR(-ENOMEM);
  	}
  	return &jc->css;
  }
  
  static void perf_cgroup_destroy(struct cgroup_subsys *ss,
  				struct cgroup *cont)
  {
  	struct perf_cgroup *jc;
  	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
  			  struct perf_cgroup, css);
  	free_percpu(jc->info);
  	kfree(jc);
  }
  
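/*
 * Runs on the task's CPU via task_function_call(): switch cgroup
 * events out of the old cgroup and in to the new one for this task.
 */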
  static int __perf_cgroup_move(void *info)
  {
  	struct task_struct *task = info;
  	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
  	return 0;
  }
  static void
  perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
  {
  	task_function_call(task, __perf_cgroup_move, task);
  }
  static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
  		struct cgroup *old_cgrp, struct task_struct *task)
  {
  	/*
  	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
  	 * trying to poke a half freed task state from generic code.
  	 */
  	if (!(task->flags & PF_EXITING))
  		return;
  	perf_cgroup_attach_task(cgrp, task);
  }
  
  struct cgroup_subsys perf_subsys = {
  	.name		= "perf_event",
  	.subsys_id	= perf_subsys_id,
  	.create		= perf_cgroup_create,
  	.destroy	= perf_cgroup_destroy,
  	.exit		= perf_cgroup_exit,
  	.attach_task	= perf_cgroup_attach_task,
  };
  #endif /* CONFIG_CGROUP_PERF */