kernel/events/internal.h

  #ifndef _KERNEL_EVENTS_INTERNAL_H
  #define _KERNEL_EVENTS_INTERNAL_H
  #include <linux/hardirq.h>
  #include <linux/uaccess.h>
  
  /* Buffer handling */
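/*
 * rb_alloc() flag: the buffer is mapped writable by user space.  A
 * writable mapping lets user space publish its read position through
 * the user page's data_tail, so the kernel must not run in overwrite
 * mode (see the overwrite field below).
 */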
  #define RING_BUFFER_WRITABLE		0x01
  
  struct ring_buffer {
  	atomic_t			refcount;
  	struct rcu_head			rcu_head;
  #ifdef CONFIG_PERF_USE_VMALLOC
  	struct work_struct		work;
  	int				page_order;	/* allocation order  */
  #endif
  	int				nr_pages;	/* nr of data pages  */
  	int				overwrite;	/* can overwrite itself */
  
	atomic_t			poll;		/* POLL_* flags for wakeups */
  
  	local_t				head;		/* write position    */
  	local_t				nest;		/* nested writers    */
  	local_t				events;		/* event limit       */
  	local_t				wakeup;		/* wakeup stamp      */
  	local_t				lost;		/* nr records lost   */
  
  	long				watermark;	/* wakeup watermark  */
	/* poll() support */
  	spinlock_t			event_lock;
  	struct list_head		event_list;

  	atomic_t			mmap_count;
  	unsigned long			mmap_locked;
  	struct user_struct		*mmap_user;
  	struct perf_event_mmap_page	*user_page;
  	void				*data_pages[0];
  };
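
/*
 * Illustrative sketch (not part of this header): because nr_pages is a
 * power of two, a byte offset wraps onto the page array with a mask
 * rather than a modulo:
 *
 *	pgoff = (offset >> PAGE_SHIFT) & (rb->nr_pages - 1);
 *	addr  = rb->data_pages[pgoff];
 *
 * data_pages[0] is a variable-size trailing array; rb_alloc() sizes
 * the allocation to hold nr_pages page pointers.
 */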
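/*
 * Allocation and wakeup entry points; rb_alloc()/rb_free() are
 * implemented in ring_buffer.c, perf_event_wakeup() in core.c.
 * nr_pages is expected to be 0 or a power of two, as the wrap-around
 * masks in this header rely on it.
 */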
  extern void rb_free(struct ring_buffer *rb);
  extern struct ring_buffer *
  rb_alloc(int nr_pages, long watermark, int cpu, int flags);
  extern void perf_event_wakeup(struct perf_event *event);
  
  extern void
  perf_event_header__init_id(struct perf_event_header *header,
  			   struct perf_sample_data *data,
  			   struct perf_event *event);
  extern void
  perf_event__output_id_sample(struct perf_event *event,
  			     struct perf_output_handle *handle,
  			     struct perf_sample_data *sample);
  
  extern struct page *
  perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  
  #ifdef CONFIG_PERF_USE_VMALLOC
  /*
   * Back perf_mmap() with vmalloc memory.
   *
   * Required for architectures that have d-cache aliasing issues.
   */
  
  static inline int page_order(struct ring_buffer *rb)
  {
  	return rb->page_order;
  }
  
  #else
  
  static inline int page_order(struct ring_buffer *rb)
  {
  	return 0;
  }
  #endif
  static inline unsigned long perf_data_size(struct ring_buffer *rb)
  {
  	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
  }
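
/*
 * Worked example for perf_data_size(): with 4 KiB pages, nr_pages = 8
 * and page_order = 0 give 8 << 12 = 32 KiB of data.  Under
 * CONFIG_PERF_USE_VMALLOC the same buffer may instead be one
 * higher-order allocation, e.g. nr_pages = 1 with page_order = 3.
 */
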
  #define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
  static inline unsigned long						\
  func_name(struct perf_output_handle *handle,				\
  	  const void *buf, unsigned long len)				\
  {									\
  	unsigned long size, written;					\
  									\
  	do {								\
  		size    = min(handle->size, len);			\
  		written = memcpy_func(handle->addr, buf, size);		\
  		written = size - written;				\
  									\
  		len -= written;						\
  		handle->addr += written;				\
  		buf += written;						\
  		handle->size -= written;				\
  		if (!handle->size) {					\
  			struct ring_buffer *rb = handle->rb;		\
  									\
  			handle->page++;					\
  			handle->page &= rb->nr_pages - 1;		\
  			handle->addr = rb->data_pages[handle->page];	\
  			handle->size = PAGE_SIZE << page_order(rb);	\
  		}							\
  	} while (len && written == size);				\
  									\
  	return len;							\
  }
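
/*
 * The generated function returns the number of bytes that could not be
 * written (0 on complete success).  memcpy_func must follow the
 * copy_from_user() convention and return how many bytes were NOT
 * copied, so that "size - written" above is the progress actually
 * made.  An illustrative caller (sketch only):
 *
 *	unsigned long left = __output_copy(handle, buf, len);
 *	if (left)
 *		...	(short write: 'left' bytes were dropped)
 */
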
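/* Plain memcpy() cannot fail: report 0 bytes left uncopied. */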
  static inline unsigned long
  memcpy_common(void *dst, const void *src, unsigned long n)
  {
  	memcpy(dst, src, n);
  	return 0;
  }
  DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
  static inline unsigned long
  memcpy_skip(void *dst, const void *src, unsigned long n)
  {
  	return 0;
  }
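
/*
 * memcpy_skip() copies nothing yet reports complete success, so the
 * generated __output_skip() below only advances the handle, leaving a
 * gap of len bytes in the buffer (used by perf_output_skip()).
 */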

  DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

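/*
 * Fallback when the architecture does not provide its own
 * arch_perf_out_copy_user: copy from user space with page faults
 * disabled, since output may run from NMI and other atomic contexts.
 * On a fault, __copy_from_user_inatomic() returns the number of bytes
 * left, matching the DEFINE_OUTPUT_COPY convention above.
 */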
  #ifndef arch_perf_out_copy_user
  #define arch_perf_out_copy_user arch_perf_out_copy_user
  
  static inline unsigned long
  arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
  {
  	unsigned long ret;
  
  	pagefault_disable();
  	ret = __copy_from_user_inatomic(dst, src, n);
  	pagefault_enable();
  
  	return ret;
  }
  #endif
  
  DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
  /* Callchain handling */
  extern struct perf_callchain_entry *
  perf_callchain(struct perf_event *event, struct pt_regs *regs);
  extern int get_callchain_buffers(void);
  extern void put_callchain_buffers(void);
  
  static inline int get_recursion_context(int *recursion)
  {
  	int rctx;
  
  	if (in_nmi())
  		rctx = 3;
  	else if (in_irq())
  		rctx = 2;
  	else if (in_softirq())
  		rctx = 1;
  	else
  		rctx = 0;
  
  	if (recursion[rctx])
  		return -1;
  
  	recursion[rctx]++;
  	barrier();
  
  	return rctx;
  }
  
  static inline void put_recursion_context(int *recursion, int rctx)
  {
  	barrier();
  	recursion[rctx]--;
  }
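
/*
 * One recursion slot per context: task = 0, softirq = 1, hardirq = 2,
 * NMI = 3; the barrier()s order the flag update against the protected
 * region.  Typical pairing (illustrative sketch, assuming a per-cpu
 * int recursion[4]):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		(already active in this context: drop)
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */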
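
/*
 * On architectures that select CONFIG_HAVE_PERF_USER_STACK_DUMP the
 * sampler can read the user stack pointer from the register set and
 * snapshot part of the user stack; elsewhere both helpers report that
 * no dump is available.
 */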
  #ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
  static inline bool arch_perf_have_user_stack_dump(void)
  {
  	return true;
  }
  
  #define perf_user_stack_pointer(regs) user_stack_pointer(regs)
  #else
  static inline bool arch_perf_have_user_stack_dump(void)
  {
  	return false;
  }
  
  #define perf_user_stack_pointer(regs) 0
  #endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
  #endif /* _KERNEL_EVENTS_INTERNAL_H */