Blame view
include/linux/ftrace_event.h
11.1 KB
97f202515
|
1 2 |
#ifndef _LINUX_FTRACE_EVENT_H #define _LINUX_FTRACE_EVENT_H |
97f202515
|
3 |
#include <linux/ring_buffer.h> |
16bb8eb1b
|
4 |
#include <linux/trace_seq.h> |
be74b73a5
|
5 |
#include <linux/percpu.h> |
20ab4425a
|
6 |
#include <linux/hardirq.h> |
430ad5a60
|
7 |
#include <linux/perf_event.h> |
97f202515
|
8 9 |
/*
 * Forward declarations — full definitions live elsewhere in the
 * tracing subsystem; only pointers to these are used in this header.
 */
struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
97f202515
|
13 |
|
be74b73a5
|
14 15 16 17 |
/*
 * Maps a bit mask to a human-readable name; used as table entries
 * by the ftrace_print_flags_seq()/ftrace_print_symbols_seq() helpers
 * declared below.
 */
struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

/* Same mapping, but with a 64-bit mask (for 32-bit kernels, see below). */
struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};
be74b73a5
|
22 23 24 |
/*
 * Helpers that render flag/symbol/hex values into the trace_seq @p.
 * Each returns a C string (presumably the rendered buffer inside @p —
 * definitions are not visible in this chunk; confirm in trace_output.c).
 */
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
/* Only needed when 'unsigned long' cannot hold 64 bits. */
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
					 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);

struct trace_iterator;
struct trace_event;

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *event);
97f202515
|
40 41 42 43 44 45 46 |
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;		/* event type id (see FTRACE_MAX_EVENT) */
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
89ec0dee9
|
52 53 |
#define FTRACE_MAX_EVENT \ ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) |
97f202515
|
54 55 56 57 58 59 60 |
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;	/* enum trace_iter_flags bits */

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* set when the current open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new field here will be zeroed out in pipe_read */
};
8be0709f1
|
86 87 88 89 90 |
/* Bit values stored in trace_iterator::iter_flags. */
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
97f202515
|
91 92 |
/* Callback signature for the per-format output methods below. */
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event);

/* Output methods for one event, one per output format. */
struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

/* A registered trace event and its output callbacks. */
struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};
97f202515
|
107 108 109 110 111 112 113 114 115 116 |
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
f413cdb80
|
117 118 119 |
/*
 * Ring-buffer reserve/commit API for writing events. The lock_reserve
 * functions hand back a ring_buffer_event to fill in; the matching
 * unlock_commit (or discard_commit) finishes it. (Definitions are in
 * kernel/trace/ — not visible in this chunk.)
 */
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

struct ftrace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);
1f9963cbb
|
145 |
struct event_filter;

/*
 * Operations passed to an event class's ->reg() callback.
 * The perf-specific operations exist only when perf is built in.
 */
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;
8f0820183
|
160 161 |
/*
 * Shared behavior for a class of events (e.g. all events generated
 * from one TRACE_EVENT() template share a class).
 */
struct ftrace_event_class {
	char			*system;	/* subsystem name */
	void			*probe;		/* tracepoint probe function */
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;	/* probe used for perf */
#endif
	/* register/unregister dispatch; @type selects the operation */
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

/* Default ->reg() implementation usable by event classes. */
extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);
a1d0ce821
|
175 |
|
553552ce1
|
176 |
/* Bit positions for ftrace_event_call::flags (values defined below). */
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_WAS_ENABLED_BIT,
};
ae63b31e4
|
183 184 185 186 187 |
/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *		    (used for module unloading, if a module event is enabled,
 *		     it is best to clear the buffers that used it).
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
};
97f202515
|
200 |
/* One registered event: its class, output callbacks, filter and flags. */
struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct trace_event	event;		/* output callbacks (see above) */
	const char		*print_fmt;
	struct event_filter	*filter;
	struct list_head	*files;		/* per-instance event files */
	void			*mod;		/* owning module, if any */
	void			*data;
	/*
	 * bit 0:		filter_active
	 * bit 1:		allow trace by non root (cap any)
	 * bit 2:		failed to apply filter
	 * bit 3:		ftrace internal event (do not enable)
	 * bit 4:		Event was enabled by module
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
#endif
};

struct trace_array;
struct ftrace_subsystem_dir;

/* Bit positions for ftrace_event_file::flags (values defined below). */
enum {
	FTRACE_EVENT_FL_ENABLED_BIT,
	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
	FTRACE_EVENT_FL_SOFT_MODE_BIT,
	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
};

/*
 * Ftrace event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 */
enum {
	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
};

/* Per-trace-instance view of one event (its debugfs dir, state, etc.). */
struct ftrace_event_file {
	struct list_head		list;
	struct ftrace_event_call	*event_call;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct ftrace_subsystem_dir	*system;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
};
53cf810b1
|
275 276 277 278 279 280 281 |
#define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ event_##name.flags = value; \ return 0; \ } \ early_initcall(trace_init_flags_##name); |
97d5a2200
|
282 |
/* Upper bound on a single perf trace record. */
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
97f202515
|
285 |
|
2df75e415
|
286 |
/* Event-filter API (implemented in kernel/trace/trace_events_filter.c —
 * not visible in this chunk). */
extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
43b51ead3
|
292 293 294 295 |
/* Field filter types (how a field's value is compared by the filter). */
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};
042957801
|
299 300 301 |
/* Shared scratch buffer for event field definition, guarded by the mutex. */
#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);
97f202515
|
308 |
|
d2802d073
|
309 |
/* True iff 'type' is a signed integer type: (type)(-1) compares below 1. */
#define is_signed_type(type)	(((type)(-1)) < (type)1)

/* Enable (set != 0) or disable an event by system/event name. */
int trace_set_clr_event(const char *system, const char *event, int set);
97f202515
|
312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 |
/* * The double __builtin_constant_p is because gcc will give us an error * if we try to allocate the static variable to fmt if it is not a * constant. Even with the outer if statement optimizing out. */ #define event_trace_printk(ip, fmt, args...) \ do { \ __trace_printk_check_format(fmt, ##args); \ tracing_record_cmdline(current); \ if (__builtin_constant_p(fmt)) { \ static const char *trace_printk_fmt \ __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __trace_bprintk(ip, trace_printk_fmt, ##args); \ } else \ __trace_printk(ip, fmt, ##args); \ } while (0) |
07b139c8c
|
330 |
#ifdef CONFIG_PERF_EVENTS |
6fb2915df
|
331 |
struct perf_event; |
c530665c3
|
332 333 |
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); |
1c024eca5
|
334 335 |
extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); |
a4eaf7f14
|
336 337 |
extern int perf_trace_add(struct perf_event *event, int flags); extern void perf_trace_del(struct perf_event *event, int flags); |
1c024eca5
|
338 |
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, |
6fb2915df
|
339 340 |
char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); |
b7e2ecef9
|
341 342 |
extern void *perf_trace_buf_prepare(int size, unsigned short type, struct pt_regs *regs, int *rctxp); |
430ad5a60
|
343 344 |
static inline void |
97d5a2200
|
345 |
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, |
e6dab5ffa
|
346 347 |
u64 count, struct pt_regs *regs, void *head, struct task_struct *task) |
430ad5a60
|
348 |
{ |
e6dab5ffa
|
349 |
perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); |
430ad5a60
|
350 |
} |
6fb2915df
|
351 |
#endif |
97f202515
|
352 |
#endif /* _LINUX_FTRACE_EVENT_H */ |