Blame view
kernel/trace/trace.h
67 KB
bb730b583 tracing: Fix SPDX... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
bac5fb97a tracing: Add and ... |
2 |
|
bc0c38d13 ftrace: latency t... |
3 4 5 6 |
#ifndef _LINUX_KERNEL_TRACE_H #define _LINUX_KERNEL_TRACE_H #include <linux/fs.h> |
60063497a atomic: use <linu... |
7 |
#include <linux/atomic.h> |
bc0c38d13 ftrace: latency t... |
8 9 |
#include <linux/sched.h> #include <linux/clocksource.h> |
3928a8a2d ftrace: make work... |
10 |
#include <linux/ring_buffer.h> |
bd8ac686c ftrace: mmiotrace... |
11 |
#include <linux/mmiotrace.h> |
4e5292ea1 tracing: use the ... |
12 |
#include <linux/tracepoint.h> |
d13744cd6 tracing/ftrace: a... |
13 |
#include <linux/ftrace.h> |
2d6425af6 tracing: Declare ... |
14 |
#include <linux/trace.h> |
24f1e32c6 hw-breakpoints: R... |
15 |
#include <linux/hw_breakpoint.h> |
9504504cb tracing: make tra... |
16 |
#include <linux/trace_seq.h> |
af658dca2 tracing: Rename f... |
17 |
#include <linux/trace_events.h> |
52f5684c8 kernel: use macro... |
18 |
#include <linux/compiler.h> |
60f1d5e3b ftrace: Support f... |
19 |
#include <linux/glob.h> |
91edde2e6 ftrace: Implement... |
20 21 |
#include <linux/irq_work.h> #include <linux/workqueue.h> |
42d120e2d tracing: Move is_... |
22 |
#include <linux/ctype.h> |
9504504cb tracing: make tra... |
23 |
|
12ab74ee0 tracing: Make sys... |
24 25 26 27 |
#ifdef CONFIG_FTRACE_SYSCALLS #include <asm/unistd.h> /* For NR_SYSCALLS */ #include <asm/syscall.h> /* some archs define it here */ #endif |
72829bc3d ftrace: move enum... |
28 29 30 31 32 33 34 |
enum trace_type { __TRACE_FIRST_TYPE = 0, TRACE_FN, TRACE_CTX, TRACE_WAKE, TRACE_STACK, |
dd0e545f0 ftrace: printk fo... |
35 |
TRACE_PRINT, |
48ead0203 tracing/core: bri... |
36 |
TRACE_BPRINT, |
bd8ac686c ftrace: mmiotrace... |
37 38 |
TRACE_MMIO_RW, TRACE_MMIO_MAP, |
9f029e83e ftrace: rename un... |
39 |
TRACE_BRANCH, |
287b6e68c tracing/function-... |
40 41 |
TRACE_GRAPH_RET, TRACE_GRAPH_ENT, |
02b67518e tracing: add supp... |
42 |
TRACE_USER_STACK, |
c71a89615 blktrace: add ftr... |
43 |
TRACE_BLK, |
09ae72348 tracing: Add trac... |
44 |
TRACE_BPUTS, |
e7c15cd8a tracing: Added ha... |
45 |
TRACE_HWLAT, |
fa32e8557 tracing: Add new ... |
46 |
TRACE_RAW_DATA, |
72829bc3d ftrace: move enum... |
47 |
|
f0868d1e2 ftrace: set up tr... |
48 |
__TRACE_LAST_TYPE, |
72829bc3d ftrace: move enum... |
49 |
}; |
bc0c38d13 ftrace: latency t... |
50 |
|
0a1c49db8 tracing: use macr... |
51 52 |
#undef __field #define __field(type, item) type item; |
86387f7ee ftrace: add stack... |
53 |
|
04ae87a52 ftrace: Rework ev... |
54 55 |
#undef __field_fn #define __field_fn(type, item) type item; |
d73150943 tracing: show det... |
56 57 |
#undef __field_struct #define __field_struct(type, item) __field(type, item) |
86387f7ee ftrace: add stack... |
58 |
|
d73150943 tracing: show det... |
59 60 |
#undef __field_desc #define __field_desc(type, container, item) |
02b67518e tracing: add supp... |
61 |
|
4649079b9 tracing: Make ftr... |
62 63 |
#undef __field_packed #define __field_packed(type, container, item) |
0a1c49db8 tracing: use macr... |
64 65 |
#undef __array #define __array(type, item, size) type item[size]; |
1427cdf05 tracing: infrastr... |
66 |
|
d73150943 tracing: show det... |
67 68 |
#undef __array_desc #define __array_desc(type, container, item, size) |
777e208d4 ftrace: take adva... |
69 |
|
0a1c49db8 tracing: use macr... |
70 71 |
#undef __dynamic_array #define __dynamic_array(type, item) type item[]; |
777e208d4 ftrace: take adva... |
72 |
|
0a1c49db8 tracing: use macr... |
73 74 |
#undef F_STRUCT #define F_STRUCT(args...) args |
742390728 tracing/fastboot:... |
75 |
|
0a1c49db8 tracing: use macr... |
76 |
#undef FTRACE_ENTRY |
04ae87a52 ftrace: Rework ev... |
77 |
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
02aa3162e ftrace: Allow to ... |
78 79 80 |
struct struct_name { \ struct trace_entry ent; \ tstruct \ |
0a1c49db8 tracing: use macr... |
81 |
} |
777e208d4 ftrace: take adva... |
82 |
|
0a1c49db8 tracing: use macr... |
83 |
#undef FTRACE_ENTRY_DUP |
04ae87a52 ftrace: Rework ev... |
84 |
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) |
1e9b51c28 x86, bts, ftrace:... |
85 |
|
e59a0bff3 ftrace: Add FTRAC... |
86 |
#undef FTRACE_ENTRY_REG |
04ae87a52 ftrace: Rework ev... |
87 88 |
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
e59a0bff3 ftrace: Add FTRAC... |
89 |
|
a4a551b8f ftrace: Reduce si... |
90 |
#undef FTRACE_ENTRY_PACKED |
04ae87a52 ftrace: Rework ev... |
91 92 |
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed |
a4a551b8f ftrace: Reduce si... |
93 |
|
0a1c49db8 tracing: use macr... |
94 |
#include "trace_entries.h" |
36994e58a tracing/kmemtrace... |
95 |
|
24589e3a2 tracing: Use pr_e... |
96 97 |
/* Use this for memory failure errors */ #define MEM_FAIL(condition, fmt, ...) ({ \ |
33def8498 treewide: Convert... |
98 |
static bool __section(".data.once") __warned; \ |
24589e3a2 tracing: Use pr_e... |
99 100 101 102 103 104 105 106 |
int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ __warned = true; \ pr_err("ERROR: " fmt, ##__VA_ARGS__); \ } \ unlikely(__ret_warn_once); \ }) |
0a1c49db8 tracing: use macr... |
107 108 109 110 |
/* * syscalls are special, and need special handling, this is why * they are not included in trace_entries.h */ |
bed1ffca0 tracing/syscalls:... |
111 112 113 114 115 116 117 118 119 |
struct syscall_trace_enter { struct trace_entry ent; int nr; unsigned long args[]; }; struct syscall_trace_exit { struct trace_entry ent; int nr; |
99df5a6a2 trace/syscalls: C... |
120 |
long ret; |
bed1ffca0 tracing/syscalls:... |
121 |
}; |
93ccae7a2 tracing/kprobes: ... |
122 |
struct kprobe_trace_entry_head { |
413d37d1e tracing: Add kpro... |
123 124 |
struct trace_entry ent; unsigned long ip; |
413d37d1e tracing: Add kpro... |
125 |
}; |
93ccae7a2 tracing/kprobes: ... |
126 |
struct kretprobe_trace_entry_head { |
413d37d1e tracing: Add kpro... |
127 128 129 |
struct trace_entry ent; unsigned long func; unsigned long ret_ip; |
413d37d1e tracing: Add kpro... |
130 |
}; |
dd0e545f0 ftrace: printk fo... |
131 |
/* |
fc5e27ae4 mmiotrace: handle... |
132 133 |
* trace_flag_type is an enumeration that holds different * states when a trace occurs. These are: |
9244489a7 ftrace: handle ar... |
134 |
* IRQS_OFF - interrupts were disabled |
9de36825b tracing: trace_bp... |
135 |
* IRQS_NOSUPPORT - arch does not support irqs_disabled_flags |
bd9cfca9c tracing: format c... |
136 |
* NEED_RESCHED - reschedule is requested |
9244489a7 ftrace: handle ar... |
137 138 |
* HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler |
fc5e27ae4 mmiotrace: handle... |
139 140 141 |
*/ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, |
9244489a7 ftrace: handle ar... |
142 143 144 145 |
TRACE_FLAG_IRQS_NOSUPPORT = 0x02, TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, |
e5137b50a ftrace, sched: Ad... |
146 |
TRACE_FLAG_PREEMPT_RESCHED = 0x20, |
7e6867bf8 tracing: Record a... |
147 |
TRACE_FLAG_NMI = 0x40, |
fc5e27ae4 mmiotrace: handle... |
148 |
}; |
5bf9a1ee3 ftrace: inject ma... |
149 |
#define TRACE_BUF_SIZE 1024 |
bc0c38d13 ftrace: latency t... |
150 |
|
2b6080f28 tracing: Encapsul... |
151 |
struct trace_array; |
bc0c38d13 ftrace: latency t... |
152 153 154 155 156 157 |
/* * The CPU trace array - it consists of thousands of trace entries * plus some other descriptor data: (for example which task started * the trace, etc.) */ struct trace_array_cpu { |
bc0c38d13 ftrace: latency t... |
158 |
atomic_t disabled; |
2cadf9135 tracing: add bina... |
159 |
void *buffer_page; /* ring buffer spare */ |
4e3c3333f ftrace: fix time ... |
160 |
|
438ced172 ring-buffer: Add ... |
161 |
unsigned long entries; |
bc0c38d13 ftrace: latency t... |
162 163 164 165 166 167 168 |
unsigned long saved_latency; unsigned long critical_start; unsigned long critical_end; unsigned long critical_sequence; unsigned long nice; unsigned long policy; unsigned long rt_priority; |
2f26ebd54 tracing: use time... |
169 |
unsigned long skipped_entries; |
a5a1d1c29 clocksource: Use ... |
170 |
u64 preempt_timestamp; |
bc0c38d13 ftrace: latency t... |
171 |
pid_t pid; |
d20b92ab6 userns: Teach tra... |
172 |
kuid_t uid; |
bc0c38d13 ftrace: latency t... |
173 |
char comm[TASK_COMM_LEN]; |
3fdaf80f4 tracing: Implemen... |
174 |
|
345ddcc88 ftrace: Have set_... |
175 |
#ifdef CONFIG_FUNCTION_TRACER |
717e3f5eb ftrace: Make func... |
176 |
int ftrace_ignore_pid; |
345ddcc88 ftrace: Have set_... |
177 |
#endif |
717e3f5eb ftrace: Make func... |
178 |
bool ignore_pid; |
bc0c38d13 ftrace: latency t... |
179 |
}; |
2b6080f28 tracing: Encapsul... |
180 |
struct tracer; |
37aea98b8 tracing: Add trac... |
181 |
struct trace_option_dentry; |
2b6080f28 tracing: Encapsul... |
182 |
|
1c5eb4481 tracing: Rename t... |
183 |
struct array_buffer { |
12883efb6 tracing: Consolid... |
184 |
struct trace_array *tr; |
132924943 tracing: Make str... |
185 |
struct trace_buffer *buffer; |
12883efb6 tracing: Consolid... |
186 |
struct trace_array_cpu __percpu *data; |
a5a1d1c29 clocksource: Use ... |
187 |
u64 time_start; |
12883efb6 tracing: Consolid... |
188 189 |
int cpu; }; |
9a38a8856 tracing: Add a me... |
190 |
#define TRACE_FLAGS_MAX_SIZE 32 |
37aea98b8 tracing: Add trac... |
191 192 193 194 |
struct trace_options { struct tracer *tracer; struct trace_option_dentry *topts; }; |
490901078 tracing: Add set_... |
195 |
struct trace_pid_list { |
f4d34a87e tracing: Use pid ... |
196 197 |
int pid_max; unsigned long *pids; |
490901078 tracing: Add set_... |
198 |
}; |
276836260 tracing: Create s... |
199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 |
enum { TRACE_PIDS = BIT(0), TRACE_NO_PIDS = BIT(1), }; static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list, struct trace_pid_list *no_pid_list) { /* Return true if the pid list in type has pids */ return ((type & TRACE_PIDS) && pid_list) || ((type & TRACE_NO_PIDS) && no_pid_list); } static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list, struct trace_pid_list *no_pid_list) { /* * Turning off what is in @type, return true if the "other" * pid list, still has pids in it. */ return (!(type & TRACE_PIDS) && pid_list) || (!(type & TRACE_NO_PIDS) && no_pid_list); } |
a35873a09 tracing: Add cond... |
222 223 224 225 226 227 228 229 230 231 232 233 |
typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); /** * struct cond_snapshot - conditional snapshot data and callback * * The cond_snapshot structure encapsulates a callback function and * data associated with the snapshot for a given tracing instance. * * When a snapshot is taken conditionally, by invoking * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is * passed in turn to the cond_snapshot.update() function. That data * can be compared by the update() implementation with the cond_data |
499f7bb08 tracing: Fix some... |
234 |
* contained within the struct cond_snapshot instance associated with |
a35873a09 tracing: Add cond... |
235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 |
* the trace_array. Because the tr->max_lock is held throughout the * update() call, the update() function can directly retrieve the * cond_snapshot and cond_data associated with the per-instance * snapshot associated with the trace_array. * * The cond_snapshot.update() implementation can save data to be * associated with the snapshot if it decides to, and returns 'true' * in that case, or it returns 'false' if the conditional snapshot * shouldn't be taken. * * The cond_snapshot instance is created and associated with the * user-defined cond_data by tracing_cond_snapshot_enable(). * Likewise, the cond_snapshot instance is destroyed and is no longer * associated with the trace instance by * tracing_cond_snapshot_disable(). * * The method below is required. * * @update: When a conditional snapshot is invoked, the update() * callback function is invoked with the tr->max_lock held. The * update() implementation signals whether or not to actually * take the snapshot, by returning 'true' if so, 'false' if no * snapshot should be taken. Because the max_lock is held for * the duration of update(), the implementation is safe to |
499f7bb08 tracing: Fix some... |
259 |
* directly retrieved and save any implementation data it needs |
a35873a09 tracing: Add cond... |
260 261 262 263 264 265 |
* to in association with the snapshot. */ struct cond_snapshot { void *cond_data; cond_update_fn_t update; }; |
bc0c38d13 ftrace: latency t... |
266 267 268 269 270 271 |
/* * The trace array - an array of per-CPU trace arrays. This is the * highest level data structure that individual tracers deal with. * They have on/off state as well: */ struct trace_array { |
ae63b31e4 tracing: Separate... |
272 |
struct list_head list; |
277ba0446 tracing: Add inte... |
273 |
char *name; |
1c5eb4481 tracing: Rename t... |
274 |
struct array_buffer array_buffer; |
12883efb6 tracing: Consolid... |
275 276 277 278 279 280 281 |
#ifdef CONFIG_TRACER_MAX_TRACE /* * The max_buffer is used to snapshot the trace when a maximum * latency is reached, or when the user initiates a snapshot. * Some tracers will use this to store a maximum trace while * it continues examining live traces. * |
1c5eb4481 tracing: Rename t... |
282 |
* The buffers for the max_buffer are set up the same as the array_buffer |
12883efb6 tracing: Consolid... |
283 |
* When a snapshot is taken, the buffer of the max_buffer is swapped |
1c5eb4481 tracing: Rename t... |
284 285 |
* with the buffer of the array_buffer and the buffers are reset for * the array_buffer so the tracing can continue. |
12883efb6 tracing: Consolid... |
286 |
*/ |
1c5eb4481 tracing: Rename t... |
287 |
struct array_buffer max_buffer; |
45ad21ca5 tracing: Have tra... |
288 |
bool allocated_snapshot; |
f971cc9aa tracing: Have max... |
289 290 |
#endif #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
6d9b3fa5e tracing: Move tra... |
291 |
unsigned long max_latency; |
91edde2e6 ftrace: Implement... |
292 293 294 295 296 |
#ifdef CONFIG_FSNOTIFY struct dentry *d_max_latency; struct work_struct fsnotify_work; struct irq_work fsnotify_irqwork; #endif |
12883efb6 tracing: Consolid... |
297 |
#endif |
490901078 tracing: Add set_... |
298 |
struct trace_pid_list __rcu *filtered_pids; |
276836260 tracing: Create s... |
299 |
struct trace_pid_list __rcu *filtered_no_pids; |
0b9b12c1b tracing: Move ftr... |
300 301 302 303 304 305 306 307 308 309 310 311 312 313 |
/* * max_lock is used to protect the swapping of buffers * when taking a max snapshot. The buffers themselves are * protected by per_cpu spinlocks. But the action of the swap * needs its own lock. * * This is defined as a arch_spinlock_t in order to help * with performance when lockdep debugging is enabled. * * It is also used in other places outside the update_max_tr * so it needs to be defined outside of the * CONFIG_TRACER_MAX_TRACE. */ arch_spinlock_t max_lock; |
499e54705 tracing/ring-buff... |
314 |
int buffer_disabled; |
12ab74ee0 tracing: Make sys... |
315 316 317 |
#ifdef CONFIG_FTRACE_SYSCALLS int sys_refcount_enter; int sys_refcount_exit; |
7f1d2f821 tracing: Rename f... |
318 319 |
struct trace_event_file __rcu *enter_syscall_files[NR_syscalls]; struct trace_event_file __rcu *exit_syscall_files[NR_syscalls]; |
12ab74ee0 tracing: Make sys... |
320 |
#endif |
2b6080f28 tracing: Encapsul... |
321 322 |
int stop_count; int clock_id; |
37aea98b8 tracing: Add trac... |
323 |
int nr_topts; |
065e63f95 tracing: Only hav... |
324 |
bool clear_trace; |
03329f993 tracing: Add trac... |
325 |
int buffer_percent; |
2f754e771 tracing: Have the... |
326 |
unsigned int n_err_log_entries; |
2b6080f28 tracing: Encapsul... |
327 |
struct tracer *current_trace; |
983f938ae tracing: Move tra... |
328 |
unsigned int trace_flags; |
9a38a8856 tracing: Add a me... |
329 |
unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; |
ae63b31e4 tracing: Separate... |
330 |
unsigned int flags; |
2b6080f28 tracing: Encapsul... |
331 |
raw_spinlock_t start_lock; |
2f754e771 tracing: Have the... |
332 |
struct list_head err_log; |
ae63b31e4 tracing: Separate... |
333 |
struct dentry *dir; |
2b6080f28 tracing: Encapsul... |
334 335 |
struct dentry *options; struct dentry *percpu_dir; |
ae63b31e4 tracing: Separate... |
336 |
struct dentry *event_dir; |
37aea98b8 tracing: Add trac... |
337 |
struct trace_options *topts; |
ae63b31e4 tracing: Separate... |
338 339 |
struct list_head systems; struct list_head events; |
3dd809536 tracing: Add trig... |
340 |
struct trace_event_file *trace_marker_file; |
ccfe9e42e tracing: Make tra... |
341 |
cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
a695cb581 tracing: Prevent ... |
342 |
int ref; |
7ef282e05 tracing: Move pip... |
343 |
int trace_ref; |
f20a58062 ftrace: Allow ins... |
344 345 |
#ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops *ops; |
345ddcc88 ftrace: Have set_... |
346 |
struct trace_pid_list __rcu *function_pids; |
b3b1e6ede ftrace: Create se... |
347 |
struct trace_pid_list __rcu *function_no_pids; |
04ec7bb64 tracing: Have the... |
348 |
#ifdef CONFIG_DYNAMIC_FTRACE |
673feb9d7 ftrace: Add :mod:... |
349 |
/* All of these are protected by the ftrace_lock */ |
04ec7bb64 tracing: Have the... |
350 |
struct list_head func_probes; |
673feb9d7 ftrace: Add :mod:... |
351 352 |
struct list_head mod_trace; struct list_head mod_notrace; |
04ec7bb64 tracing: Have the... |
353 |
#endif |
f20a58062 ftrace: Allow ins... |
354 355 356 |
/* function tracing enabled */ int function_enabled; #endif |
00b414529 ring-buffer: Add ... |
357 |
int time_stamp_abs_ref; |
067fe038e tracing: Add vari... |
358 |
struct list_head hist_vars; |
a35873a09 tracing: Add cond... |
359 360 361 |
#ifdef CONFIG_TRACER_SNAPSHOT struct cond_snapshot *cond_snapshot; #endif |
bc0c38d13 ftrace: latency t... |
362 |
}; |
ae63b31e4 tracing: Separate... |
363 364 365 366 367 |
enum { TRACE_ARRAY_FL_GLOBAL = (1 << 0) }; extern struct list_head ftrace_trace_arrays; |
a82274151 tracing: Protect ... |
368 |
extern struct mutex trace_types_lock; |
8e2e2fa47 tracing: Add trac... |
369 |
extern int trace_array_get(struct trace_array *tr); |
8530dec63 tracing: Add trac... |
370 |
extern int tracing_check_open_get_tr(struct trace_array *tr); |
89c95fcef tracing: Add trac... |
371 372 |
extern struct trace_array *trace_array_find(const char *instance); extern struct trace_array *trace_array_find_get(const char *instance); |
8e2e2fa47 tracing: Add trac... |
373 |
|
00b414529 ring-buffer: Add ... |
374 |
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs); |
d71bd34d7 tracing: Make tra... |
375 |
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); |
00b414529 ring-buffer: Add ... |
376 |
|
860f9f6b0 tracing: Add usec... |
377 |
extern bool trace_clock_in_ns(struct trace_array *tr); |
ae63b31e4 tracing: Separate... |
378 379 380 381 382 383 384 |
/* * The global tracer (top) should be the first trace array added, * but we check the flag anyway. */ static inline struct trace_array *top_trace_array(void) { struct trace_array *tr; |
da9c3413a tracing: Fix chec... |
385 |
if (list_empty(&ftrace_trace_arrays)) |
dc81e5e3a tracing: Return e... |
386 |
return NULL; |
ae63b31e4 tracing: Separate... |
387 388 389 390 391 |
tr = list_entry(ftrace_trace_arrays.prev, typeof(*tr), list); WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); return tr; } |
7104f300c ftrace: type cast... |
392 393 394 395 |
#define FTRACE_CMP_TYPE(var, type) \ __builtin_types_compatible_p(typeof(var), type *) #undef IF_ASSIGN |
968e51709 tracing: Fix clan... |
396 397 398 399 400 |
#define IF_ASSIGN(var, entry, etype, id) \ if (FTRACE_CMP_TYPE(var, etype)) { \ var = (typeof(var))(entry); \ WARN_ON(id != 0 && (entry)->type != id); \ break; \ |
7104f300c ftrace: type cast... |
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 |
} /* Will cause compile errors if type is not found. */ extern void __ftrace_bad_type(void); /* * The trace_assign_type is a verifier that the entry type is * the same as the type being assigned. To add new types simply * add a line with the following format: * * IF_ASSIGN(var, ent, type, id); * * Where "type" is the trace type that includes the trace_entry * as the "ent" item. And "id" is the trace identifier that is * used in the trace_type enum. * * If the type can have more than one id, then use zero. */ #define trace_assign_type(var, ent) \ do { \ IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ |
7104f300c ftrace: type cast... |
423 |
IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
02b67518e tracing: add supp... |
424 |
IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
7104f300c ftrace: type cast... |
425 |
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
48ead0203 tracing/core: bri... |
426 |
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
09ae72348 tracing: Add trac... |
427 |
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ |
e7c15cd8a tracing: Added ha... |
428 |
IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \ |
fa32e8557 tracing: Add new ... |
429 |
IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\ |
7104f300c ftrace: type cast... |
430 431 432 433 |
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ TRACE_MMIO_RW); \ IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ TRACE_MMIO_MAP); \ |
9f029e83e ftrace: rename un... |
434 |
IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
287b6e68c tracing/function-... |
435 436 437 438 |
IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ TRACE_GRAPH_ENT); \ IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ TRACE_GRAPH_RET); \ |
7104f300c ftrace: type cast... |
439 440 |
__ftrace_bad_type(); \ } while (0) |
2c4f035f6 tracing/ftrace: c... |
441 |
|
adf9f1957 tracing/ftrace: i... |
442 443 444 445 446 447 |
/* * An option specific to a tracer. This is a boolean value. * The bit is the bit index that sets its value on the * flags value in struct tracer_flags. */ struct tracer_opt { |
9de36825b tracing: trace_bp... |
448 449 |
const char *name; /* Will appear on the trace_options file */ u32 bit; /* Mask assigned in val field in tracer_flags */ |
adf9f1957 tracing/ftrace: i... |
450 451 452 453 454 455 456 457 |
}; /* * The set of specific options for a tracer. Your tracer * have to set the initial value of the flags val. */ struct tracer_flags { u32 val; |
9de36825b tracing: trace_bp... |
458 |
struct tracer_opt *opts; |
d39cdd203 tracing: Make tra... |
459 |
struct tracer *trace; |
adf9f1957 tracing/ftrace: i... |
460 461 462 463 |
}; /* Makes more easy to define a tracer opt */ #define TRACER_OPT(s, b) .name = #s, .bit = b |
034939b65 tracing/ftrace: h... |
464 |
|
41d9c0bec tracing: Always s... |
465 466 467 468 469 470 |
struct trace_option_dentry { struct tracer_opt *opt; struct tracer_flags *flags; struct trace_array *tr; struct dentry *entry; }; |
6eaaa5d57 tracing/core: use... |
471 |
/** |
8434dc934 tracing: Convert ... |
472 |
* struct tracer - a specific tracer and its callbacks to interact with tracefs |
6eaaa5d57 tracing/core: use... |
473 474 475 |
* @name: the name chosen to select it on the available_tracers file * @init: called when one switches to this tracer (echo name > current_tracer) * @reset: called when one switches to another tracer |
05a724bd4 tracing: Fix comm... |
476 477 |
* @start: called when tracing is unpaused (echo 1 > tracing_on) * @stop: called when tracing is paused (echo 0 > tracing_on) |
6508fa761 tracing: let user... |
478 |
* @update_thresh: called when tracing_thresh is updated |
6eaaa5d57 tracing/core: use... |
479 480 |
* @open: called when the trace file is opened * @pipe_open: called when the trace_pipe file is opened |
6eaaa5d57 tracing/core: use... |
481 |
* @close: called when the trace file is released |
c521efd17 tracing: Add pipe... |
482 |
* @pipe_close: called when the trace_pipe file is released |
6eaaa5d57 tracing/core: use... |
483 484 485 486 487 488 489 |
* @read: override the default read callback on trace_pipe * @splice_read: override the default splice_read callback on trace_pipe * @selftest: selftest to run on boot (see trace_selftest.c) * @print_headers: override the first lines that describe your columns * @print_line: callback that prints a trace * @set_flag: signals one of your private flags changed (trace_options file) * @flags: your private flags |
bc0c38d13 ftrace: latency t... |
490 491 492 |
*/ struct tracer { const char *name; |
1c80025a4 tracing/ftrace: c... |
493 |
int (*init)(struct trace_array *tr); |
bc0c38d13 ftrace: latency t... |
494 |
void (*reset)(struct trace_array *tr); |
9036990d4 ftrace: restructu... |
495 496 |
void (*start)(struct trace_array *tr); void (*stop)(struct trace_array *tr); |
6508fa761 tracing: let user... |
497 |
int (*update_thresh)(struct trace_array *tr); |
bc0c38d13 ftrace: latency t... |
498 |
void (*open)(struct trace_iterator *iter); |
107bad8be ftrace: add trace... |
499 |
void (*pipe_open)(struct trace_iterator *iter); |
bc0c38d13 ftrace: latency t... |
500 |
void (*close)(struct trace_iterator *iter); |
c521efd17 tracing: Add pipe... |
501 |
void (*pipe_close)(struct trace_iterator *iter); |
107bad8be ftrace: add trace... |
502 503 504 |
ssize_t (*read)(struct trace_iterator *iter, struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos); |
3c56819b1 tracing: splice s... |
505 506 507 508 509 510 |
ssize_t (*splice_read)(struct trace_iterator *iter, struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); |
60a11774b ftrace: add self-... |
511 512 513 514 |
#ifdef CONFIG_FTRACE_STARTUP_TEST int (*selftest)(struct tracer *trace, struct trace_array *tr); #endif |
8bba1bf5e x86, ftrace: call... |
515 |
void (*print_header)(struct seq_file *m); |
2c4f035f6 tracing/ftrace: c... |
516 |
enum print_line_t (*print_line)(struct trace_iterator *iter); |
adf9f1957 tracing/ftrace: i... |
517 |
/* If you handled the flag setting, return 0 */ |
8c1a49aed tracing: Pass tra... |
518 519 |
int (*set_flag)(struct trace_array *tr, u32 old_flags, u32 bit, int set); |
613f04a0f tracing: Prevent ... |
520 |
/* Return 0 if OK with change, else return non-zero */ |
bf6065b5c tracing: Pass tra... |
521 |
int (*flag_changed)(struct trace_array *tr, |
613f04a0f tracing: Prevent ... |
522 |
u32 mask, int set); |
bc0c38d13 ftrace: latency t... |
523 |
struct tracer *next; |
9de36825b tracing: trace_bp... |
524 |
struct tracer_flags *flags; |
50512ab57 tracing: Convert ... |
525 |
int enabled; |
f43c738bf tracing: Change t... |
526 |
bool print_max; |
607e2ea16 tracing: Set up i... |
527 |
bool allow_instances; |
12883efb6 tracing: Consolid... |
528 |
#ifdef CONFIG_TRACER_MAX_TRACE |
f43c738bf tracing: Change t... |
529 |
bool use_max_tr; |
12883efb6 tracing: Consolid... |
530 |
#endif |
c7b3ae0bd tracing: Ignore m... |
531 532 |
/* True if tracer cannot be enabled in kernel param */ bool noboot; |
bc0c38d13 ftrace: latency t... |
533 |
}; |
f9520750c tracing: make tra... |
534 |
|
e4a3f541f tracing: Still tr... |
535 |
/* Only current can touch trace_recursion */ |
e4a3f541f tracing: Still tr... |
536 |
|
edc15cafc tracing: Avoid un... |
537 538 539 540 541 542 543 544 545 546 547 548 549 |
/* * For function tracing recursion: * The order of these bits are important. * * When function tracing occurs, the following steps are made: * If arch does not support a ftrace feature: * call internal function (uses INTERNAL bits) which calls... * If callback is registered to the "global" list, the list * function is called and recursion checks the GLOBAL bits. * then this function calls... * The function callback, which can use the FTRACE bits to * check for recursion. * |
499f7bb08 tracing: Fix some... |
550 |
* Now if the arch does not support a feature, and it calls |
edc15cafc tracing: Avoid un... |
551 552 553 554 555 556 557 558 559 560 561 |
* the global list function which calls the ftrace callback * all three of these steps will do a recursion protection. * There's no reason to do one if the previous caller already * did. The recursion that we are protecting against will * go through the same steps again. * * To prevent the multiple recursion checks, if a recursion * bit is set that is higher than the MAX bit of the current * check, then we know that the check was made by the previous * caller, and we can skip the current check. */ |
e46cbf75c tracing: Make the... |
562 |
enum { |
5412e0b76 tracing: Remove u... |
563 |
/* Function recursion bits */ |
567cd4da5 ring-buffer: User... |
564 |
TRACE_FTRACE_BIT, |
edc15cafc tracing: Avoid un... |
565 566 567 |
TRACE_FTRACE_NMI_BIT, TRACE_FTRACE_IRQ_BIT, TRACE_FTRACE_SIRQ_BIT, |
e46cbf75c tracing: Make the... |
568 |
|
4104d326b ftrace: Remove gl... |
569 |
/* INTERNAL_BITs must be greater than FTRACE_BITs */ |
edc15cafc tracing: Avoid un... |
570 571 572 573 |
TRACE_INTERNAL_BIT, TRACE_INTERNAL_NMI_BIT, TRACE_INTERNAL_IRQ_BIT, TRACE_INTERNAL_SIRQ_BIT, |
6224beb12 tracing: Have bra... |
574 |
TRACE_BRANCH_BIT, |
e4a3f541f tracing: Still tr... |
575 576 577 578 579 580 581 |
/* * Abuse of the trace_recursion. * As we need a way to maintain state if we are tracing the function * graph in irq because we want to trace a particular function that * was called in irq context but we have irq tracing off. Since this * can only be modified by current, we can reuse trace_recursion. */ |
e46cbf75c tracing: Make the... |
582 |
TRACE_IRQ_BIT, |
5cf99a0f3 tracing/fgraph: F... |
583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 |
/* Set if the function is in the set_graph_function file */ TRACE_GRAPH_BIT, /* * In the very unlikely case that an interrupt came in * at a start of graph tracing, and we want to trace * the function in that interrupt, the depth can be greater * than zero, because of the preempted start of a previous * trace. In an even more unlikely case, depth could be 2 * if a softirq interrupted the start of graph tracing, * followed by an interrupt preempting a start of graph * tracing in the softirq, and depth can even be 3 * if an NMI came in at the start of an interrupt function * that preempted a softirq start of a function that * preempted normal context!!!! Luckily, it can't be * greater than 3, so the next two bits are a mask * of what the depth is when we set TRACE_GRAPH_BIT */ TRACE_GRAPH_DEPTH_START_BIT, TRACE_GRAPH_DEPTH_END_BIT, |
9cd2992f2 fgraph: Have set_... |
605 606 607 608 609 610 611 |
/* * To implement set_graph_notrace, if this bit is set, we ignore * function graph tracing of called functions, until the return * function is called to clear it. */ TRACE_GRAPH_NOTRACE_BIT, |
726b3d3f1 ftrace: Handle tr... |
612 613 614 615 616 617 |
/* * When transitioning between context, the preempt_count() may * not be correct. Allow for a single recursion to cover this case. */ TRACE_TRANSITION_BIT, |
e46cbf75c tracing: Make the... |
618 |
}; |
e4a3f541f tracing: Still tr... |
619 |
|
e46cbf75c tracing: Make the... |
620 621 622 |
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) |
e4a3f541f tracing: Still tr... |
623 |
|
/*
 * The graph-trace start depth is stored in the two bits beginning at
 * TRACE_GRAPH_DEPTH_START_BIT (values 0-3; see the comment at the bit
 * definitions for why it cannot exceed 3).
 */
#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)

#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
edc15cafc tracing: Avoid un... |
633 634 635 636 |
#define TRACE_CONTEXT_BITS 4 #define TRACE_FTRACE_START TRACE_FTRACE_BIT #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) |
edc15cafc tracing: Avoid un... |
637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 |
#define TRACE_LIST_START TRACE_INTERNAL_BIT #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) #define TRACE_CONTEXT_MASK TRACE_LIST_MAX static __always_inline int trace_get_context_bit(void) { int bit; if (in_interrupt()) { if (in_nmi()) bit = 0; else if (in_irq()) bit = 1; else bit = 2; } else bit = 3; return bit; } static __always_inline int trace_test_and_set_recursion(int start, int max) { unsigned int val = current->trace_recursion; int bit; /* A previous recursion check was made */ if ((val & TRACE_CONTEXT_MASK) > max) return 0; bit = trace_get_context_bit() + start; |
726b3d3f1 ftrace: Handle tr... |
670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 |
if (unlikely(val & (1 << bit))) { /* * It could be that preempt_count has not been updated during * a switch between contexts. Allow for a single recursion. */ bit = TRACE_TRANSITION_BIT; if (trace_recursion_test(bit)) return -1; trace_recursion_set(bit); barrier(); return bit + 1; } /* Normal check passed, clear the transition to allow it again */ trace_recursion_clear(TRACE_TRANSITION_BIT); |
edc15cafc tracing: Avoid un... |
685 686 687 688 |
val |= 1 << bit; current->trace_recursion = val; barrier(); |
ee11b93f9 ftrace: Fix recur... |
689 |
return bit + 1; |
edc15cafc tracing: Avoid un... |
690 691 692 693 694 695 696 697 |
} static __always_inline void trace_clear_recursion(int bit) { unsigned int val = current->trace_recursion; if (!bit) return; |
ee11b93f9 ftrace: Fix recur... |
698 |
bit--; |
edc15cafc tracing: Avoid un... |
699 700 701 702 703 704 |
bit = 1 << bit; val &= ~bit; barrier(); current->trace_recursion = val; } |
6d158a813 tracing: Remove N... |
705 706 707 |
static inline struct ring_buffer_iter * trace_buffer_iter(struct trace_iterator *iter, int cpu) { |
f26808ba7 tracing: Optimize... |
708 |
return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL; |
6d158a813 tracing: Remove N... |
709 |
} |
b6f11df26 trace: Call traci... |
710 |
int tracer_init(struct tracer *t, struct trace_array *tr); |
9036990d4 ftrace: restructu... |
711 |
int tracing_is_enabled(void); |
1c5eb4481 tracing: Rename t... |
712 |
void tracing_reset_online_cpus(struct array_buffer *buf); |
9456f0fa6 tracing: reset ri... |
713 |
void tracing_reset_current(int cpu); |
873c642f5 tracing: Clear al... |
714 |
void tracing_reset_all_online_cpus(void); |
bc0c38d13 ftrace: latency t... |
715 |
int tracing_open_generic(struct inode *inode, struct file *filp); |
aa07d71f1 tracing: Have tra... |
716 |
int tracing_open_generic_tr(struct inode *inode, struct file *filp); |
2e86421de tracing: Add help... |
717 |
bool tracing_is_disabled(void); |
ec5735088 tracing: Make tra... |
718 |
bool tracer_tracing_is_on(struct trace_array *tr); |
2290f2c58 tracing/ftrace: A... |
719 720 |
void tracer_tracing_on(struct trace_array *tr); void tracer_tracing_off(struct trace_array *tr); |
5452af664 tracing/ftrace: f... |
721 |
struct dentry *trace_create_file(const char *name, |
f4ae40a6a switch debugfs to... |
722 |
umode_t mode, |
5452af664 tracing/ftrace: f... |
723 724 725 |
struct dentry *parent, void *data, const struct file_operations *fops); |
22c36b182 tracing: make tra... |
726 |
int tracing_init_dentry(void); |
d618b3e6e ftrace: sysprof u... |
727 |
|
51a763dd8 tracing: Introduc... |
728 |
struct ring_buffer_event; |
e77405ad8 tracing: pass aro... |
729 |
struct ring_buffer_event * |
132924943 tracing: Make str... |
730 |
trace_buffer_lock_reserve(struct trace_buffer *buffer, |
e77405ad8 tracing: pass aro... |
731 732 733 734 |
int type, unsigned long len, unsigned long flags, int pc); |
51a763dd8 tracing: Introduc... |
735 |
|
45dcd8b8a ftrace: move mmio... |
736 737 |
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data); |
c4a8e8be2 trace: better man... |
738 739 740 |
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts); |
132924943 tracing: Make str... |
741 |
void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, |
52ffabe38 tracing: Make __b... |
742 |
struct ring_buffer_event *event); |
7ffbd48d5 tracing: Cache co... |
743 |
|
955b61e59 ftrace,kdb: Exten... |
744 745 746 747 748 749 750 |
int trace_empty(struct trace_iterator *iter); void *trace_find_next_entry_inc(struct trace_iterator *iter); void trace_init_global_iter(struct trace_iterator *iter); void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
ecffc8a8c tracing: Add trac... |
751 752 |
unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); unsigned long trace_total_entries(struct trace_array *tr); |
6fb44b717 ftrace: add trace... |
753 |
void trace_function(struct trace_array *tr, |
6fb44b717 ftrace: add trace... |
754 755 |
unsigned long ip, unsigned long parent_ip, |
38697053f ftrace: preempt d... |
756 |
unsigned long flags, int pc); |
0a772620a tracing: Make gra... |
757 758 759 760 |
void trace_graph_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); |
7e9a49ef5 tracing/latency: ... |
761 |
void trace_latency_header(struct seq_file *m); |
62b915f10 tracing: Add grap... |
762 763 764 |
void trace_default_header(struct seq_file *m); void print_trace_header(struct seq_file *m, struct trace_iterator *iter); int trace_empty(struct trace_iterator *iter); |
bc0c38d13 ftrace: latency t... |
765 |
|
287b6e68c tracing/function-... |
766 |
void trace_graph_return(struct ftrace_graph_ret *trace); |
e49dc19c6 ftrace: function ... |
767 |
int trace_graph_entry(struct ftrace_graph_ent *trace); |
1a0799a8f tracing/function-... |
768 |
void set_graph_array(struct trace_array *tr); |
1e9b51c28 x86, bts, ftrace:... |
769 |
|
41bc8144d ftrace: fix up cm... |
770 771 |
void tracing_start_cmdline_record(void); void tracing_stop_cmdline_record(void); |
d914ba37d tracing: Add supp... |
772 773 |
void tracing_start_tgid_record(void); void tracing_stop_tgid_record(void); |
bc0c38d13 ftrace: latency t... |
774 |
int register_tracer(struct tracer *type); |
b5130b1e7 tracing: do not u... |
775 |
int is_tracing_stopped(void); |
955b61e59 ftrace,kdb: Exten... |
776 |
|
098c879e1 tracing: Add gene... |
777 |
loff_t tracing_lseek(struct file *file, loff_t offset, int whence); |
955b61e59 ftrace,kdb: Exten... |
778 779 780 781 |
extern cpumask_var_t __read_mostly tracing_buffer_mask; #define for_each_tracing_cpu(cpu) \ for_each_cpu(cpu, tracing_buffer_mask) |
bc0c38d13 ftrace: latency t... |
782 783 |
extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
0e9501735 function-graph: A... |
784 |
extern unsigned long tracing_thresh; |
4e267db13 tracing: Make the... |
785 |
/* PID filtering */ |
76c813e26 tracing: Move pid... |
786 787 |
extern int pid_max; |
4e267db13 tracing: Make the... |
788 789 790 |
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid); bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, |
b3b1e6ede ftrace: Create se... |
791 |
struct trace_pid_list *filtered_no_pids, |
4e267db13 tracing: Make the... |
792 793 794 795 |
struct task_struct *task); void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task); |
5cc8976bd tracing: Move the... |
796 797 798 |
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos); void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos); int trace_pid_show(struct seq_file *m, void *v); |
76c813e26 tracing: Move pid... |
799 800 801 802 |
void trace_free_pid_list(struct trace_pid_list *pid_list); int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt); |
4e267db13 tracing: Make the... |
803 |
|
5d4a9dba2 tracing: only sho... |
804 |
#ifdef CONFIG_TRACER_MAX_TRACE |
a35873a09 tracing: Add cond... |
805 806 |
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, void *cond_data); |
bc0c38d13 ftrace: latency t... |
807 808 |
void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu); |
5d4a9dba2 tracing: only sho... |
809 |
#endif /* CONFIG_TRACER_MAX_TRACE */ |
bc0c38d13 ftrace: latency t... |
810 |
|
#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)

void latency_fsnotify(struct trace_array *tr);

#else

/* No-op when neither a latency tracer nor fsnotify is configured */
static inline void latency_fsnotify(struct trace_array *tr) { }

#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
/* No-op when stacktrace support is not configured */
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
536149910 ftrace: add stack... |
829 |
|
a5a1d1c29 clocksource: Use ... |
830 |
extern u64 ftrace_now(int cpu); |
bc0c38d13 ftrace: latency t... |
831 |
|
4ca530852 tracing: protect ... |
832 |
extern void trace_find_cmdline(int pid, char comm[]); |
d914ba37d tracing: Add supp... |
833 |
extern int trace_find_tgid(int pid); |
c37775d57 tracing: Add infr... |
834 |
extern void trace_event_follow_fork(struct trace_array *tr, bool enable); |
f7d48cbde tracing/ftrace: m... |
835 |
|
bc0c38d13 ftrace: latency t... |
836 837 |
#ifdef CONFIG_DYNAMIC_FTRACE extern unsigned long ftrace_update_tot_cnt; |
da537f0ae ftrace: Add infor... |
838 839 |
extern unsigned long ftrace_number_of_pages; extern unsigned long ftrace_number_of_groups; |
04ec7bb64 tracing: Have the... |
840 841 842 |
void ftrace_init_trace_array(struct trace_array *tr); #else static inline void ftrace_init_trace_array(struct trace_array *tr) { } |
ad97772ad ftrace: Add selft... |
843 |
#endif |
d05cdb25d ftrace: fix dynam... |
844 845 |
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func extern int DYN_FTRACE_TEST_NAME(void); |
95950c2ec ftrace: Add self-... |
846 847 |
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 extern int DYN_FTRACE_TEST_NAME2(void); |
bc0c38d13 ftrace: latency t... |
848 |
|
55034cd6e tracing: Add allo... |
849 |
extern bool ring_buffer_expanded; |
020e5f85c tracing/events: A... |
850 |
extern bool tracing_selftest_disabled; |
60a11774b ftrace: add self-... |
851 |
#ifdef CONFIG_FTRACE_STARTUP_TEST |
f2d7cffc2 tracing: Disable ... |
852 |
extern void __init disable_tracing_selftest(const char *reason); |
60a11774b ftrace: add self-... |
853 854 |
extern int trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr); |
7447dce96 tracing/function-... |
855 856 |
extern int trace_selftest_startup_function_graph(struct tracer *trace, struct trace_array *tr); |
60a11774b ftrace: add self-... |
857 858 |
extern int trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr); |
60a11774b ftrace: add self-... |
859 860 |
extern int trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr); |
60a11774b ftrace: add self-... |
861 862 |
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr); |
60a11774b ftrace: add self-... |
863 864 |
extern int trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr); |
fb1b6d8b5 ftrace: add nop t... |
865 866 |
extern int trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr); |
80e5ea450 ftrace: add trace... |
867 868 |
extern int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr); |
8f7689933 tracing: Add ref_... |
869 870 871 872 873 874 875 |
/* * Tracer data references selftest functions that only occur * on boot up. These can be __init functions. Thus, when selftests * are enabled, then the tracers need to reference __init functions. */ #define __tracer_data __refdata #else |
f2d7cffc2 tracing: Disable ... |
876 877 878 |
static inline void __init disable_tracing_selftest(const char *reason) { } |
8f7689933 tracing: Add ref_... |
879 880 |
/* Tracers are seldom changed. Optimize when selftests are disabled. */ #define __tracer_data __read_mostly |
60a11774b ftrace: add self-... |
881 |
#endif /* CONFIG_FTRACE_STARTUP_TEST */ |
c7aafc549 ftrace: cleanups |
882 |
extern void *head_page(struct trace_array_cpu *data); |
a5a1d1c29 clocksource: Use ... |
883 |
extern unsigned long long ns2usecs(u64 nsec); |
1fd8f2a3f tracing/function-... |
884 |
extern int |
40ce74f19 tracing: remove r... |
885 |
trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
48ead0203 tracing/core: bri... |
886 |
extern int |
40ce74f19 tracing: remove r... |
887 |
trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
659372d3e tracing: add trac... |
888 889 890 |
extern int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args); |
132924943 tracing: Make str... |
891 |
int trace_array_printk_buf(struct trace_buffer *buffer, |
12883efb6 tracing: Consolid... |
892 |
unsigned long ip, const char *fmt, ...); |
955b61e59 ftrace,kdb: Exten... |
893 894 |
void trace_printk_seq(struct trace_seq *s); enum print_line_t print_trace_line(struct trace_iterator *iter); |
c7aafc549 ftrace: cleanups |
895 |
|
8e1e1df29 tracing: Add addi... |
896 |
extern char trace_find_mark(unsigned long long duration); |
673feb9d7 ftrace: Add :mod:... |
897 898 899 900 901 902 903 904 |
struct ftrace_hash; struct ftrace_mod_load { struct list_head list; char *func; char *module; int enable; }; |
8c08f0d5c ftrace: Have cach... |
905 906 907 |
enum { FTRACE_HASH_FL_MOD = (1 << 0), }; |
4046bf023 ftrace: Expose ft... |
908 909 910 911 |
struct ftrace_hash { unsigned long size_bits; struct hlist_head *buckets; unsigned long count; |
8c08f0d5c ftrace: Have cach... |
912 |
unsigned long flags; |
4046bf023 ftrace: Expose ft... |
913 914 915 916 917 |
struct rcu_head rcu; }; struct ftrace_func_entry * ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); |
eb583cd48 tracing: Use mode... |
918 |
static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) |
4046bf023 ftrace: Expose ft... |
919 |
{ |
8c08f0d5c ftrace: Have cach... |
920 |
return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD)); |
4046bf023 ftrace: Expose ft... |
921 |
} |
15e6cb367 tracing: add a tr... |
922 |
/* Standard output formatting function used for function return traces */ |
fb52607af tracing/function-... |
923 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
62b915f10 tracing: Add grap... |
924 925 926 927 928 929 930 931 |
/* Flag options */ #define TRACE_GRAPH_PRINT_OVERRUN 0x1 #define TRACE_GRAPH_PRINT_CPU 0x2 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 #define TRACE_GRAPH_PRINT_PROC 0x8 #define TRACE_GRAPH_PRINT_DURATION 0x10 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
9acd8de69 function_graph: S... |
932 933 934 935 936 |
#define TRACE_GRAPH_PRINT_REL_TIME 0x40 #define TRACE_GRAPH_PRINT_IRQS 0x80 #define TRACE_GRAPH_PRINT_TAIL 0x100 #define TRACE_GRAPH_SLEEP_TIME 0x200 #define TRACE_GRAPH_GRAPH_TIME 0x400 |
6fc84ea70 tracing: Do not u... |
937 938 |
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) |
62b915f10 tracing: Add grap... |
939 |
|
555772041 tracing: Move sle... |
940 |
extern void ftrace_graph_sleep_time_control(bool enable); |
c8dd0f458 function_graph: D... |
941 942 |
#ifdef CONFIG_FUNCTION_PROFILER |
555772041 tracing: Move sle... |
943 |
extern void ftrace_graph_graph_time_control(bool enable); |
c8dd0f458 function_graph: D... |
944 945 946 |
#else static inline void ftrace_graph_graph_time_control(bool enable) { } #endif |
555772041 tracing: Move sle... |
947 |
|
d7a8d9e90 tracing: Have gra... |
948 949 950 |
extern enum print_line_t print_graph_function_flags(struct trace_iterator *iter, u32 flags); extern void print_graph_headers_flags(struct seq_file *s, u32 flags); |
9d9add34e tracing: Have fun... |
951 |
extern void |
0706f1c48 tracing: adding f... |
952 |
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); |
62b915f10 tracing: Add grap... |
953 954 955 956 957 958 959 960 |
extern void graph_trace_open(struct trace_iterator *iter); extern void graph_trace_close(struct trace_iterator *iter); extern int __trace_graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace, unsigned long flags, int pc); extern void __trace_graph_return(struct trace_array *tr, struct ftrace_graph_ret *trace, unsigned long flags, int pc); |
ea4e2bc4d ftrace: graph of ... |
961 |
#ifdef CONFIG_DYNAMIC_FTRACE |
24a9729f8 tracing: Annotate... |
962 |
extern struct ftrace_hash __rcu *ftrace_graph_hash; |
fd0e6852c tracing: Annotate... |
963 |
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; |
ea4e2bc4d ftrace: graph of ... |
964 |
|
5cf99a0f3 tracing/fgraph: F... |
965 |
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) |
ea4e2bc4d ftrace: graph of ... |
966 |
{ |
5cf99a0f3 tracing/fgraph: F... |
967 |
unsigned long addr = trace->func; |
b9b0c831b ftrace: Convert g... |
968 |
int ret = 0; |
24a9729f8 tracing: Annotate... |
969 |
struct ftrace_hash *hash; |
b9b0c831b ftrace: Convert g... |
970 971 |
preempt_disable_notrace(); |
16052dd5b ftrace: Add comme... |
972 973 974 975 |
/* * Have to open code "rcu_dereference_sched()" because the * function graph tracer can be called when RCU is not * "watching". |
54a16ff6f ftrace: Protect f... |
976 |
* Protected with schedule_on_each_cpu(ftrace_sync) |
16052dd5b ftrace: Add comme... |
977 |
*/ |
24a9729f8 tracing: Annotate... |
978 979 980 |
hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); if (ftrace_hash_empty(hash)) { |
b9b0c831b ftrace: Convert g... |
981 982 |
ret = 1; goto out; |
ea4e2bc4d ftrace: graph of ... |
983 |
} |
24a9729f8 tracing: Annotate... |
984 |
if (ftrace_lookup_ip(hash, addr)) { |
5cf99a0f3 tracing/fgraph: F... |
985 986 987 988 989 990 991 |
/* * This needs to be cleared on the return functions * when the depth is zero. */ trace_recursion_set(TRACE_GRAPH_BIT); trace_recursion_set_depth(trace->depth); |
b9b0c831b ftrace: Convert g... |
992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 |
/* * If no irqs are to be traced, but a set_graph_function * is set, and called by an interrupt handler, we still * want to trace it. */ if (in_irq()) trace_recursion_set(TRACE_IRQ_BIT); else trace_recursion_clear(TRACE_IRQ_BIT); ret = 1; } out: preempt_enable_notrace(); return ret; |
ea4e2bc4d ftrace: graph of ... |
1007 |
} |
29ad23b00 ftrace: Add set_g... |
1008 |
|
5cf99a0f3 tracing/fgraph: F... |
1009 1010 1011 1012 1013 1014 |
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) { if (trace_recursion_test(TRACE_GRAPH_BIT) && trace->depth == trace_recursion_depth()) trace_recursion_clear(TRACE_GRAPH_BIT); } |
29ad23b00 ftrace: Add set_g... |
1015 1016 |
static inline int ftrace_graph_notrace_addr(unsigned long addr) { |
b9b0c831b ftrace: Convert g... |
1017 |
int ret = 0; |
fd0e6852c tracing: Annotate... |
1018 |
struct ftrace_hash *notrace_hash; |
29ad23b00 ftrace: Add set_g... |
1019 |
|
b9b0c831b ftrace: Convert g... |
1020 |
preempt_disable_notrace(); |
29ad23b00 ftrace: Add set_g... |
1021 |
|
16052dd5b ftrace: Add comme... |
1022 1023 1024 1025 |
/* * Have to open code "rcu_dereference_sched()" because the * function graph tracer can be called when RCU is not * "watching". |
54a16ff6f ftrace: Protect f... |
1026 |
* Protected with schedule_on_each_cpu(ftrace_sync) |
16052dd5b ftrace: Add comme... |
1027 |
*/ |
fd0e6852c tracing: Annotate... |
1028 1029 1030 1031 |
notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, !preemptible()); if (ftrace_lookup_ip(notrace_hash, addr)) |
b9b0c831b ftrace: Convert g... |
1032 |
ret = 1; |
29ad23b00 ftrace: Add set_g... |
1033 |
|
b9b0c831b ftrace: Convert g... |
1034 1035 |
preempt_enable_notrace(); return ret; |
29ad23b00 ftrace: Add set_g... |
1036 |
} |
15e6cb367 tracing: add a tr... |
1037 |
#else |
/* !CONFIG_DYNAMIC_FTRACE: no filter hashes exist, so trace everything */
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
}
ea4e2bc4d ftrace: graph of ... |
1049 |
#endif /* CONFIG_DYNAMIC_FTRACE */ |
1a4144286 tracing/fgraph: H... |
1050 1051 1052 1053 1054 1055 |
extern unsigned int fgraph_max_depth; static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) { /* trace it when it is-nested-in or is a function enabled. */ |
5cf99a0f3 tracing/fgraph: F... |
1056 1057 |
return !(trace_recursion_test(TRACE_GRAPH_BIT) || ftrace_graph_addr(trace)) || |
1a4144286 tracing/fgraph: H... |
1058 1059 1060 |
(trace->depth < 0) || (fgraph_max_depth && trace->depth >= fgraph_max_depth); } |
ea4e2bc4d ftrace: graph of ... |
1061 |
#else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
15e6cb367 tracing: add a tr... |
1062 |
static inline enum print_line_t |
d7a8d9e90 tracing: Have gra... |
1063 |
print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
15e6cb367 tracing: add a tr... |
1064 1065 1066 |
{ return TRACE_TYPE_UNHANDLED; } |
ea4e2bc4d ftrace: graph of ... |
1067 |
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
15e6cb367 tracing: add a tr... |
1068 |
|
756d17ee7 tracing: Support ... |
1069 |
extern struct list_head ftrace_pids; |
804a68516 ftrace: trace sin... |
1070 |
|
1155de47c ring-buffer: Make... |
1071 |
#ifdef CONFIG_FUNCTION_TRACER |
c58b6b037 ftrace: Fix ftrac... |
1072 1073 1074 |
#define FTRACE_PID_IGNORE -1 #define FTRACE_PID_TRACE -2 |
92a68fa04 ftrace: Move the ... |
1075 1076 1077 |
struct ftrace_func_command { struct list_head list; char *name; |
04ec7bb64 tracing: Have the... |
1078 1079 |
int (*func)(struct trace_array *tr, struct ftrace_hash *hash, |
92a68fa04 ftrace: Move the ... |
1080 1081 1082 |
char *func, char *cmd, char *params, int enable); }; |
f1ed7c741 ftrace: Do not ru... |
1083 |
extern bool ftrace_filter_param __initdata; |
345ddcc88 ftrace: Have set_... |
1084 |
static inline int ftrace_trace_task(struct trace_array *tr) |
804a68516 ftrace: trace sin... |
1085 |
{ |
c58b6b037 ftrace: Fix ftrac... |
1086 1087 |
return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) != FTRACE_PID_IGNORE; |
804a68516 ftrace: trace sin... |
1088 |
} |
e0a413f61 tracing: Warn on ... |
1089 |
extern int ftrace_is_dead(void); |
591dffdad ftrace: Allow for... |
1090 1091 1092 |
int ftrace_create_function_files(struct trace_array *tr, struct dentry *parent); void ftrace_destroy_function_files(struct trace_array *tr); |
4114fbfd0 tracing: Enable c... |
1093 1094 |
int ftrace_allocate_ftrace_ops(struct trace_array *tr); void ftrace_free_ftrace_ops(struct trace_array *tr); |
4104d326b ftrace: Remove gl... |
1095 1096 1097 |
void ftrace_init_global_array_ops(struct trace_array *tr); void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); void ftrace_reset_array_ops(struct trace_array *tr); |
345ddcc88 ftrace: Have set_... |
1098 |
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
501c23752 ftrace: Move topl... |
1099 1100 |
void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d_tracer); |
d879d0b8c ftrace: Fix funct... |
1101 |
void ftrace_clear_pids(struct trace_array *tr); |
dbeafd0d6 ftrace: Have func... |
1102 |
int init_function_trace(void); |
1e10486ff ftrace: Add 'func... |
1103 |
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable); |
1155de47c ring-buffer: Make... |
1104 |
#else |
345ddcc88 ftrace: Have set_... |
1105 |
static inline int ftrace_trace_task(struct trace_array *tr) |
1155de47c ring-buffer: Make... |
1106 1107 1108 |
{ return 1; } |
e0a413f61 tracing: Warn on ... |
1109 |
static inline int ftrace_is_dead(void) { return 0; } |
591dffdad ftrace: Allow for... |
1110 1111 1112 1113 1114 1115 |
static inline int ftrace_create_function_files(struct trace_array *tr, struct dentry *parent) { return 0; } |
4114fbfd0 tracing: Enable c... |
1116 1117 1118 1119 1120 |
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr) { return 0; } static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { } |
591dffdad ftrace: Allow for... |
1121 |
static inline void ftrace_destroy_function_files(struct trace_array *tr) { } |
4104d326b ftrace: Remove gl... |
1122 1123 1124 |
static inline __init void ftrace_init_global_array_ops(struct trace_array *tr) { } static inline void ftrace_reset_array_ops(struct trace_array *tr) { } |
345ddcc88 ftrace: Have set_... |
1125 |
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } |
501c23752 ftrace: Move topl... |
1126 |
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } |
d879d0b8c ftrace: Fix funct... |
1127 |
static inline void ftrace_clear_pids(struct trace_array *tr) { } |
dbeafd0d6 ftrace: Have func... |
1128 |
static inline int init_function_trace(void) { return 0; } |
1e10486ff ftrace: Add 'func... |
1129 |
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { } |
4104d326b ftrace: Remove gl... |
1130 1131 |
/* ftace_func_t type is not defined, use macro instead of static inline */ #define ftrace_init_array_ops(tr, func) do { } while (0) |
591dffdad ftrace: Allow for... |
1132 1133 1134 |
#endif /* CONFIG_FUNCTION_TRACER */ #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) |
/*
 * Callbacks for a function probe, attached through
 * register_ftrace_function_probe():
 * @func:  invoked with the probed ip/parent_ip and the per-probe @data
 * @init:  prepares the per-probe data (*data) from @init_data
 * @free:  releases the per-probe data
 * @print: writes a representation of the probe into a seq_file
 */
struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};
41794f190 ftrace: Added ftr... |
1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 |
struct ftrace_func_mapper; typedef int (*ftrace_mapper_func)(void *data); struct ftrace_func_mapper *allocate_ftrace_func_mapper(void); void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, unsigned long ip); int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, unsigned long ip, void *data); void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, unsigned long ip); void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, ftrace_mapper_func free_func); |
ec19b8591 ftrace: Move the ... |
1166 |
extern int |
04ec7bb64 tracing: Have the... |
1167 1168 |
register_ftrace_function_probe(char *glob, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data); |
d3d532d79 ftrace: Have unre... |
1169 |
extern int |
7b60f3d87 ftrace: Dynamical... |
1170 1171 |
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, struct ftrace_probe_ops *ops); |
a0e6369e4 ftrace/instances:... |
1172 |
extern void clear_ftrace_function_probes(struct trace_array *tr); |
ec19b8591 ftrace: Move the ... |
1173 |
|
92a68fa04 ftrace: Move the ... |
1174 1175 |
int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); |
591dffdad ftrace: Allow for... |
1176 1177 1178 |
void ftrace_create_filter_files(struct ftrace_ops *ops, struct dentry *parent); void ftrace_destroy_filter_files(struct ftrace_ops *ops); |
5c3469cb8 tracing/boot: Mov... |
1179 1180 1181 1182 1183 |
extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); |
591dffdad ftrace: Allow for... |
1184 |
#else |
92a68fa04 ftrace: Move the ... |
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 |
struct ftrace_func_command; static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) { return -EINVAL; } static inline __init int unregister_ftrace_command(char *cmd_name) { return -EINVAL; } |
8a49f3e03 ftrace: Remove #i... |
1195 1196 1197 |
static inline void clear_ftrace_function_probes(struct trace_array *tr) { } |
591dffdad ftrace: Allow for... |
1198 1199 1200 1201 1202 1203 1204 |
/* * The ops parameter passed in is usually undefined. * This must be a macro. */ #define ftrace_create_filter_files(ops, parent) do { } while (0) #define ftrace_destroy_filter_files(ops) do { } while (0) #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ |
804a68516 ftrace: trace sin... |
1205 |
|
c6650b2e5 tracing: ftrace_e... |
1206 |
bool ftrace_event_is_function(struct trace_event_call *call); |
ced39002f ftrace, perf: Add... |
1207 |
|
4fcdae83c ftrace: comment code |
1208 |
/* |
b63f39ea5 tracing: create g... |
1209 1210 1211 |
* struct trace_parser - servers for reading the user input separated by spaces * @cont: set if the input is not complete - no final space char was found * @buffer: holds the parsed user input |
1537a3638 tree-wide: fix 'l... |
1212 |
* @idx: user input length |
b63f39ea5 tracing: create g... |
1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 |
* @size: buffer size */ struct trace_parser { bool cont; char *buffer; unsigned idx; unsigned size; }; static inline bool trace_parser_loaded(struct trace_parser *parser) { return (parser->idx != 0); } static inline bool trace_parser_cont(struct trace_parser *parser) { return parser->cont; } static inline void trace_parser_clear(struct trace_parser *parser) { parser->cont = false; parser->idx = 0; } extern int trace_parser_get_init(struct trace_parser *parser, int size); extern void trace_parser_put(struct trace_parser *parser); extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos); /* |
/*
 * Only create function graph options if function graph is configured.
 */
/*
 * Each C(flag, "name") entry below is appended into the TRACE_FLAGS list
 * (defined further down) only when the corresponding tracer is built in,
 * so unavailable options never show up in trace_options.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
/* Keep the flag usable in C expressions even when the tracer is out. */
# define TRACE_ITER_FUNC_FORK 0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* NOTE(review): defined elsewhere; presumably the do-nothing tracer. */
extern struct tracer nop_trace;
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);

/*
 * Turn on branch tracing for @tr, but only when the "branch" trace
 * option is set; otherwise report success without doing anything.
 */
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (!(tr->trace_flags & TRACE_ITER_BRANCH))
		return 0;
	return enable_branch_tracing(tr);
}

static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
/* Stubs used when the branch tracer is not built in. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
52f232cb7 tracing: likely/u... |
1361 |
|
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/*
 * Run-time description of a single trace event field: its C @type and
 * @name, its @offset and @size (presumably within the recorded entry —
 * confirm against the field-definition code), whether it @is_signed,
 * and the @filter_type the event-filter code uses for it (see
 * filter_assign_type() below).
 */
struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};
struct prog_entry;

/*
 * A compiled event filter: @prog is the RCU-protected predicate
 * program, @filter_string presumably the user's original filter text
 * (see create_event_filter() below).
 */
struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

/* A named group ("system") of trace events with an optional shared filter. */
struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

/* Per-trace-array instance (directory entry) of an event_subsystem. */
struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

/* Commit variant that records no register state (regs == NULL). */
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

/*
 * Discard @event: if it is this CPU's temporary buffered event, just
 * drop the per-CPU count; otherwise discard the ring-buffer commit.
 */
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that requires
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry, event);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 * @regs: The pt_regs recorded along with the event
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct trace_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt);
}
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
/* IS_RIGHT and FOLD deliberately share bit 15 — see the comment below. */
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
struct filter_pred;
struct regex;

/* Predicate callback; presumably non-zero means @event matched — confirm. */
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

/* Pattern-match callback installed in struct regex::match. */
typedef int (*regex_match_func)(char *str, struct regex *r, int len);

/* Where in the string a filter pattern is allowed to match. */
enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

/* One predicate of a compiled event filter. */
struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int			not;
	int 			op;
};
4ef56902f tracing: Make ftr... |
1563 1564 1565 1566 |
static inline bool is_string_field(struct ftrace_event_field *field) { return field->filter_type == FILTER_DYN_STRING || field->filter_type == FILTER_STATIC_STRING || |
4c7384131 tracing: Have COM... |
1567 1568 |
field->filter_type == FILTER_PTR_STRING || field->filter_type == FILTER_COMM; |
4ef56902f tracing: Make ftr... |
1569 1570 1571 1572 1573 1574 |
} static inline bool is_function_field(struct ftrace_event_field *field) { return field->filter_type == FILTER_TRACE_FN; } |
/* Parse a filter pattern, yielding the match type and search string. */
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);

/* Per-event and per-subsystem filter manipulation. */
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);
extern int event_trace_init(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern void __trace_early_add_events(struct trace_array *tr);

/*
 * NOTE(review): the leading-underscore variant presumably differs from
 * find_event_file() in locking or enabled-state checks — confirm in
 * trace_events.c before relying on either.
 */
extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);
/* Return the private data stashed in @filp's inode (i_private). */
static inline void *event_file_data(struct file *filp)
{
	/*
	 * READ_ONCE because i_private can change concurrently —
	 * NOTE(review): confirm against the event-file removal path.
	 */
	return READ_ONCE(file_inode(filp)->i_private);
}
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_hist_debug_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
/* Histogram triggers not configured: registration becomes a no-op. */
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

/* State of one instantiated trigger attached to a trace event. */
struct event_trigger_data {
	unsigned long			count;	/* NOTE(review): presumably remaining invocations — confirm */
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;	/* non-NULL for named triggers (see named_list below) */
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};
/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

/* Parameters of an enable/disable_event (or _hist) trigger. */
struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);

/* Named-trigger bookkeeping (triggers shared across events by name). */
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
/*
 * A second declaration of register_trigger_hist_enable_disable_cmds()
 * used to follow here; it was redundant — the function is already
 * declared (or stubbed as static inline) in the CONFIG_HIST_TRIGGERS
 * section above, and redeclaring the !HIST static inline stub as a
 * bare extern is confusing at best.
 */
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec,
					struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
353206f5c tracing: Use flag... |
1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 |
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: Set when the command's action must be deferred until
 *	after the current event has been committed.  A trigger that
 *	itself logs into the trace buffer must not be invoked while the
 *	event it fired on is still in the process of being logged, so
 *	trigger invocation is split in two: first the filter is checked
 *	against the current trace record and, for commands carrying
 *	this flag, a bit is set in the return value rather than running
 *	the trigger directly.  Once every command has either run or set
 *	its return bit, the record is committed or discarded, and any
 *	deferred triggers are finally invoked after the event closes.
 *	If the event_trigger_ops @func() probe never writes to the
 *	trace buffer, the flag can be left unspecified.
 *
 * @NEEDS_REC: Set when the command requires access to the trace
 *	record to perform its function, independently of whether a
 *	filter is attached to it (a filter also forces record access,
 *	but is not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

/* True if @cmd_ops defers its trigger until after the event commits. */
static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return (cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER) != 0;
}

/* True if @cmd_ops needs the trace record to perform its function. */
static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return (cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC) != 0;
}
7f1d2f821 tracing: Rename f... |
1888 |
extern int trace_event_enable_disable(struct trace_event_file *file, |
85f2b0826 tracing: Add basi... |
1889 |
int enable, int soft_disable); |
93e31ffbf tracing: Add 'sna... |
1890 |
extern int tracing_alloc_snapshot(void); |
a35873a09 tracing: Add cond... |
1891 1892 1893 1894 1895 |
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); extern int tracing_snapshot_cond_disable(struct trace_array *tr); extern void *tracing_cond_snapshot_data(struct trace_array *tr); |
85f2b0826 tracing: Add basi... |
1896 |
|
e9fb2b6d5 tracing: have eve... |
1897 1898 |
extern const char *__start___trace_bprintk_fmt[]; extern const char *__stop___trace_bprintk_fmt[]; |
102c9323c tracing: Add __tr... |
1899 1900 |
extern const char *__start___tracepoint_str[]; extern const char *__stop___tracepoint_str[]; |
b9f9108ca tracing: Remove a... |
1901 |
void trace_printk_control(bool enabled); |
81698831b tracing: Enable c... |
1902 |
void trace_printk_start_comm(void); |
613f04a0f tracing: Prevent ... |
1903 |
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
2b6080f28 tracing: Encapsul... |
1904 |
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
07d777fe8 tracing: Add perc... |
1905 |
|
5c3469cb8 tracing/boot: Mov... |
1906 1907 1908 1909 1910 1911 1912 |
/* Used from boot time tracer */ extern int trace_set_options(struct trace_array *tr, char *option); extern int tracing_set_tracer(struct trace_array *tr, const char *buf); extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id); extern int tracing_set_cpumask(struct trace_array *tr, cpumask_var_t tracing_cpumask_new); |
7e465baa8 tracing: Make tra... |
1913 1914 1915 1916 1917 1918 |
#define MAX_EVENT_NAME_LEN 64 extern int trace_run_command(const char *buf, int (*createfn)(int, char**)); extern ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char**)); |
8a062902b tracing: Add trac... |
1919 |
extern unsigned int err_pos(char *cmd, const char *str); |
2f754e771 tracing: Have the... |
1920 1921 |
extern void tracing_log_err(struct trace_array *tr, const char *loc, const char *cmd, |
8a062902b tracing: Add trac... |
1922 |
const char **errs, u8 type, u8 pos); |
ca268da6e tracing: Add inte... |
1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 |
/* * Normal trace_printk() and friends allocates special buffers * to do the manipulation, as well as saves the print formats * into sections to display. But the trace infrastructure wants * to use these without the added overhead at the price of being * a bit slower (used mainly for warnings, where we don't care * about performance). The internal_trace_puts() is for such * a purpose. */ #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) |
4e5292ea1 tracing: use the ... |
1933 |
#undef FTRACE_ENTRY |
04ae87a52 ftrace: Rework ev... |
1934 |
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
2425bcb92 tracing: Rename f... |
1935 |
extern struct trace_event_call \ |
52f5684c8 kernel: use macro... |
1936 |
__aligned(4) event_##call; |
4e5292ea1 tracing: use the ... |
1937 |
#undef FTRACE_ENTRY_DUP |
04ae87a52 ftrace: Rework ev... |
1938 1939 |
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
a4a551b8f ftrace: Reduce si... |
1940 |
#undef FTRACE_ENTRY_PACKED |
04ae87a52 ftrace: Rework ev... |
1941 1942 |
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
a4a551b8f ftrace: Reduce si... |
1943 |
|
4e5292ea1 tracing: use the ... |
1944 |
#include "trace_entries.h" |
e1112b4d9 tracing/filters: ... |
1945 |
|
6e48b550d tracing: Fix buil... |
1946 |
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) |
2425bcb92 tracing: Rename f... |
1947 |
int perf_ftrace_event_register(struct trace_event_call *call, |
ced39002f ftrace, perf: Add... |
1948 1949 1950 |
enum trace_reg type, void *data); #else #define perf_ftrace_event_register NULL |
6e48b550d tracing: Fix buil... |
1951 |
#endif |
ced39002f ftrace, perf: Add... |
1952 |
|
5f893b263 tracing: Move ena... |
1953 1954 |
#ifdef CONFIG_FTRACE_SYSCALLS void init_ftrace_syscalls(void); |
dbfeaa7ab tracing: Add get_... |
1955 |
const char *get_syscall_name(int syscall); |
5f893b263 tracing: Move ena... |
1956 1957 |
#else static inline void init_ftrace_syscalls(void) { } |
dbfeaa7ab tracing: Add get_... |
1958 1959 1960 1961 |
static inline const char *get_syscall_name(int syscall) { return NULL; } |
5f893b263 tracing: Move ena... |
1962 1963 1964 1965 |
#endif #ifdef CONFIG_EVENT_TRACING void trace_event_init(void); |
f57a41434 trace: rename enu... |
1966 |
void trace_event_eval_update(struct trace_eval_map **map, int len); |
5c3469cb8 tracing/boot: Mov... |
1967 1968 1969 |
/* Used from boot time tracer */ extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); extern int trigger_process_regex(struct trace_event_file *file, char *buff); |
5f893b263 tracing: Move ena... |
1970 1971 |
#else static inline void __init trace_event_init(void) { } |
f57a41434 trace: rename enu... |
1972 |
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
5f893b263 tracing: Move ena... |
1973 |
#endif |
2824f5033 tracing: Make the... |
1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 |
#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
/*
 * Snapshot support compiled out: taking a snapshot is a no-op and
 * "allocating" the snapshot buffer trivially succeeds.
 */
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif
3f1756dc2 tracing: More rev... |
1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 |
#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
/* Preempt tracer compiled out: the hooks collapse to no-ops. */
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
/* Irqsoff tracer compiled out: the hooks collapse to no-ops. */
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
0daa23029 tracing: Add tp_p... |
1998 |
extern struct trace_iterator *tracepoint_print_iter; |
5f893b263 tracing: Move ena... |
1999 |
|
0c97bf863 tracing: Silence ... |
2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 |
/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t keep = offsetof(struct trace_iterator, seq);

	/*
	 * Zero everything from ->seq onward in a single memset; going
	 * through a char pointer keeps gcc from complaining about
	 * overwriting more than just one member in the structure.
	 */
	memset((char *)iter + keep, 0, sizeof(*iter) - keep);

	iter->pos = -1;
}
42d120e2d tracing: Move is_... |
2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 |
/* Check the name is good for event/group/fields */
static inline bool is_good_name(const char *name)
{
	const char *p = name;

	/* The first character must be a letter or an underscore. */
	if (!isalpha(*p) && *p != '_')
		return false;

	/* The rest may only be letters, digits or underscores. */
	for (p++; *p != '\0'; p++) {
		if (!isalpha(*p) && !isdigit(*p) && *p != '_')
			return false;
	}

	return true;
}
bc0c38d13 ftrace: latency t... |
2028 |
#endif /* _LINUX_KERNEL_TRACE_H */ |