Commit cdbe61bfe70440939e457fb4a8d0995eaaed17de

Authored by Steven Rostedt
Committed by Steven Rostedt
1 parent b848914ce3

ftrace: Allow dynamically allocated function tracers

Now that functions may be selected individually, it only makes sense
to allow dynamically allocated trace structures to do the tracing
as well. This allows perf to allocate an ftrace_ops structure at
runtime and use it to pick and choose which functions that
structure will trace.
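
For illustration, a minimal sketch of how such a client might drive the
new interface; my_callback and my_start_tracing are hypothetical names,
not part of this commit:

    #include <linux/ftrace.h>
    #include <linux/slab.h>

    static void my_callback(unsigned long ip, unsigned long parent_ip)
    {
            /* ip is the traced function, parent_ip its call site */
    }

    static int my_start_tracing(void)
    {
            struct ftrace_ops *ops;

            /*
             * kmalloc'ed memory lies outside _sdata.._edata, so
             * registration below flags this ops FTRACE_OPS_FL_DYNAMIC.
             */
            ops = kzalloc(sizeof(*ops), GFP_KERNEL);
            if (!ops)
                    return -ENOMEM;

            ops->func = my_callback;
            return register_ftrace_function(ops);
    }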

Note, a dynamically allocated ftrace_ops will always be called
indirectly instead of directly from the mcount trampoline in
entry.S. This is because there is no safe way to keep mcount
from being preempted before calling the ops function, short of
modifying every architecture's entry.S to prevent it (not likely).
Thus, a dynamically allocated ftrace_ops is now called through
ftrace_ops_list_func(), which loops over the registered ops; that
loop runs whenever more than one op is registered at a time, or
whenever the only registered op is dynamic. The loop is protected
by preempt_disable().
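
Because the unregister path (see the kernel/trace/ftrace.c hunk below)
calls synchronize_sched() for dynamic ops, every CPU is guaranteed to
have left that preempt-disabled loop before unregistration returns, so
the caller may free the structure immediately afterwards. A sketch,
with my_stop_tracing hypothetical:

    static void my_stop_tracing(struct ftrace_ops *ops)
    {
            unregister_ftrace_function(ops);
            /* no CPU can still be inside ops->func at this point */
            kfree(ops);
    }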

To determine whether an ftrace_ops structure is dynamically allocated,
a new utility function, core_kernel_data(), was added to
kernel/extable.c. It returns 1 if the address lies between
_sdata and _edata, i.e. inside the kernel image's static data.
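
As a rough usage sketch (all names hypothetical; builtin_ops must be
built into the kernel image and carry a non-zero initializer so that it
lands in .data rather than .bss):

    static struct ftrace_ops builtin_ops = { .func = my_callback };

    static void classify_example(void)
    {
            struct ftrace_ops *heap_ops;

            heap_ops = kzalloc(sizeof(*heap_ops), GFP_KERNEL);
            if (!heap_ops)
                    return;

            WARN_ON(!core_kernel_data((unsigned long)&builtin_ops)); /* static data */
            WARN_ON(core_kernel_data((unsigned long)heap_ops));      /* heap */
            kfree(heap_ops);
    }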

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Showing 4 changed files with 40 additions and 7 deletions

include/linux/ftrace.h
... ... @@ -34,6 +34,7 @@
34 34 enum {
35 35 FTRACE_OPS_FL_ENABLED = 1 << 0,
36 36 FTRACE_OPS_FL_GLOBAL = 1 << 1,
  37 + FTRACE_OPS_FL_DYNAMIC = 1 << 2,
37 38 };
38 39  
39 40 struct ftrace_ops {
include/linux/kernel.h
... ... @@ -283,6 +283,7 @@
283 283 extern unsigned long long memparse(const char *ptr, char **retptr);
284 284  
285 285 extern int core_kernel_text(unsigned long addr);
  286 +extern int core_kernel_data(unsigned long addr);
286 287 extern int __kernel_text_address(unsigned long addr);
287 288 extern int kernel_text_address(unsigned long addr);
288 289 extern int func_ptr_is_kernel_text(void *ptr);
kernel/extable.c
... ... @@ -72,6 +72,14 @@
72 72 return 0;
73 73 }
74 74  
  75 +int core_kernel_data(unsigned long addr)
  76 +{
  77 + if (addr >= (unsigned long)_sdata &&
  78 + addr < (unsigned long)_edata)
  79 + return 1;
  80 + return 0;
  81 +}
  82 +
75 83 int __kernel_text_address(unsigned long addr)
76 84 {
77 85 if (core_kernel_text(addr))
kernel/trace/ftrace.c
... ... @@ -189,8 +189,14 @@
189 189  
190 190 update_global_ops();
191 191  
  192 + /*
  193 + * If we are at the end of the list and this ops is
  194 + * not dynamic, then have the mcount trampoline call
  195 + * the function directly
  196 + */
192 197 if (ftrace_ops_list == &ftrace_list_end ||
193   - ftrace_ops_list->next == &ftrace_list_end)
  198 + (ftrace_ops_list->next == &ftrace_list_end &&
  199 + !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
194 200 func = ftrace_ops_list->func;
195 201 else
196 202 func = ftrace_ops_list_func;
... ... @@ -250,6 +256,9 @@
250 256 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
251 257 return -EBUSY;
252 258  
  259 + if (!core_kernel_data((unsigned long)ops))
  260 + ops->flags |= FTRACE_OPS_FL_DYNAMIC;
  261 +
253 262 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
254 263 int first = ftrace_global_list == &ftrace_list_end;
255 264 add_ftrace_ops(&ftrace_global_list, ops);
... ... @@ -293,6 +302,13 @@
293 302 if (ftrace_enabled)
294 303 update_ftrace_function();
295 304  
  305 + /*
  306 + * Dynamic ops may be freed, we must make sure that all
  307 + * callers are done before leaving this function.
  308 + */
  309 + if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
  310 + synchronize_sched();
  311 +
296 312 return 0;
297 313 }
298 314  
... ... @@ -1225,6 +1241,9 @@
1225 1241 * the filter_hash does not exist or is empty,
1226 1242 * AND
1227 1243 * the ip is not in the ops->notrace_hash.
  1244 + *
  1245 + * This needs to be called with preemption disabled as
  1246 + * the hashes are freed with call_rcu_sched().
1228 1247 */
1229 1248 static int
1230 1249 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
... ... @@ -1233,9 +1252,6 @@
1233 1252 struct ftrace_hash *notrace_hash;
1234 1253 int ret;
1235 1254  
1236   - /* The hashes are freed with call_rcu_sched() */
1237   - preempt_disable_notrace();
1238   -
1239 1255 filter_hash = rcu_dereference_raw(ops->filter_hash);
1240 1256 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1241 1257  
... ... @@ -1246,7 +1262,6 @@
1246 1262 ret = 1;
1247 1263 else
1248 1264 ret = 0;
1249   - preempt_enable_notrace();
1250 1265  
1251 1266 return ret;
1252 1267 }
1253 1268  
1254 1269  
... ... @@ -3425,14 +3440,20 @@
3425 3440 static void
3426 3441 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3427 3442 {
3428   - /* see comment above ftrace_global_list_func */
3429   - struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
  3443 + struct ftrace_ops *op;
3430 3444  
  3445 + /*
  3446 + * Some of the ops may be dynamically allocated,
  3447 + * they must be freed after a synchronize_sched().
  3448 + */
  3449 + preempt_disable_notrace();
  3450 + op = rcu_dereference_raw(ftrace_ops_list);
3431 3451 while (op != &ftrace_list_end) {
3432 3452 if (ftrace_ops_test(op, ip))
3433 3453 op->func(ip, parent_ip);
3434 3454 op = rcu_dereference_raw(op->next);
3435 3455 };
  3456 + preempt_enable_notrace();
3436 3457 }
3437 3458  
3438 3459 static void clear_ftrace_swapper(void)
... ... @@ -3743,6 +3764,7 @@
3743 3764 mutex_unlock(&ftrace_lock);
3744 3765 return ret;
3745 3766 }
  3767 +EXPORT_SYMBOL_GPL(register_ftrace_function);
3746 3768  
3747 3769 /**
3748 3770 * unregister_ftrace_function - unregister a function for profiling.
... ... @@ -3762,6 +3784,7 @@
3762 3784  
3763 3785 return ret;
3764 3786 }
  3787 +EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3765 3788  
3766 3789 int
3767 3790 ftrace_enable_sysctl(struct ctl_table *table, int write,