Commit 7eb69529cbaf4229baf5559a400a7a46352c6e52

Authored by Linus Torvalds

Merge tag 'trace-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Not much changes for the 3.12 merge window.  The major tracing changes
  are still in flux, and will have to wait for 3.13.

  The changes for 3.12 are mostly clean ups and minor fixes.

  H Peter Anvin added a check to x86_32 static function tracing that
  makes mcount bail out safely when it is hit before paging is enabled
  (as happens with early microcode loading); it helps a small segment
  of the kernel community.

  Oleg Nesterov had a few changes from 3.11, but they were mostly clean
  ups and not worth pushing in the -rc time frame.

  Li Zefan had a small clean up, annotating a raw_init with __init.

  I fixed a slight race in updating function callbacks, but the race is
  so small, and the bug that happens when it occurs is so minor, that
  it's not even worth pushing to stable.

  The only real enhancement is from Alexander Z Lam, who made
  tracing_cpumask work per trace buffer instance instead of having all
  instances share a single global cpumask"

* tag 'trace-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace/rcu: Do not trace debug_lockdep_rcu_enabled()
  x86-32, ftrace: Fix static ftrace when early microcode is enabled
  ftrace: Fix a slight race in modifying what function callback gets traced
  tracing: Make tracing_cpumask available for all instances
  tracing: Kill the !CONFIG_MODULES code in trace_events.c
  tracing: Don't pass file_operations array to event_create_dir()
  tracing: Kill trace_create_file_ops() and friends
  tracing/syscalls: Annotate raw_init function with __init

7 changed files:

arch/x86/kernel/entry_32.S
@@ -1176,6 +1176,9 @@
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+        cmpl $__PAGE_OFFSET, %esp
+        jb ftrace_stub          /* Paging not enabled yet? */
+
         cmpl $0, function_trace_stop
         jne ftrace_stub
 
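For context: with early microcode loading, code runs (and, under static ftrace, calls mcount) before paging is enabled, so mcount must detect the "too early" case itself. Below is a minimal user-space C model of the new guard; the constant mirrors the default x86-32 3G/1G split and the helper name is made up:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the kernel's __PAGE_OFFSET on x86-32 with the
     * default CONFIG_VMSPLIT_3G layout (kernel mapped at 3 GB). */
    #define PAGE_OFFSET_32 0xc0000000u

    /* Mirrors "cmpl $__PAGE_OFFSET, %esp; jb ftrace_stub": before
     * paging is up, the stack pointer is a physical address below
     * the kernel mapping, so tracing has to bail out. */
    static int too_early_to_trace(uint32_t esp)
    {
            return esp < PAGE_OFFSET_32;
    }

    int main(void)
    {
            printf("esp=0x00100000 -> %d\n", too_early_to_trace(0x00100000u));
            printf("esp=0xc1000000 -> %d\n", too_early_to_trace(0xc1000000u));
            return 0;
    }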
kernel/rcupdate.c
@@ -122,7 +122,7 @@
 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
-int debug_lockdep_rcu_enabled(void)
+int notrace debug_lockdep_rcu_enabled(void)
 {
         return rcu_scheduler_active && debug_locks &&
                current->lockdep_recursion == 0;
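debug_lockdep_rcu_enabled() is hit from deep inside the tracing machinery itself (every rcu_dereference() check can reach it), so letting the function tracer trace it mostly produces noise. In the kernel, notrace expands (modulo underscores) to the GCC attribute shown below; a minimal sketch of the effect, with a made-up helper function:

    #include <stdio.h>

    /* The kernel's notrace: suppresses the mcount profiling call that
     * gcc emits under -pg, so this function can never recurse back
     * into the tracer that calls it. */
    #define notrace __attribute__((no_instrument_function))

    static int notrace debug_helper(int x)
    {
            return x != 0;  /* no mcount call is emitted in here */
    }

    int main(void)
    {
            printf("%d\n", debug_helper(1));
            return 0;
    }

Compiling with "gcc -pg" and disassembling shows the mcount call present in ordinary functions and absent here.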
kernel/trace/ftrace.c
@@ -1978,12 +1978,27 @@
 
 void ftrace_modify_all_code(int command)
 {
+        int update = command & FTRACE_UPDATE_TRACE_FUNC;
+
+        /*
+         * If the ftrace_caller calls a ftrace_ops func directly,
+         * we need to make sure that it only traces functions it
+         * expects to trace. When doing the switch of functions,
+         * we need to update to the ftrace_ops_list_func first
+         * before the transition between old and new calls are set,
+         * as the ftrace_ops_list_func will check the ops hashes
+         * to make sure the ops are having the right functions
+         * traced.
+         */
+        if (update)
+                ftrace_update_ftrace_func(ftrace_ops_list_func);
+
         if (command & FTRACE_UPDATE_CALLS)
                 ftrace_replace_code(1);
         else if (command & FTRACE_DISABLE_CALLS)
                 ftrace_replace_code(0);
 
-        if (command & FTRACE_UPDATE_TRACE_FUNC)
+        if (update && ftrace_trace_function != ftrace_ops_list_func)
                 ftrace_update_ftrace_func(ftrace_trace_function);
 
         if (command & FTRACE_START_FUNC_RET)
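The two-step ordering matters because ftrace_replace_code() converts call sites one at a time: mid-conversion, a direct callback could briefly be handed functions it never asked to trace. Routing everything through ftrace_ops_list_func during the switch avoids that, since the list func checks the ops hashes on every call. A minimal user-space model of the ordering, with hypothetical names (not kernel API):

    #include <stdio.h>

    typedef void (*trace_fn)(const char *func);

    /* Safe intermediary: consults per-ops filters, so it is correct
     * no matter which call sites have been converted so far. */
    static void ops_list_func(const char *func)
    {
            printf("list dispatch: %s\n", func);
    }

    /* Direct callback: only valid once every call site traces exactly
     * the functions this callback expects to see. */
    static void direct_func(const char *func)
    {
            printf("direct: %s\n", func);
    }

    int main(void)
    {
            trace_fn live = ops_list_func;  /* 1. switch to the list func */
            live("kmalloc");                /*    any function is safe here */
            /* 2. rewrite the call sites (ftrace_replace_code(1)) */
            live = direct_func;             /* 3. only now go direct */
            live("kmalloc");
            return 0;
    }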
kernel/trace/trace.c
@@ -3166,11 +3166,6 @@
 };
 
 /*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
-/*
  * The tracer itself will not take this lock, but still we want
  * to provide a consistent cpumask to user-space:
  */
 
@@ -3186,11 +3181,12 @@
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
                      size_t count, loff_t *ppos)
 {
+        struct trace_array *tr = file_inode(filp)->i_private;
         int len;
 
         mutex_lock(&tracing_cpumask_update_lock);
 
-        len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+        len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
         if (count - len < 2) {
                 count = -EINVAL;
                 goto out_err;
@@ -3208,7 +3204,7 @@
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                       size_t count, loff_t *ppos)
 {
-        struct trace_array *tr = filp->private_data;
+        struct trace_array *tr = file_inode(filp)->i_private;
         cpumask_var_t tracing_cpumask_new;
         int err, cpu;
 
 
@@ -3228,12 +3224,12 @@
                  * Increase/decrease the disabled counter if we are
                  * about to flip a bit in the cpumask:
                  */
-                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+                if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
                 }
-                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+                if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
@@ -3242,7 +3238,7 @@
         arch_spin_unlock(&ftrace_max_lock);
         local_irq_enable();
 
-        cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 
         mutex_unlock(&tracing_cpumask_update_lock);
         free_cpumask_var(tracing_cpumask_new);
 
@@ -3256,9 +3252,10 @@
 }
 
 static const struct file_operations tracing_cpumask_fops = {
-        .open           = tracing_open_generic,
+        .open           = tracing_open_generic_tr,
         .read           = tracing_cpumask_read,
         .write          = tracing_cpumask_write,
+        .release        = tracing_release_generic_tr,
         .llseek         = generic_file_llseek,
 };
 
@@ -5938,6 +5935,11 @@
         if (!tr->name)
                 goto out_free_tr;
 
+        if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+                goto out_free_tr;
+
+        cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
         raw_spin_lock_init(&tr->start_lock);
 
         tr->current_trace = &nop_trace;
@@ -5969,6 +5971,7 @@
  out_free_tr:
         if (tr->trace_buffer.buffer)
                 ring_buffer_free(tr->trace_buffer.buffer);
+        free_cpumask_var(tr->tracing_cpumask);
         kfree(tr->name);
         kfree(tr);
 
@@ -6098,6 +6101,9 @@
 {
         int cpu;
 
+        trace_create_file("tracing_cpumask", 0644, d_tracer,
+                          tr, &tracing_cpumask_fops);
+
         trace_create_file("trace_options", 0644, d_tracer,
                           tr, &tracing_iter_fops);
 
@@ -6147,9 +6153,6 @@
 
         init_tracer_debugfs(&global_trace, d_tracer);
 
-        trace_create_file("tracing_cpumask", 0644, d_tracer,
-                          &global_trace, &tracing_cpumask_fops);
-
         trace_create_file("available_tracers", 0444, d_tracer,
                           &global_trace, &show_traces_fops);
 
@@ -6371,7 +6374,7 @@
         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                 goto out;
 
-        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
                 goto out_free_buffer_mask;
 
         /* Only allocate trace_printk buffers if a trace_printk exists */
@@ -6386,7 +6389,7 @@
                 ring_buf_size = 1;
 
         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
-        cpumask_copy(tracing_cpumask, cpu_all_mask);
+        cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
 
         raw_spin_lock_init(&global_trace.start_lock);
 
@@ -6441,7 +6444,7 @@
 #ifdef CONFIG_TRACER_MAX_TRACE
         free_percpu(global_trace.max_buffer.data);
 #endif
-        free_cpumask_var(tracing_cpumask);
+        free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
         free_cpumask_var(tracing_buffer_mask);
 out:
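With the mask moved into struct trace_array and the file created from init_tracer_debugfs(), every instance directory now carries its own tracing_cpumask; the switch to tracing_open_generic_tr/tracing_release_generic_tr also makes opens hold a reference on the instance, so it cannot vanish under an open file. A sketch of driving this from user space, in C to match the rest of the code; the debugfs mount point and the "foo" instance name are assumptions:

    #include <stdio.h>

    /* Restrict one tracing instance to CPUs 0-1 (hex mask 3) while the
     * global mask and all other instances stay untouched. Assumes
     * debugfs at /sys/kernel/debug and an instance created with
     * mkdir .../tracing/instances/foo. */
    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/tracing/instances/foo/tracing_cpumask";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("3\n", f);
            fclose(f);
            return 0;
    }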
kernel/trace/trace.h
@@ -206,6 +206,7 @@
         struct dentry           *event_dir;
         struct list_head        systems;
         struct list_head        events;
+        cpumask_var_t           tracing_cpumask; /* only trace on set CPUs */
         int                     ref;
 };
 
kernel/trace/trace_events.c
@@ -1489,12 +1489,7 @@
 }
 
 static int
-event_create_dir(struct dentry *parent,
-                 struct ftrace_event_file *file,
-                 const struct file_operations *id,
-                 const struct file_operations *enable,
-                 const struct file_operations *filter,
-                 const struct file_operations *format)
+event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 {
         struct ftrace_event_call *call = file->event_call;
         struct trace_array *tr = file->tr;
 
@@ -1522,12 +1517,13 @@
 
         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                 trace_create_file("enable", 0644, file->dir, file,
-                                  enable);
+                                  &ftrace_enable_fops);
 
 #ifdef CONFIG_PERF_EVENTS
         if (call->event.type && call->class->reg)
                 trace_create_file("id", 0444, file->dir,
-                                  (void *)(long)call->event.type, id);
+                                  (void *)(long)call->event.type,
+                                  &ftrace_event_id_fops);
 #endif
 
         /*
 
@@ -1544,10 +1540,10 @@
                 }
         }
         trace_create_file("filter", 0644, file->dir, call,
-                          filter);
+                          &ftrace_event_filter_fops);
 
         trace_create_file("format", 0444, file->dir, call,
-                          format);
+                          &ftrace_event_format_fops);
 
         return 0;
 }
@@ -1648,12 +1644,7 @@
 
 /* Add an event to a trace directory */
 static int
-__trace_add_new_event(struct ftrace_event_call *call,
-                      struct trace_array *tr,
-                      const struct file_operations *id,
-                      const struct file_operations *enable,
-                      const struct file_operations *filter,
-                      const struct file_operations *format)
+__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
 {
         struct ftrace_event_file *file;
 
@@ -1661,7 +1652,7 @@
         if (!file)
                 return -ENOMEM;
 
-        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
+        return event_create_dir(tr->event_dir, file);
 }
 
 /*
@@ -1683,8 +1674,7 @@
 }
 
 struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call,
-                                   struct ftrace_module_file_ops *file_ops);
+static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
 int trace_add_event_call(struct ftrace_event_call *call)
@@ -1695,7 +1685,7 @@
 
         ret = __register_event(call, NULL);
         if (ret >= 0)
-                __add_event_to_tracers(call, NULL);
+                __add_event_to_tracers(call);
 
         mutex_unlock(&event_mutex);
         mutex_unlock(&trace_types_lock);
 
 
 
 
@@ -1769,100 +1759,21 @@
 
 #ifdef CONFIG_MODULES
 
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
-        struct list_head        list;
-        struct module           *mod;
-        struct file_operations  id;
-        struct file_operations  enable;
-        struct file_operations  format;
-        struct file_operations  filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-        /*
-         * As event_calls are added in groups by module,
-         * when we find one file_ops, we don't need to search for
-         * each call in that module, as the rest should be the
-         * same. Only search for a new one if the last one did
-         * not match.
-         */
-        if (file_ops && mod == file_ops->mod)
-                return file_ops;
-
-        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-                if (file_ops->mod == mod)
-                        return file_ops;
-        }
-        return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
-        struct ftrace_module_file_ops *file_ops;
-
-        /*
-         * This is a bit of a PITA. To allow for correct reference
-         * counting, modules must "own" their file_operations.
-         * To do this, we allocate the file operations that will be
-         * used in the event directory.
-         */
-
-        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
-        if (!file_ops)
-                return NULL;
-
-        file_ops->mod = mod;
-
-        file_ops->id = ftrace_event_id_fops;
-        file_ops->id.owner = mod;
-
-        file_ops->enable = ftrace_enable_fops;
-        file_ops->enable.owner = mod;
-
-        file_ops->filter = ftrace_event_filter_fops;
-        file_ops->filter.owner = mod;
-
-        file_ops->format = ftrace_event_format_fops;
-        file_ops->format.owner = mod;
-
-        list_add(&file_ops->list, &ftrace_module_file_list);
-
-        return file_ops;
-}
-
 static void trace_module_add_events(struct module *mod)
 {
-        struct ftrace_module_file_ops *file_ops = NULL;
         struct ftrace_event_call **call, **start, **end;
 
         start = mod->trace_events;
         end = mod->trace_events + mod->num_trace_events;
 
-        if (start == end)
-                return;
-
-        file_ops = trace_create_file_ops(mod);
-        if (!file_ops)
-                return;
-
         for_each_event(call, start, end) {
                 __register_event(*call, mod);
-                __add_event_to_tracers(*call, file_ops);
+                __add_event_to_tracers(*call);
         }
 }
 
 static void trace_module_remove_events(struct module *mod)
 {
-        struct ftrace_module_file_ops *file_ops;
         struct ftrace_event_call *call, *p;
         bool clear_trace = false;
 
@@ -1874,16 +1785,6 @@
                         __trace_remove_event_call(call);
                 }
         }
-
-        /* Now free the file_operations */
-        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-                if (file_ops->mod == mod)
-                        break;
-        }
-        if (&file_ops->list != &ftrace_module_file_list) {
-                list_del(&file_ops->list);
-                kfree(file_ops);
-        }
         up_write(&trace_event_sem);
 
         /*
 
 
@@ -1919,67 +1820,21 @@
         return 0;
 }
 
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-                          struct trace_array *tr,
-                          struct ftrace_module_file_ops *file_ops)
-{
-        return __trace_add_new_event(call, tr,
-                                     &file_ops->id, &file_ops->enable,
-                                     &file_ops->filter, &file_ops->format);
-}
-
-#else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-        return NULL;
-}
-static inline int trace_module_notify(struct notifier_block *self,
-                                      unsigned long val, void *data)
-{
-        return 0;
-}
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-                          struct trace_array *tr,
-                          struct ftrace_module_file_ops *file_ops)
-{
-        return -ENODEV;
-}
+static struct notifier_block trace_module_nb = {
+        .notifier_call = trace_module_notify,
+        .priority = 0,
+};
 #endif /* CONFIG_MODULES */
 
 /* Create a new event directory structure for a trace directory. */
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-        struct ftrace_module_file_ops *file_ops = NULL;
         struct ftrace_event_call *call;
         int ret;
 
         list_for_each_entry(call, &ftrace_events, list) {
-                if (call->mod) {
-                        /*
-                         * Directories for events by modules need to
-                         * keep module ref counts when opened (as we don't
-                         * want the module to disappear when reading one
-                         * of these files). The file_ops keep account of
-                         * the module ref count.
-                         */
-                        file_ops = find_ftrace_file_ops(file_ops, call->mod);
-                        if (!file_ops)
-                                continue; /* Warn? */
-                        ret = __trace_add_new_mod_event(call, tr, file_ops);
-                        if (ret < 0)
-                                pr_warning("Could not create directory for event %s\n",
-                                           call->name);
-                        continue;
-                }
-                ret = __trace_add_new_event(call, tr,
-                                            &ftrace_event_id_fops,
-                                            &ftrace_enable_fops,
-                                            &ftrace_event_filter_fops,
-                                            &ftrace_event_format_fops);
+                ret = __trace_add_new_event(call, tr);
                 if (ret < 0)
                         pr_warning("Could not create directory for event %s\n",
                                    call->name);
@@ -2287,11 +2142,7 @@
 
 
         list_for_each_entry(file, &tr->events, list) {
-                ret = event_create_dir(tr->event_dir, file,
-                                       &ftrace_event_id_fops,
-                                       &ftrace_enable_fops,
-                                       &ftrace_event_filter_fops,
-                                       &ftrace_event_format_fops);
+                ret = event_create_dir(tr->event_dir, file);
                 if (ret < 0)
                         pr_warning("Could not create directory for event %s\n",
                                    file->event_call->name);
 
 
@@ -2332,29 +2183,14 @@
                 remove_event_file_dir(file);
 }
 
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
-                       struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
 {
         struct trace_array *tr;
 
-        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-                if (file_ops)
-                        __trace_add_new_mod_event(call, tr, file_ops);
-                else
-                        __trace_add_new_event(call, tr,
-                                              &ftrace_event_id_fops,
-                                              &ftrace_enable_fops,
-                                              &ftrace_event_filter_fops,
-                                              &ftrace_event_format_fops);
-        }
+        list_for_each_entry(tr, &ftrace_trace_arrays, list)
+                __trace_add_new_event(call, tr);
 }
 
-static struct notifier_block trace_module_nb = {
-        .notifier_call = trace_module_notify,
-        .priority = 0,
-};
-
 extern struct ftrace_event_call *__start_ftrace_events[];
 extern struct ftrace_event_call *__stop_ftrace_events[];
 
 
@@ -2559,10 +2395,11 @@
         if (ret)
                 return ret;
 
+#ifdef CONFIG_MODULES
         ret = register_module_notifier(&trace_module_nb);
         if (ret)
                 pr_warning("Failed to register trace events module notifier\n");
-
+#endif
         return 0;
 }
 early_initcall(event_trace_memsetup);
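The deleted machinery existed so that each module got private copies of the four file_operations with .owner pointing at itself, letting the VFS pin the module while one of its event files was open. With Oleg Nesterov's 3.11 work, event files are explicitly torn down on module unload, so the series appears to rely on that instead and can pass the static fops directly. A toy user-space model of the removed pattern, with made-up types (not kernel API):

    #include <stdio.h>

    /* Toy model of the removed trace_create_file_ops(): clone a shared
     * ops table per module and stamp .owner so each open can pin the
     * owning module. The cleanup passes the static tables directly. */
    struct file_ops {
            const char *name;
            const void *owner;      /* stand-in for struct module * */
    };

    static const struct file_ops ftrace_enable_fops = { "enable", NULL };

    int main(void)
    {
            static const char mod_name[] = "my_mod";
            struct file_ops per_mod = ftrace_enable_fops;  /* private copy */

            per_mod.owner = mod_name;                      /* "pin" on open */
            printf("%s owned by %s\n", per_mod.name,
                   (const char *)per_mod.owner);
            return 0;
    }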
kernel/trace/trace_syscalls.c
@@ -200,8 +200,8 @@
         #type, #name, offsetof(typeof(trace), name),           \
         sizeof(trace.name), is_signed_type(type)
 
-static
-int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
+static int __init
+__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
         int i;
         int pos = 0;
@@ -228,7 +228,7 @@
         return pos;
 }
 
-static int set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
 {
         char *print_fmt;
         int len;
@@ -253,7 +253,7 @@
         return 0;
 }
 
-static void free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
 {
         struct syscall_metadata *entry = call->data;
 
@@ -459,7 +459,7 @@
         mutex_unlock(&syscall_trace_lock);
 }
 
-static int init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct ftrace_event_call *call)
 {
         int id;
         int num;
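This is Li Zefan's clean up from the pull message: init_syscall_trace() is only reachable through the syscall events' raw_init callback at boot, so it and its print-fmt helpers can live in .init.text, which the kernel frees once boot completes. A user-space analog of the section placement (GNU toolchain assumed; the section name is made up, and unlike the kernel nothing here frees it afterwards):

    #include <stdio.h>

    /* Analog of the kernel's __init: put boot-only code in its own
     * section. The kernel discards .init.text after boot, which is why
     * every helper reachable only from a raw_init callback must carry
     * the annotation too. */
    #define my_init __attribute__((section(".init.example")))

    static int my_init set_print_fmt_once(void)
    {
            return 1;       /* boot-time-only work */
    }

    int main(void)
    {
            printf("init ran: %d\n", set_print_fmt_once());
            return 0;
    }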