Commit 2aafe1a4d451866e3e7b476e2fa0813b69b313c1
Exists in
master
and in
13 other branches
Merge tag 'trace-fixes-v3.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/g…
…it/rostedt/linux-trace Pull ftrace bugfix from Steven Rostedt: "Takao Indoh reported that he was able to cause a ftrace bug while loading a module and enabling function tracing at the same time. He uncovered a race where the module when loaded will convert the calls to mcount into nops, and expects the module's text to be RW. But when function tracing is enabled, it will convert all kernel text (core and module) from RO to RW to convert the nops to calls to ftrace to record the function. After the conversion, it will convert all the text back from RW to RO. The issue is, it will also convert the module's text that is loading. If it converts it to RO before ftrace does its conversion, it will cause ftrace to fail and require a reboot to fix it again. This patch moves the ftrace module update that converts calls to mcount into nops to be done when the module state is still MODULE_STATE_UNFORMED. This will ignore the module when the text is being converted from RW back to RO" * tag 'trace-fixes-v3.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: ftrace/module: Hardcode ftrace_module_init() call into load_module()
Showing 3 changed files Inline Diff
include/linux/ftrace.h
1 | /* | 1 | /* |
2 | * Ftrace header. For implementation details beyond the random comments | 2 | * Ftrace header. For implementation details beyond the random comments |
3 | * scattered below, see: Documentation/trace/ftrace-design.txt | 3 | * scattered below, see: Documentation/trace/ftrace-design.txt |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _LINUX_FTRACE_H | 6 | #ifndef _LINUX_FTRACE_H |
7 | #define _LINUX_FTRACE_H | 7 | #define _LINUX_FTRACE_H |
8 | 8 | ||
9 | #include <linux/trace_clock.h> | 9 | #include <linux/trace_clock.h> |
10 | #include <linux/kallsyms.h> | 10 | #include <linux/kallsyms.h> |
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/ptrace.h> | 13 | #include <linux/ptrace.h> |
14 | #include <linux/ktime.h> | 14 | #include <linux/ktime.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | 19 | ||
20 | #include <asm/ftrace.h> | 20 | #include <asm/ftrace.h> |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * If the arch supports passing the variable contents of | 23 | * If the arch supports passing the variable contents of |
24 | * function_trace_op as the third parameter back from the | 24 | * function_trace_op as the third parameter back from the |
25 | * mcount call, then the arch should define this as 1. | 25 | * mcount call, then the arch should define this as 1. |
26 | */ | 26 | */ |
27 | #ifndef ARCH_SUPPORTS_FTRACE_OPS | 27 | #ifndef ARCH_SUPPORTS_FTRACE_OPS |
28 | #define ARCH_SUPPORTS_FTRACE_OPS 0 | 28 | #define ARCH_SUPPORTS_FTRACE_OPS 0 |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * If the arch's mcount caller does not support all of ftrace's | 32 | * If the arch's mcount caller does not support all of ftrace's |
33 | * features, then it must call an indirect function that | 33 | * features, then it must call an indirect function that |
34 | * does. Or at least does enough to prevent any unwelcomed side effects. | 34 | * does. Or at least does enough to prevent any unwelcomed side effects. |
35 | */ | 35 | */ |
36 | #if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ | 36 | #if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ |
37 | !ARCH_SUPPORTS_FTRACE_OPS | 37 | !ARCH_SUPPORTS_FTRACE_OPS |
38 | # define FTRACE_FORCE_LIST_FUNC 1 | 38 | # define FTRACE_FORCE_LIST_FUNC 1 |
39 | #else | 39 | #else |
40 | # define FTRACE_FORCE_LIST_FUNC 0 | 40 | # define FTRACE_FORCE_LIST_FUNC 0 |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | 43 | ||
44 | struct module; | 44 | struct module; |
45 | struct ftrace_hash; | 45 | struct ftrace_hash; |
46 | 46 | ||
47 | #ifdef CONFIG_FUNCTION_TRACER | 47 | #ifdef CONFIG_FUNCTION_TRACER |
48 | 48 | ||
49 | extern int ftrace_enabled; | 49 | extern int ftrace_enabled; |
50 | extern int | 50 | extern int |
51 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 51 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
52 | void __user *buffer, size_t *lenp, | 52 | void __user *buffer, size_t *lenp, |
53 | loff_t *ppos); | 53 | loff_t *ppos); |
54 | 54 | ||
55 | struct ftrace_ops; | 55 | struct ftrace_ops; |
56 | 56 | ||
57 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | 57 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, |
58 | struct ftrace_ops *op, struct pt_regs *regs); | 58 | struct ftrace_ops *op, struct pt_regs *regs); |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are | 61 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are |
62 | * set in the flags member. | 62 | * set in the flags member. |
63 | * | 63 | * |
64 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | 64 | * ENABLED - set/unset when ftrace_ops is registered/unregistered |
65 | * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops | 65 | * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops |
66 | * is part of the global tracers sharing the same filter | 66 | * is part of the global tracers sharing the same filter |
67 | * via set_ftrace_* debugfs files. | 67 | * via set_ftrace_* debugfs files. |
68 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically | 68 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
69 | * allocated ftrace_ops which need special care | 69 | * allocated ftrace_ops which need special care |
70 | * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops | 70 | * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops |
71 | * could be controlled by following calls: | 71 | * could be controlled by following calls: |
72 | * ftrace_function_local_enable | 72 | * ftrace_function_local_enable |
73 | * ftrace_function_local_disable | 73 | * ftrace_function_local_disable |
74 | * SAVE_REGS - The ftrace_ops wants regs saved at each function called | 74 | * SAVE_REGS - The ftrace_ops wants regs saved at each function called |
75 | * and passed to the callback. If this flag is set, but the | 75 | * and passed to the callback. If this flag is set, but the |
76 | * architecture does not support passing regs | 76 | * architecture does not support passing regs |
77 | * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the | 77 | * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the |
78 | * ftrace_ops will fail to register, unless the next flag | 78 | * ftrace_ops will fail to register, unless the next flag |
79 | * is set. | 79 | * is set. |
80 | * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the | 80 | * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the |
81 | * handler can handle an arch that does not save regs | 81 | * handler can handle an arch that does not save regs |
82 | * (the handler tests if regs == NULL), then it can set | 82 | * (the handler tests if regs == NULL), then it can set |
83 | * this flag instead. It will not fail registering the ftrace_ops | 83 | * this flag instead. It will not fail registering the ftrace_ops |
84 | * but, the regs field will be NULL if the arch does not support | 84 | * but, the regs field will be NULL if the arch does not support |
85 | * passing regs to the handler. | 85 | * passing regs to the handler. |
86 | * Note, if this flag is set, the SAVE_REGS flag will automatically | 86 | * Note, if this flag is set, the SAVE_REGS flag will automatically |
87 | * get set upon registering the ftrace_ops, if the arch supports it. | 87 | * get set upon registering the ftrace_ops, if the arch supports it. |
88 | * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure | 88 | * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure |
89 | * that the call back has its own recursion protection. If it does | 89 | * that the call back has its own recursion protection. If it does |
90 | * not set this, then the ftrace infrastructure will add recursion | 90 | * not set this, then the ftrace infrastructure will add recursion |
91 | * protection for the caller. | 91 | * protection for the caller. |
92 | * STUB - The ftrace_ops is just a place holder. | 92 | * STUB - The ftrace_ops is just a place holder. |
93 | * INITIALIZED - The ftrace_ops has already been initialized (first use time | 93 | * INITIALIZED - The ftrace_ops has already been initialized (first use time |
94 | * register_ftrace_function() is called, it will initialized the ops) | 94 | * register_ftrace_function() is called, it will initialized the ops) |
95 | * DELETED - The ops are being deleted, do not let them be registered again. | 95 | * DELETED - The ops are being deleted, do not let them be registered again. |
96 | */ | 96 | */ |
97 | enum { | 97 | enum { |
98 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 98 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
99 | FTRACE_OPS_FL_GLOBAL = 1 << 1, | 99 | FTRACE_OPS_FL_GLOBAL = 1 << 1, |
100 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, | 100 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, |
101 | FTRACE_OPS_FL_CONTROL = 1 << 3, | 101 | FTRACE_OPS_FL_CONTROL = 1 << 3, |
102 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, | 102 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, |
103 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, | 103 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, |
104 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, | 104 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, |
105 | FTRACE_OPS_FL_STUB = 1 << 7, | 105 | FTRACE_OPS_FL_STUB = 1 << 7, |
106 | FTRACE_OPS_FL_INITIALIZED = 1 << 8, | 106 | FTRACE_OPS_FL_INITIALIZED = 1 << 8, |
107 | FTRACE_OPS_FL_DELETED = 1 << 9, | 107 | FTRACE_OPS_FL_DELETED = 1 << 9, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * Note, ftrace_ops can be referenced outside of RCU protection. | 111 | * Note, ftrace_ops can be referenced outside of RCU protection. |
112 | * (Although, for perf, the control ops prevent that). If ftrace_ops is | 112 | * (Although, for perf, the control ops prevent that). If ftrace_ops is |
113 | * allocated and not part of kernel core data, the unregistering of it will | 113 | * allocated and not part of kernel core data, the unregistering of it will |
114 | * perform a scheduling on all CPUs to make sure that there are no more users. | 114 | * perform a scheduling on all CPUs to make sure that there are no more users. |
115 | * Depending on the load of the system that may take a bit of time. | 115 | * Depending on the load of the system that may take a bit of time. |
116 | * | 116 | * |
117 | * Any private data added must also take care not to be freed and if private | 117 | * Any private data added must also take care not to be freed and if private |
118 | * data is added to a ftrace_ops that is in core code, the user of the | 118 | * data is added to a ftrace_ops that is in core code, the user of the |
119 | * ftrace_ops must perform a schedule_on_each_cpu() before freeing it. | 119 | * ftrace_ops must perform a schedule_on_each_cpu() before freeing it. |
120 | */ | 120 | */ |
121 | struct ftrace_ops { | 121 | struct ftrace_ops { |
122 | ftrace_func_t func; | 122 | ftrace_func_t func; |
123 | struct ftrace_ops *next; | 123 | struct ftrace_ops *next; |
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | int __percpu *disabled; | 125 | int __percpu *disabled; |
126 | void *private; | 126 | void *private; |
127 | #ifdef CONFIG_DYNAMIC_FTRACE | 127 | #ifdef CONFIG_DYNAMIC_FTRACE |
128 | struct ftrace_hash *notrace_hash; | 128 | struct ftrace_hash *notrace_hash; |
129 | struct ftrace_hash *filter_hash; | 129 | struct ftrace_hash *filter_hash; |
130 | struct mutex regex_lock; | 130 | struct mutex regex_lock; |
131 | #endif | 131 | #endif |
132 | }; | 132 | }; |
133 | 133 | ||
134 | extern int function_trace_stop; | 134 | extern int function_trace_stop; |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * Type of the current tracing. | 137 | * Type of the current tracing. |
138 | */ | 138 | */ |
139 | enum ftrace_tracing_type_t { | 139 | enum ftrace_tracing_type_t { |
140 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ | 140 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ |
141 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ | 141 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ |
142 | }; | 142 | }; |
143 | 143 | ||
144 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | 144 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ |
145 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | 145 | extern enum ftrace_tracing_type_t ftrace_tracing_type; |
146 | 146 | ||
147 | /** | 147 | /** |
148 | * ftrace_stop - stop function tracer. | 148 | * ftrace_stop - stop function tracer. |
149 | * | 149 | * |
150 | * A quick way to stop the function tracer. Note this an on off switch, | 150 | * A quick way to stop the function tracer. Note this an on off switch, |
151 | * it is not something that is recursive like preempt_disable. | 151 | * it is not something that is recursive like preempt_disable. |
152 | * This does not disable the calling of mcount, it only stops the | 152 | * This does not disable the calling of mcount, it only stops the |
153 | * calling of functions from mcount. | 153 | * calling of functions from mcount. |
154 | */ | 154 | */ |
155 | static inline void ftrace_stop(void) | 155 | static inline void ftrace_stop(void) |
156 | { | 156 | { |
157 | function_trace_stop = 1; | 157 | function_trace_stop = 1; |
158 | } | 158 | } |
159 | 159 | ||
160 | /** | 160 | /** |
161 | * ftrace_start - start the function tracer. | 161 | * ftrace_start - start the function tracer. |
162 | * | 162 | * |
163 | * This function is the inverse of ftrace_stop. This does not enable | 163 | * This function is the inverse of ftrace_stop. This does not enable |
164 | * the function tracing if the function tracer is disabled. This only | 164 | * the function tracing if the function tracer is disabled. This only |
165 | * sets the function tracer flag to continue calling the functions | 165 | * sets the function tracer flag to continue calling the functions |
166 | * from mcount. | 166 | * from mcount. |
167 | */ | 167 | */ |
168 | static inline void ftrace_start(void) | 168 | static inline void ftrace_start(void) |
169 | { | 169 | { |
170 | function_trace_stop = 0; | 170 | function_trace_stop = 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | /* | 173 | /* |
174 | * The ftrace_ops must be a static and should also | 174 | * The ftrace_ops must be a static and should also |
175 | * be read_mostly. These functions do modify read_mostly variables | 175 | * be read_mostly. These functions do modify read_mostly variables |
176 | * so use them sparely. Never free an ftrace_op or modify the | 176 | * so use them sparely. Never free an ftrace_op or modify the |
177 | * next pointer after it has been registered. Even after unregistering | 177 | * next pointer after it has been registered. Even after unregistering |
178 | * it, the next pointer may still be used internally. | 178 | * it, the next pointer may still be used internally. |
179 | */ | 179 | */ |
180 | int register_ftrace_function(struct ftrace_ops *ops); | 180 | int register_ftrace_function(struct ftrace_ops *ops); |
181 | int unregister_ftrace_function(struct ftrace_ops *ops); | 181 | int unregister_ftrace_function(struct ftrace_ops *ops); |
182 | void clear_ftrace_function(void); | 182 | void clear_ftrace_function(void); |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu | 185 | * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu |
186 | * | 186 | * |
187 | * This function enables tracing on current cpu by decreasing | 187 | * This function enables tracing on current cpu by decreasing |
188 | * the per cpu control variable. | 188 | * the per cpu control variable. |
189 | * It must be called with preemption disabled and only on ftrace_ops | 189 | * It must be called with preemption disabled and only on ftrace_ops |
190 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | 190 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption |
191 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | 191 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. |
192 | */ | 192 | */ |
193 | static inline void ftrace_function_local_enable(struct ftrace_ops *ops) | 193 | static inline void ftrace_function_local_enable(struct ftrace_ops *ops) |
194 | { | 194 | { |
195 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) | 195 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) |
196 | return; | 196 | return; |
197 | 197 | ||
198 | (*this_cpu_ptr(ops->disabled))--; | 198 | (*this_cpu_ptr(ops->disabled))--; |
199 | } | 199 | } |
200 | 200 | ||
201 | /** | 201 | /** |
202 | * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu | 202 | * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu |
203 | * | 203 | * |
204 | * This function disables tracing on current cpu by increasing | 204 | * This function disables tracing on current cpu by increasing |
205 | * the per cpu control variable. | 205 | * the per cpu control variable. |
206 | * It must be called with preemption disabled and only on ftrace_ops | 206 | * It must be called with preemption disabled and only on ftrace_ops |
207 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | 207 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption |
208 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | 208 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. |
209 | */ | 209 | */ |
210 | static inline void ftrace_function_local_disable(struct ftrace_ops *ops) | 210 | static inline void ftrace_function_local_disable(struct ftrace_ops *ops) |
211 | { | 211 | { |
212 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) | 212 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) |
213 | return; | 213 | return; |
214 | 214 | ||
215 | (*this_cpu_ptr(ops->disabled))++; | 215 | (*this_cpu_ptr(ops->disabled))++; |
216 | } | 216 | } |
217 | 217 | ||
218 | /** | 218 | /** |
219 | * ftrace_function_local_disabled - returns ftrace_ops disabled value | 219 | * ftrace_function_local_disabled - returns ftrace_ops disabled value |
220 | * on current cpu | 220 | * on current cpu |
221 | * | 221 | * |
222 | * This function returns value of ftrace_ops::disabled on current cpu. | 222 | * This function returns value of ftrace_ops::disabled on current cpu. |
223 | * It must be called with preemption disabled and only on ftrace_ops | 223 | * It must be called with preemption disabled and only on ftrace_ops |
224 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | 224 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption |
225 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | 225 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. |
226 | */ | 226 | */ |
227 | static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) | 227 | static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) |
228 | { | 228 | { |
229 | WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); | 229 | WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); |
230 | return *this_cpu_ptr(ops->disabled); | 230 | return *this_cpu_ptr(ops->disabled); |
231 | } | 231 | } |
232 | 232 | ||
233 | extern void ftrace_stub(unsigned long a0, unsigned long a1, | 233 | extern void ftrace_stub(unsigned long a0, unsigned long a1, |
234 | struct ftrace_ops *op, struct pt_regs *regs); | 234 | struct ftrace_ops *op, struct pt_regs *regs); |
235 | 235 | ||
236 | #else /* !CONFIG_FUNCTION_TRACER */ | 236 | #else /* !CONFIG_FUNCTION_TRACER */ |
237 | /* | 237 | /* |
238 | * (un)register_ftrace_function must be a macro since the ops parameter | 238 | * (un)register_ftrace_function must be a macro since the ops parameter |
239 | * must not be evaluated. | 239 | * must not be evaluated. |
240 | */ | 240 | */ |
241 | #define register_ftrace_function(ops) ({ 0; }) | 241 | #define register_ftrace_function(ops) ({ 0; }) |
242 | #define unregister_ftrace_function(ops) ({ 0; }) | 242 | #define unregister_ftrace_function(ops) ({ 0; }) |
243 | static inline int ftrace_nr_registered_ops(void) | 243 | static inline int ftrace_nr_registered_ops(void) |
244 | { | 244 | { |
245 | return 0; | 245 | return 0; |
246 | } | 246 | } |
247 | static inline void clear_ftrace_function(void) { } | 247 | static inline void clear_ftrace_function(void) { } |
248 | static inline void ftrace_kill(void) { } | 248 | static inline void ftrace_kill(void) { } |
249 | static inline void ftrace_stop(void) { } | 249 | static inline void ftrace_stop(void) { } |
250 | static inline void ftrace_start(void) { } | 250 | static inline void ftrace_start(void) { } |
251 | #endif /* CONFIG_FUNCTION_TRACER */ | 251 | #endif /* CONFIG_FUNCTION_TRACER */ |
252 | 252 | ||
253 | #ifdef CONFIG_STACK_TRACER | 253 | #ifdef CONFIG_STACK_TRACER |
254 | extern int stack_tracer_enabled; | 254 | extern int stack_tracer_enabled; |
255 | int | 255 | int |
256 | stack_trace_sysctl(struct ctl_table *table, int write, | 256 | stack_trace_sysctl(struct ctl_table *table, int write, |
257 | void __user *buffer, size_t *lenp, | 257 | void __user *buffer, size_t *lenp, |
258 | loff_t *ppos); | 258 | loff_t *ppos); |
259 | #endif | 259 | #endif |
260 | 260 | ||
261 | struct ftrace_func_command { | 261 | struct ftrace_func_command { |
262 | struct list_head list; | 262 | struct list_head list; |
263 | char *name; | 263 | char *name; |
264 | int (*func)(struct ftrace_hash *hash, | 264 | int (*func)(struct ftrace_hash *hash, |
265 | char *func, char *cmd, | 265 | char *func, char *cmd, |
266 | char *params, int enable); | 266 | char *params, int enable); |
267 | }; | 267 | }; |
268 | 268 | ||
269 | #ifdef CONFIG_DYNAMIC_FTRACE | 269 | #ifdef CONFIG_DYNAMIC_FTRACE |
270 | 270 | ||
271 | int ftrace_arch_code_modify_prepare(void); | 271 | int ftrace_arch_code_modify_prepare(void); |
272 | int ftrace_arch_code_modify_post_process(void); | 272 | int ftrace_arch_code_modify_post_process(void); |
273 | 273 | ||
274 | void ftrace_bug(int err, unsigned long ip); | 274 | void ftrace_bug(int err, unsigned long ip); |
275 | 275 | ||
276 | struct seq_file; | 276 | struct seq_file; |
277 | 277 | ||
278 | struct ftrace_probe_ops { | 278 | struct ftrace_probe_ops { |
279 | void (*func)(unsigned long ip, | 279 | void (*func)(unsigned long ip, |
280 | unsigned long parent_ip, | 280 | unsigned long parent_ip, |
281 | void **data); | 281 | void **data); |
282 | int (*init)(struct ftrace_probe_ops *ops, | 282 | int (*init)(struct ftrace_probe_ops *ops, |
283 | unsigned long ip, void **data); | 283 | unsigned long ip, void **data); |
284 | void (*free)(struct ftrace_probe_ops *ops, | 284 | void (*free)(struct ftrace_probe_ops *ops, |
285 | unsigned long ip, void **data); | 285 | unsigned long ip, void **data); |
286 | int (*print)(struct seq_file *m, | 286 | int (*print)(struct seq_file *m, |
287 | unsigned long ip, | 287 | unsigned long ip, |
288 | struct ftrace_probe_ops *ops, | 288 | struct ftrace_probe_ops *ops, |
289 | void *data); | 289 | void *data); |
290 | }; | 290 | }; |
291 | 291 | ||
292 | extern int | 292 | extern int |
293 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 293 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
294 | void *data); | 294 | void *data); |
295 | extern void | 295 | extern void |
296 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 296 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
297 | void *data); | 297 | void *data); |
298 | extern void | 298 | extern void |
299 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); | 299 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); |
300 | extern void unregister_ftrace_function_probe_all(char *glob); | 300 | extern void unregister_ftrace_function_probe_all(char *glob); |
301 | 301 | ||
302 | extern int ftrace_text_reserved(const void *start, const void *end); | 302 | extern int ftrace_text_reserved(const void *start, const void *end); |
303 | 303 | ||
304 | extern int ftrace_nr_registered_ops(void); | 304 | extern int ftrace_nr_registered_ops(void); |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * The dyn_ftrace record's flags field is split into two parts. | 307 | * The dyn_ftrace record's flags field is split into two parts. |
308 | * the first part which is '0-FTRACE_REF_MAX' is a counter of | 308 | * the first part which is '0-FTRACE_REF_MAX' is a counter of |
309 | * the number of callbacks that have registered the function that | 309 | * the number of callbacks that have registered the function that |
310 | * the dyn_ftrace descriptor represents. | 310 | * the dyn_ftrace descriptor represents. |
311 | * | 311 | * |
312 | * The second part is a mask: | 312 | * The second part is a mask: |
313 | * ENABLED - the function is being traced | 313 | * ENABLED - the function is being traced |
314 | * REGS - the record wants the function to save regs | 314 | * REGS - the record wants the function to save regs |
315 | * REGS_EN - the function is set up to save regs. | 315 | * REGS_EN - the function is set up to save regs. |
316 | * | 316 | * |
317 | * When a new ftrace_ops is registered and wants a function to save | 317 | * When a new ftrace_ops is registered and wants a function to save |
318 | * pt_regs, the rec->flag REGS is set. When the function has been | 318 | * pt_regs, the rec->flag REGS is set. When the function has been |
319 | * set up to save regs, the REG_EN flag is set. Once a function | 319 | * set up to save regs, the REG_EN flag is set. Once a function |
320 | * starts saving regs it will do so until all ftrace_ops are removed | 320 | * starts saving regs it will do so until all ftrace_ops are removed |
321 | * from tracing that function. | 321 | * from tracing that function. |
322 | */ | 322 | */ |
323 | enum { | 323 | enum { |
324 | FTRACE_FL_ENABLED = (1UL << 29), | 324 | FTRACE_FL_ENABLED = (1UL << 29), |
325 | FTRACE_FL_REGS = (1UL << 30), | 325 | FTRACE_FL_REGS = (1UL << 30), |
326 | FTRACE_FL_REGS_EN = (1UL << 31) | 326 | FTRACE_FL_REGS_EN = (1UL << 31) |
327 | }; | 327 | }; |
328 | 328 | ||
329 | #define FTRACE_FL_MASK (0x7UL << 29) | 329 | #define FTRACE_FL_MASK (0x7UL << 29) |
330 | #define FTRACE_REF_MAX ((1UL << 29) - 1) | 330 | #define FTRACE_REF_MAX ((1UL << 29) - 1) |
331 | 331 | ||
332 | struct dyn_ftrace { | 332 | struct dyn_ftrace { |
333 | unsigned long ip; /* address of mcount call-site */ | 333 | unsigned long ip; /* address of mcount call-site */ |
334 | unsigned long flags; | 334 | unsigned long flags; |
335 | struct dyn_arch_ftrace arch; | 335 | struct dyn_arch_ftrace arch; |
336 | }; | 336 | }; |
337 | 337 | ||
338 | int ftrace_force_update(void); | 338 | int ftrace_force_update(void); |
339 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, | 339 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, |
340 | int remove, int reset); | 340 | int remove, int reset); |
341 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, | 341 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
342 | int len, int reset); | 342 | int len, int reset); |
343 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | 343 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
344 | int len, int reset); | 344 | int len, int reset); |
345 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | 345 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); |
346 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | 346 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); |
347 | void ftrace_free_filter(struct ftrace_ops *ops); | 347 | void ftrace_free_filter(struct ftrace_ops *ops); |
348 | 348 | ||
349 | int register_ftrace_command(struct ftrace_func_command *cmd); | 349 | int register_ftrace_command(struct ftrace_func_command *cmd); |
350 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 350 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
351 | 351 | ||
352 | enum { | 352 | enum { |
353 | FTRACE_UPDATE_CALLS = (1 << 0), | 353 | FTRACE_UPDATE_CALLS = (1 << 0), |
354 | FTRACE_DISABLE_CALLS = (1 << 1), | 354 | FTRACE_DISABLE_CALLS = (1 << 1), |
355 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 355 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
356 | FTRACE_START_FUNC_RET = (1 << 3), | 356 | FTRACE_START_FUNC_RET = (1 << 3), |
357 | FTRACE_STOP_FUNC_RET = (1 << 4), | 357 | FTRACE_STOP_FUNC_RET = (1 << 4), |
358 | }; | 358 | }; |
359 | 359 | ||
360 | /* | 360 | /* |
361 | * The FTRACE_UPDATE_* enum is used to pass information back | 361 | * The FTRACE_UPDATE_* enum is used to pass information back |
362 | * from the ftrace_update_record() and ftrace_test_record() | 362 | * from the ftrace_update_record() and ftrace_test_record() |
363 | * functions. These are called by the code update routines | 363 | * functions. These are called by the code update routines |
364 | * to find out what is to be done for a given function. | 364 | * to find out what is to be done for a given function. |
365 | * | 365 | * |
366 | * IGNORE - The function is already what we want it to be | 366 | * IGNORE - The function is already what we want it to be |
367 | * MAKE_CALL - Start tracing the function | 367 | * MAKE_CALL - Start tracing the function |
368 | * MODIFY_CALL - Stop saving regs for the function | 368 | * MODIFY_CALL - Stop saving regs for the function |
369 | * MODIFY_CALL_REGS - Start saving regs for the function | 369 | * MODIFY_CALL_REGS - Start saving regs for the function |
370 | * MAKE_NOP - Stop tracing the function | 370 | * MAKE_NOP - Stop tracing the function |
371 | */ | 371 | */ |
372 | enum { | 372 | enum { |
373 | FTRACE_UPDATE_IGNORE, | 373 | FTRACE_UPDATE_IGNORE, |
374 | FTRACE_UPDATE_MAKE_CALL, | 374 | FTRACE_UPDATE_MAKE_CALL, |
375 | FTRACE_UPDATE_MODIFY_CALL, | 375 | FTRACE_UPDATE_MODIFY_CALL, |
376 | FTRACE_UPDATE_MODIFY_CALL_REGS, | 376 | FTRACE_UPDATE_MODIFY_CALL_REGS, |
377 | FTRACE_UPDATE_MAKE_NOP, | 377 | FTRACE_UPDATE_MAKE_NOP, |
378 | }; | 378 | }; |
379 | 379 | ||
380 | enum { | 380 | enum { |
381 | FTRACE_ITER_FILTER = (1 << 0), | 381 | FTRACE_ITER_FILTER = (1 << 0), |
382 | FTRACE_ITER_NOTRACE = (1 << 1), | 382 | FTRACE_ITER_NOTRACE = (1 << 1), |
383 | FTRACE_ITER_PRINTALL = (1 << 2), | 383 | FTRACE_ITER_PRINTALL = (1 << 2), |
384 | FTRACE_ITER_DO_HASH = (1 << 3), | 384 | FTRACE_ITER_DO_HASH = (1 << 3), |
385 | FTRACE_ITER_HASH = (1 << 4), | 385 | FTRACE_ITER_HASH = (1 << 4), |
386 | FTRACE_ITER_ENABLED = (1 << 5), | 386 | FTRACE_ITER_ENABLED = (1 << 5), |
387 | }; | 387 | }; |
388 | 388 | ||
389 | void arch_ftrace_update_code(int command); | 389 | void arch_ftrace_update_code(int command); |
390 | 390 | ||
391 | struct ftrace_rec_iter; | 391 | struct ftrace_rec_iter; |
392 | 392 | ||
393 | struct ftrace_rec_iter *ftrace_rec_iter_start(void); | 393 | struct ftrace_rec_iter *ftrace_rec_iter_start(void); |
394 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); | 394 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); |
395 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); | 395 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); |
396 | 396 | ||
397 | #define for_ftrace_rec_iter(iter) \ | 397 | #define for_ftrace_rec_iter(iter) \ |
398 | for (iter = ftrace_rec_iter_start(); \ | 398 | for (iter = ftrace_rec_iter_start(); \ |
399 | iter; \ | 399 | iter; \ |
400 | iter = ftrace_rec_iter_next(iter)) | 400 | iter = ftrace_rec_iter_next(iter)) |
401 | 401 | ||
402 | 402 | ||
403 | int ftrace_update_record(struct dyn_ftrace *rec, int enable); | 403 | int ftrace_update_record(struct dyn_ftrace *rec, int enable); |
404 | int ftrace_test_record(struct dyn_ftrace *rec, int enable); | 404 | int ftrace_test_record(struct dyn_ftrace *rec, int enable); |
405 | void ftrace_run_stop_machine(int command); | 405 | void ftrace_run_stop_machine(int command); |
406 | unsigned long ftrace_location(unsigned long ip); | 406 | unsigned long ftrace_location(unsigned long ip); |
407 | 407 | ||
408 | extern ftrace_func_t ftrace_trace_function; | 408 | extern ftrace_func_t ftrace_trace_function; |
409 | 409 | ||
410 | int ftrace_regex_open(struct ftrace_ops *ops, int flag, | 410 | int ftrace_regex_open(struct ftrace_ops *ops, int flag, |
411 | struct inode *inode, struct file *file); | 411 | struct inode *inode, struct file *file); |
412 | ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | 412 | ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, |
413 | size_t cnt, loff_t *ppos); | 413 | size_t cnt, loff_t *ppos); |
414 | ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, | 414 | ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, |
415 | size_t cnt, loff_t *ppos); | 415 | size_t cnt, loff_t *ppos); |
416 | int ftrace_regex_release(struct inode *inode, struct file *file); | 416 | int ftrace_regex_release(struct inode *inode, struct file *file); |
417 | 417 | ||
418 | void __init | 418 | void __init |
419 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); | 419 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); |
420 | 420 | ||
421 | /* defined in arch */ | 421 | /* defined in arch */ |
422 | extern int ftrace_ip_converted(unsigned long ip); | 422 | extern int ftrace_ip_converted(unsigned long ip); |
423 | extern int ftrace_dyn_arch_init(void); | 423 | extern int ftrace_dyn_arch_init(void); |
424 | extern void ftrace_replace_code(int enable); | 424 | extern void ftrace_replace_code(int enable); |
425 | extern int ftrace_update_ftrace_func(ftrace_func_t func); | 425 | extern int ftrace_update_ftrace_func(ftrace_func_t func); |
426 | extern void ftrace_caller(void); | 426 | extern void ftrace_caller(void); |
427 | extern void ftrace_regs_caller(void); | 427 | extern void ftrace_regs_caller(void); |
428 | extern void ftrace_call(void); | 428 | extern void ftrace_call(void); |
429 | extern void ftrace_regs_call(void); | 429 | extern void ftrace_regs_call(void); |
430 | extern void mcount_call(void); | 430 | extern void mcount_call(void); |
431 | 431 | ||
432 | void ftrace_modify_all_code(int command); | 432 | void ftrace_modify_all_code(int command); |
433 | 433 | ||
434 | #ifndef FTRACE_ADDR | 434 | #ifndef FTRACE_ADDR |
435 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | 435 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) |
436 | #endif | 436 | #endif |
437 | 437 | ||
438 | #ifndef FTRACE_REGS_ADDR | 438 | #ifndef FTRACE_REGS_ADDR |
439 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | 439 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
440 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) | 440 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) |
441 | #else | 441 | #else |
442 | # define FTRACE_REGS_ADDR FTRACE_ADDR | 442 | # define FTRACE_REGS_ADDR FTRACE_ADDR |
443 | #endif | 443 | #endif |
444 | #endif | 444 | #endif |
445 | 445 | ||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
/* Graph tracer disabled: enabling/disabling its caller is a successful no-op. */
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
454 | 454 | ||
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
478 | 478 | ||
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
500 | 500 | ||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif
533 | 533 | ||
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
/*
 * Called from load_module() while the module is still MODULE_STATE_UNFORMED,
 * so its mcount call sites are converted to nops before the module text can
 * race with a concurrent RW->RO text conversion (see merge description).
 */
extern void ftrace_module_init(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
541 | #else /* CONFIG_DYNAMIC_FTRACE */ | 542 | #else /* CONFIG_DYNAMIC_FTRACE */ |
542 | static inline int skip_trace(unsigned long ip) { return 0; } | 543 | static inline int skip_trace(unsigned long ip) { return 0; } |
543 | static inline int ftrace_force_update(void) { return 0; } | 544 | static inline int ftrace_force_update(void) { return 0; } |
544 | static inline void ftrace_disable_daemon(void) { } | 545 | static inline void ftrace_disable_daemon(void) { } |
545 | static inline void ftrace_enable_daemon(void) { } | 546 | static inline void ftrace_enable_daemon(void) { } |
546 | static inline void ftrace_release_mod(struct module *mod) {} | 547 | static inline void ftrace_release_mod(struct module *mod) {} |
548 | static inline void ftrace_module_init(struct module *mod) {} | ||
547 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) | 549 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) |
548 | { | 550 | { |
549 | return -EINVAL; | 551 | return -EINVAL; |
550 | } | 552 | } |
551 | static inline __init int unregister_ftrace_command(char *cmd_name) | 553 | static inline __init int unregister_ftrace_command(char *cmd_name) |
552 | { | 554 | { |
553 | return -EINVAL; | 555 | return -EINVAL; |
554 | } | 556 | } |
555 | static inline int ftrace_text_reserved(const void *start, const void *end) | 557 | static inline int ftrace_text_reserved(const void *start, const void *end) |
556 | { | 558 | { |
557 | return 0; | 559 | return 0; |
558 | } | 560 | } |
559 | static inline unsigned long ftrace_location(unsigned long ip) | 561 | static inline unsigned long ftrace_location(unsigned long ip) |
560 | { | 562 | { |
561 | return 0; | 563 | return 0; |
562 | } | 564 | } |
563 | 565 | ||
564 | /* | 566 | /* |
565 | * Again users of functions that have ftrace_ops may not | 567 | * Again users of functions that have ftrace_ops may not |
566 | * have them defined when ftrace is not enabled, but these | 568 | * have them defined when ftrace is not enabled, but these |
567 | * functions may still be called. Use a macro instead of inline. | 569 | * functions may still be called. Use a macro instead of inline. |
568 | */ | 570 | */ |
569 | #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) | 571 | #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) |
570 | #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) | 572 | #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) |
571 | #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) | 573 | #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) |
572 | #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) | 574 | #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) |
573 | #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) | 575 | #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) |
574 | #define ftrace_free_filter(ops) do { } while (0) | 576 | #define ftrace_free_filter(ops) do { } while (0) |
575 | 577 | ||
576 | static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | 578 | static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, |
577 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 579 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
578 | static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, | 580 | static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, |
579 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 581 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
580 | static inline int | 582 | static inline int |
581 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | 583 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } |
582 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 584 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
583 | 585 | ||
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
593 | 595 | ||
/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled to be changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}
609 | 611 | ||
/* Counterpart of __ftrace_enabled_save(); pass back its return value. */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
616 | 618 | ||
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
/* without frame pointers only the immediate caller is recoverable */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
636 | 638 | ||
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
644 | 646 | ||
#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
656 | 658 | ||
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
662 | 664 | ||
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};
670 | 672 | ||
/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};
682 | 684 | ||
/* Type of the callback handlers for tracing function graph*/
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
686 | 688 | ||
687 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 689 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
688 | 690 | ||
689 | /* for init task */ | 691 | /* for init task */ |
690 | #define INIT_FTRACE_GRAPH .ret_stack = NULL, | 692 | #define INIT_FTRACE_GRAPH .ret_stack = NULL, |
691 | 693 | ||
692 | /* | 694 | /* |
693 | * Stack of return addresses for functions | 695 | * Stack of return addresses for functions |
694 | * of a thread. | 696 | * of a thread. |
695 | * Used in struct thread_info | 697 | * Used in struct thread_info |
696 | */ | 698 | */ |
697 | struct ftrace_ret_stack { | 699 | struct ftrace_ret_stack { |
698 | unsigned long ret; | 700 | unsigned long ret; |
699 | unsigned long func; | 701 | unsigned long func; |
700 | unsigned long long calltime; | 702 | unsigned long long calltime; |
701 | unsigned long long subtime; | 703 | unsigned long long subtime; |
702 | unsigned long fp; | 704 | unsigned long fp; |
703 | }; | 705 | }; |
704 | 706 | ||
705 | /* | 707 | /* |
706 | * Primary handler of a function return. | 708 | * Primary handler of a function return. |
707 | * It relays on ftrace_return_to_handler. | 709 | * It relays on ftrace_return_to_handler. |
708 | * Defined in entry_32/64.S | 710 | * Defined in entry_32/64.S |
709 | */ | 711 | */ |
710 | extern void return_to_handler(void); | 712 | extern void return_to_handler(void); |
711 | 713 | ||
712 | extern int | 714 | extern int |
713 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | 715 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, |
714 | unsigned long frame_pointer); | 716 | unsigned long frame_pointer); |
715 | 717 | ||
716 | /* | 718 | /* |
717 | * Sometimes we don't want to trace a function with the function | 719 | * Sometimes we don't want to trace a function with the function |
718 | * graph tracer but we want them to keep traced by the usual function | 720 | * graph tracer but we want them to keep traced by the usual function |
719 | * tracer if the function graph tracer is not configured. | 721 | * tracer if the function graph tracer is not configured. |
720 | */ | 722 | */ |
721 | #define __notrace_funcgraph notrace | 723 | #define __notrace_funcgraph notrace |
722 | 724 | ||
723 | /* | 725 | /* |
724 | * We want to which function is an entrypoint of a hardirq. | 726 | * We want to which function is an entrypoint of a hardirq. |
725 | * That will help us to put a signal on output. | 727 | * That will help us to put a signal on output. |
726 | */ | 728 | */ |
727 | #define __irq_entry __attribute__((__section__(".irqentry.text"))) | 729 | #define __irq_entry __attribute__((__section__(".irqentry.text"))) |
728 | 730 | ||
729 | /* Limits of hardirq entrypoints */ | 731 | /* Limits of hardirq entrypoints */ |
730 | extern char __irqentry_text_start[]; | 732 | extern char __irqentry_text_start[]; |
731 | extern char __irqentry_text_end[]; | 733 | extern char __irqentry_text_end[]; |
732 | 734 | ||
733 | #define FTRACE_NOTRACE_DEPTH 65536 | 735 | #define FTRACE_NOTRACE_DEPTH 65536 |
734 | #define FTRACE_RETFUNC_DEPTH 50 | 736 | #define FTRACE_RETFUNC_DEPTH 50 |
735 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 | 737 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 |
736 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 738 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
737 | trace_func_graph_ent_t entryfunc); | 739 | trace_func_graph_ent_t entryfunc); |
738 | 740 | ||
739 | extern void ftrace_graph_stop(void); | 741 | extern void ftrace_graph_stop(void); |
740 | 742 | ||
741 | /* The current handlers in use */ | 743 | /* The current handlers in use */ |
742 | extern trace_func_graph_ret_t ftrace_graph_return; | 744 | extern trace_func_graph_ret_t ftrace_graph_return; |
743 | extern trace_func_graph_ent_t ftrace_graph_entry; | 745 | extern trace_func_graph_ent_t ftrace_graph_entry; |
744 | 746 | ||
745 | extern void unregister_ftrace_graph(void); | 747 | extern void unregister_ftrace_graph(void); |
746 | 748 | ||
747 | extern void ftrace_graph_init_task(struct task_struct *t); | 749 | extern void ftrace_graph_init_task(struct task_struct *t); |
748 | extern void ftrace_graph_exit_task(struct task_struct *t); | 750 | extern void ftrace_graph_exit_task(struct task_struct *t); |
749 | extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); | 751 | extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); |
750 | 752 | ||
751 | static inline int task_curr_ret_stack(struct task_struct *t) | 753 | static inline int task_curr_ret_stack(struct task_struct *t) |
752 | { | 754 | { |
753 | return t->curr_ret_stack; | 755 | return t->curr_ret_stack; |
754 | } | 756 | } |
755 | 757 | ||
756 | static inline void pause_graph_tracing(void) | 758 | static inline void pause_graph_tracing(void) |
757 | { | 759 | { |
758 | atomic_inc(¤t->tracing_graph_pause); | 760 | atomic_inc(¤t->tracing_graph_pause); |
759 | } | 761 | } |
760 | 762 | ||
761 | static inline void unpause_graph_tracing(void) | 763 | static inline void unpause_graph_tracing(void) |
762 | { | 764 | { |
763 | atomic_dec(¤t->tracing_graph_pause); | 765 | atomic_dec(¤t->tracing_graph_pause); |
764 | } | 766 | } |
765 | #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ | 767 | #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ |
766 | 768 | ||
767 | #define __notrace_funcgraph | 769 | #define __notrace_funcgraph |
768 | #define __irq_entry | 770 | #define __irq_entry |
769 | #define INIT_FTRACE_GRAPH | 771 | #define INIT_FTRACE_GRAPH |
770 | 772 | ||
771 | static inline void ftrace_graph_init_task(struct task_struct *t) { } | 773 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
772 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } | 774 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } |
773 | static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } | 775 | static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } |
774 | 776 | ||
775 | static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 777 | static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
776 | trace_func_graph_ent_t entryfunc) | 778 | trace_func_graph_ent_t entryfunc) |
777 | { | 779 | { |
778 | return -1; | 780 | return -1; |
779 | } | 781 | } |
780 | static inline void unregister_ftrace_graph(void) { } | 782 | static inline void unregister_ftrace_graph(void) { } |
781 | 783 | ||
782 | static inline int task_curr_ret_stack(struct task_struct *tsk) | 784 | static inline int task_curr_ret_stack(struct task_struct *tsk) |
783 | { | 785 | { |
784 | return -1; | 786 | return -1; |
785 | } | 787 | } |
786 | 788 | ||
787 | static inline void pause_graph_tracing(void) { } | 789 | static inline void pause_graph_tracing(void) { } |
788 | static inline void unpause_graph_tracing(void) { } | 790 | static inline void unpause_graph_tracing(void) { } |
789 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 791 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
790 | 792 | ||
791 | #ifdef CONFIG_TRACING | 793 | #ifdef CONFIG_TRACING |
792 | 794 | ||
/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};
802 | 804 | ||
803 | static inline void set_tsk_trace_trace(struct task_struct *tsk) | 805 | static inline void set_tsk_trace_trace(struct task_struct *tsk) |
804 | { | 806 | { |
805 | set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | 807 | set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); |
806 | } | 808 | } |
807 | 809 | ||
808 | static inline void clear_tsk_trace_trace(struct task_struct *tsk) | 810 | static inline void clear_tsk_trace_trace(struct task_struct *tsk) |
809 | { | 811 | { |
810 | clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | 812 | clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); |
811 | } | 813 | } |
812 | 814 | ||
813 | static inline int test_tsk_trace_trace(struct task_struct *tsk) | 815 | static inline int test_tsk_trace_trace(struct task_struct *tsk) |
814 | { | 816 | { |
815 | return tsk->trace & TSK_TRACE_FL_TRACE; | 817 | return tsk->trace & TSK_TRACE_FL_TRACE; |
816 | } | 818 | } |
817 | 819 | ||
818 | static inline void set_tsk_trace_graph(struct task_struct *tsk) | 820 | static inline void set_tsk_trace_graph(struct task_struct *tsk) |
819 | { | 821 | { |
820 | set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | 822 | set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); |
821 | } | 823 | } |
822 | 824 | ||
823 | static inline void clear_tsk_trace_graph(struct task_struct *tsk) | 825 | static inline void clear_tsk_trace_graph(struct task_struct *tsk) |
824 | { | 826 | { |
825 | clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | 827 | clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); |
826 | } | 828 | } |
827 | 829 | ||
828 | static inline int test_tsk_trace_graph(struct task_struct *tsk) | 830 | static inline int test_tsk_trace_graph(struct task_struct *tsk) |
829 | { | 831 | { |
830 | return tsk->trace & TSK_TRACE_FL_GRAPH; | 832 | return tsk->trace & TSK_TRACE_FL_GRAPH; |
831 | } | 833 | } |
832 | 834 | ||
833 | enum ftrace_dump_mode; | 835 | enum ftrace_dump_mode; |
834 | 836 | ||
835 | extern enum ftrace_dump_mode ftrace_dump_on_oops; | 837 | extern enum ftrace_dump_mode ftrace_dump_on_oops; |
836 | 838 | ||
837 | extern void disable_trace_on_warning(void); | 839 | extern void disable_trace_on_warning(void); |
838 | extern int __disable_trace_on_warning; | 840 | extern int __disable_trace_on_warning; |
839 | 841 | ||
840 | #ifdef CONFIG_PREEMPT | 842 | #ifdef CONFIG_PREEMPT |
841 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | 843 | #define INIT_TRACE_RECURSION .trace_recursion = 0, |
842 | #endif | 844 | #endif |
843 | 845 | ||
844 | #else /* CONFIG_TRACING */ | 846 | #else /* CONFIG_TRACING */ |
845 | static inline void disable_trace_on_warning(void) { } | 847 | static inline void disable_trace_on_warning(void) { } |
846 | #endif /* CONFIG_TRACING */ | 848 | #endif /* CONFIG_TRACING */ |
847 | 849 | ||
848 | #ifndef INIT_TRACE_RECURSION | 850 | #ifndef INIT_TRACE_RECURSION |
849 | #define INIT_TRACE_RECURSION | 851 | #define INIT_TRACE_RECURSION |
850 | #endif | 852 | #endif |
851 | 853 | ||
852 | #ifdef CONFIG_FTRACE_SYSCALLS | 854 | #ifdef CONFIG_FTRACE_SYSCALLS |
853 | 855 | ||
854 | unsigned long arch_syscall_addr(int nr); | 856 | unsigned long arch_syscall_addr(int nr); |
855 | 857 | ||
856 | #endif /* CONFIG_FTRACE_SYSCALLS */ | 858 | #endif /* CONFIG_FTRACE_SYSCALLS */ |
857 | 859 | ||
858 | #endif /* _LINUX_FTRACE_H */ | 860 | #endif /* _LINUX_FTRACE_H */ |
859 | 861 |
kernel/module.c
1 | /* | 1 | /* |
2 | Copyright (C) 2002 Richard Henderson | 2 | Copyright (C) 2002 Richard Henderson |
3 | Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. | 3 | Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify | 5 | This program is free software; you can redistribute it and/or modify |
6 | it under the terms of the GNU General Public License as published by | 6 | it under the terms of the GNU General Public License as published by |
7 | the Free Software Foundation; either version 2 of the License, or | 7 | the Free Software Foundation; either version 2 of the License, or |
8 | (at your option) any later version. | 8 | (at your option) any later version. |
9 | 9 | ||
10 | This program is distributed in the hope that it will be useful, | 10 | This program is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | GNU General Public License for more details. | 13 | GNU General Public License for more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License | 15 | You should have received a copy of the GNU General Public License |
16 | along with this program; if not, write to the Free Software | 16 | along with this program; if not, write to the Free Software |
17 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
20 | #include <linux/moduleloader.h> | 20 | #include <linux/moduleloader.h> |
21 | #include <linux/ftrace_event.h> | 21 | #include <linux/ftrace_event.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/kallsyms.h> | 23 | #include <linux/kallsyms.h> |
24 | #include <linux/file.h> | 24 | #include <linux/file.h> |
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/sysfs.h> | 26 | #include <linux/sysfs.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
30 | #include <linux/elf.h> | 30 | #include <linux/elf.h> |
31 | #include <linux/proc_fs.h> | 31 | #include <linux/proc_fs.h> |
32 | #include <linux/security.h> | 32 | #include <linux/security.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/syscalls.h> | 34 | #include <linux/syscalls.h> |
35 | #include <linux/fcntl.h> | 35 | #include <linux/fcntl.h> |
36 | #include <linux/rcupdate.h> | 36 | #include <linux/rcupdate.h> |
37 | #include <linux/capability.h> | 37 | #include <linux/capability.h> |
38 | #include <linux/cpu.h> | 38 | #include <linux/cpu.h> |
39 | #include <linux/moduleparam.h> | 39 | #include <linux/moduleparam.h> |
40 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
41 | #include <linux/err.h> | 41 | #include <linux/err.h> |
42 | #include <linux/vermagic.h> | 42 | #include <linux/vermagic.h> |
43 | #include <linux/notifier.h> | 43 | #include <linux/notifier.h> |
44 | #include <linux/sched.h> | 44 | #include <linux/sched.h> |
45 | #include <linux/stop_machine.h> | 45 | #include <linux/stop_machine.h> |
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <linux/string.h> | 47 | #include <linux/string.h> |
48 | #include <linux/mutex.h> | 48 | #include <linux/mutex.h> |
49 | #include <linux/rculist.h> | 49 | #include <linux/rculist.h> |
50 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
51 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
52 | #include <asm/mmu_context.h> | 52 | #include <asm/mmu_context.h> |
53 | #include <linux/license.h> | 53 | #include <linux/license.h> |
54 | #include <asm/sections.h> | 54 | #include <asm/sections.h> |
55 | #include <linux/tracepoint.h> | 55 | #include <linux/tracepoint.h> |
56 | #include <linux/ftrace.h> | 56 | #include <linux/ftrace.h> |
57 | #include <linux/async.h> | 57 | #include <linux/async.h> |
58 | #include <linux/percpu.h> | 58 | #include <linux/percpu.h> |
59 | #include <linux/kmemleak.h> | 59 | #include <linux/kmemleak.h> |
60 | #include <linux/jump_label.h> | 60 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 61 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 62 | #include <linux/bsearch.h> |
63 | #include <linux/fips.h> | 63 | #include <linux/fips.h> |
64 | #include <uapi/linux/module.h> | 64 | #include <uapi/linux/module.h> |
65 | #include "module-internal.h" | 65 | #include "module-internal.h" |
66 | 66 | ||
67 | #define CREATE_TRACE_POINTS | 67 | #define CREATE_TRACE_POINTS |
68 | #include <trace/events/module.h> | 68 | #include <trace/events/module.h> |
69 | 69 | ||
70 | #ifndef ARCH_SHF_SMALL | 70 | #ifndef ARCH_SHF_SMALL |
71 | #define ARCH_SHF_SMALL 0 | 71 | #define ARCH_SHF_SMALL 0 |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Modules' sections will be aligned on page boundaries | 75 | * Modules' sections will be aligned on page boundaries |
76 | * to ensure complete separation of code and data, but | 76 | * to ensure complete separation of code and data, but |
77 | * only when CONFIG_DEBUG_SET_MODULE_RONX=y | 77 | * only when CONFIG_DEBUG_SET_MODULE_RONX=y |
78 | */ | 78 | */ |
79 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | 79 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX |
80 | # define debug_align(X) ALIGN(X, PAGE_SIZE) | 80 | # define debug_align(X) ALIGN(X, PAGE_SIZE) |
81 | #else | 81 | #else |
82 | # define debug_align(X) (X) | 82 | # define debug_align(X) (X) |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * Given BASE and SIZE this macro calculates the number of pages the | 86 | * Given BASE and SIZE this macro calculates the number of pages the |
87 | * memory regions occupies | 87 | * memory regions occupies |
88 | */ | 88 | */ |
89 | #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ | 89 | #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ |
90 | (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ | 90 | (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ |
91 | PFN_DOWN((unsigned long)BASE) + 1) \ | 91 | PFN_DOWN((unsigned long)BASE) + 1) \ |
92 | : (0UL)) | 92 | : (0UL)) |
93 | 93 | ||
94 | /* If this is set, the section belongs in the init part of the module */ | 94 | /* If this is set, the section belongs in the init part of the module */ |
95 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 95 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * Mutex protects: | 98 | * Mutex protects: |
99 | * 1) List of modules (also safely readable with preempt_disable), | 99 | * 1) List of modules (also safely readable with preempt_disable), |
100 | * 2) module_use links, | 100 | * 2) module_use links, |
101 | * 3) module_addr_min/module_addr_max. | 101 | * 3) module_addr_min/module_addr_max. |
102 | * (delete uses stop_machine/add uses RCU list operations). */ | 102 | * (delete uses stop_machine/add uses RCU list operations). */ |
103 | DEFINE_MUTEX(module_mutex); | 103 | DEFINE_MUTEX(module_mutex); |
104 | EXPORT_SYMBOL_GPL(module_mutex); | 104 | EXPORT_SYMBOL_GPL(module_mutex); |
105 | static LIST_HEAD(modules); | 105 | static LIST_HEAD(modules); |
106 | #ifdef CONFIG_KGDB_KDB | 106 | #ifdef CONFIG_KGDB_KDB |
107 | struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ | 107 | struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ |
108 | #endif /* CONFIG_KGDB_KDB */ | 108 | #endif /* CONFIG_KGDB_KDB */ |
109 | 109 | ||
110 | #ifdef CONFIG_MODULE_SIG | 110 | #ifdef CONFIG_MODULE_SIG |
111 | #ifdef CONFIG_MODULE_SIG_FORCE | 111 | #ifdef CONFIG_MODULE_SIG_FORCE |
112 | static bool sig_enforce = true; | 112 | static bool sig_enforce = true; |
113 | #else | 113 | #else |
114 | static bool sig_enforce = false; | 114 | static bool sig_enforce = false; |
115 | 115 | ||
116 | static int param_set_bool_enable_only(const char *val, | 116 | static int param_set_bool_enable_only(const char *val, |
117 | const struct kernel_param *kp) | 117 | const struct kernel_param *kp) |
118 | { | 118 | { |
119 | int err; | 119 | int err; |
120 | bool test; | 120 | bool test; |
121 | struct kernel_param dummy_kp = *kp; | 121 | struct kernel_param dummy_kp = *kp; |
122 | 122 | ||
123 | dummy_kp.arg = &test; | 123 | dummy_kp.arg = &test; |
124 | 124 | ||
125 | err = param_set_bool(val, &dummy_kp); | 125 | err = param_set_bool(val, &dummy_kp); |
126 | if (err) | 126 | if (err) |
127 | return err; | 127 | return err; |
128 | 128 | ||
129 | /* Don't let them unset it once it's set! */ | 129 | /* Don't let them unset it once it's set! */ |
130 | if (!test && sig_enforce) | 130 | if (!test && sig_enforce) |
131 | return -EROFS; | 131 | return -EROFS; |
132 | 132 | ||
133 | if (test) | 133 | if (test) |
134 | sig_enforce = true; | 134 | sig_enforce = true; |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | static const struct kernel_param_ops param_ops_bool_enable_only = { | 138 | static const struct kernel_param_ops param_ops_bool_enable_only = { |
139 | .flags = KERNEL_PARAM_FL_NOARG, | 139 | .flags = KERNEL_PARAM_FL_NOARG, |
140 | .set = param_set_bool_enable_only, | 140 | .set = param_set_bool_enable_only, |
141 | .get = param_get_bool, | 141 | .get = param_get_bool, |
142 | }; | 142 | }; |
143 | #define param_check_bool_enable_only param_check_bool | 143 | #define param_check_bool_enable_only param_check_bool |
144 | 144 | ||
145 | module_param(sig_enforce, bool_enable_only, 0644); | 145 | module_param(sig_enforce, bool_enable_only, 0644); |
146 | #endif /* !CONFIG_MODULE_SIG_FORCE */ | 146 | #endif /* !CONFIG_MODULE_SIG_FORCE */ |
147 | #endif /* CONFIG_MODULE_SIG */ | 147 | #endif /* CONFIG_MODULE_SIG */ |
148 | 148 | ||
149 | /* Block module loading/unloading? */ | 149 | /* Block module loading/unloading? */ |
150 | int modules_disabled = 0; | 150 | int modules_disabled = 0; |
151 | core_param(nomodule, modules_disabled, bint, 0); | 151 | core_param(nomodule, modules_disabled, bint, 0); |
152 | 152 | ||
153 | /* Waiting for a module to finish initializing? */ | 153 | /* Waiting for a module to finish initializing? */ |
154 | static DECLARE_WAIT_QUEUE_HEAD(module_wq); | 154 | static DECLARE_WAIT_QUEUE_HEAD(module_wq); |
155 | 155 | ||
156 | static BLOCKING_NOTIFIER_HEAD(module_notify_list); | 156 | static BLOCKING_NOTIFIER_HEAD(module_notify_list); |
157 | 157 | ||
158 | /* Bounds of module allocation, for speeding __module_address. | 158 | /* Bounds of module allocation, for speeding __module_address. |
159 | * Protected by module_mutex. */ | 159 | * Protected by module_mutex. */ |
160 | static unsigned long module_addr_min = -1UL, module_addr_max = 0; | 160 | static unsigned long module_addr_min = -1UL, module_addr_max = 0; |
161 | 161 | ||
162 | int register_module_notifier(struct notifier_block * nb) | 162 | int register_module_notifier(struct notifier_block * nb) |
163 | { | 163 | { |
164 | return blocking_notifier_chain_register(&module_notify_list, nb); | 164 | return blocking_notifier_chain_register(&module_notify_list, nb); |
165 | } | 165 | } |
166 | EXPORT_SYMBOL(register_module_notifier); | 166 | EXPORT_SYMBOL(register_module_notifier); |
167 | 167 | ||
168 | int unregister_module_notifier(struct notifier_block * nb) | 168 | int unregister_module_notifier(struct notifier_block * nb) |
169 | { | 169 | { |
170 | return blocking_notifier_chain_unregister(&module_notify_list, nb); | 170 | return blocking_notifier_chain_unregister(&module_notify_list, nb); |
171 | } | 171 | } |
172 | EXPORT_SYMBOL(unregister_module_notifier); | 172 | EXPORT_SYMBOL(unregister_module_notifier); |
173 | 173 | ||
174 | struct load_info { | 174 | struct load_info { |
175 | Elf_Ehdr *hdr; | 175 | Elf_Ehdr *hdr; |
176 | unsigned long len; | 176 | unsigned long len; |
177 | Elf_Shdr *sechdrs; | 177 | Elf_Shdr *sechdrs; |
178 | char *secstrings, *strtab; | 178 | char *secstrings, *strtab; |
179 | unsigned long symoffs, stroffs; | 179 | unsigned long symoffs, stroffs; |
180 | struct _ddebug *debug; | 180 | struct _ddebug *debug; |
181 | unsigned int num_debug; | 181 | unsigned int num_debug; |
182 | bool sig_ok; | 182 | bool sig_ok; |
183 | struct { | 183 | struct { |
184 | unsigned int sym, str, mod, vers, info, pcpu; | 184 | unsigned int sym, str, mod, vers, info, pcpu; |
185 | } index; | 185 | } index; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | /* We require a truly strong try_module_get(): 0 means failure due to | 188 | /* We require a truly strong try_module_get(): 0 means failure due to |
189 | ongoing or failed initialization etc. */ | 189 | ongoing or failed initialization etc. */ |
190 | static inline int strong_try_module_get(struct module *mod) | 190 | static inline int strong_try_module_get(struct module *mod) |
191 | { | 191 | { |
192 | BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); | 192 | BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); |
193 | if (mod && mod->state == MODULE_STATE_COMING) | 193 | if (mod && mod->state == MODULE_STATE_COMING) |
194 | return -EBUSY; | 194 | return -EBUSY; |
195 | if (try_module_get(mod)) | 195 | if (try_module_get(mod)) |
196 | return 0; | 196 | return 0; |
197 | else | 197 | else |
198 | return -ENOENT; | 198 | return -ENOENT; |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline void add_taint_module(struct module *mod, unsigned flag, | 201 | static inline void add_taint_module(struct module *mod, unsigned flag, |
202 | enum lockdep_ok lockdep_ok) | 202 | enum lockdep_ok lockdep_ok) |
203 | { | 203 | { |
204 | add_taint(flag, lockdep_ok); | 204 | add_taint(flag, lockdep_ok); |
205 | mod->taints |= (1U << flag); | 205 | mod->taints |= (1U << flag); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * A thread that wants to hold a reference to a module only while it | 209 | * A thread that wants to hold a reference to a module only while it |
210 | * is running can call this to safely exit. nfsd and lockd use this. | 210 | * is running can call this to safely exit. nfsd and lockd use this. |
211 | */ | 211 | */ |
212 | void __module_put_and_exit(struct module *mod, long code) | 212 | void __module_put_and_exit(struct module *mod, long code) |
213 | { | 213 | { |
214 | module_put(mod); | 214 | module_put(mod); |
215 | do_exit(code); | 215 | do_exit(code); |
216 | } | 216 | } |
217 | EXPORT_SYMBOL(__module_put_and_exit); | 217 | EXPORT_SYMBOL(__module_put_and_exit); |
218 | 218 | ||
219 | /* Find a module section: 0 means not found. */ | 219 | /* Find a module section: 0 means not found. */ |
220 | static unsigned int find_sec(const struct load_info *info, const char *name) | 220 | static unsigned int find_sec(const struct load_info *info, const char *name) |
221 | { | 221 | { |
222 | unsigned int i; | 222 | unsigned int i; |
223 | 223 | ||
224 | for (i = 1; i < info->hdr->e_shnum; i++) { | 224 | for (i = 1; i < info->hdr->e_shnum; i++) { |
225 | Elf_Shdr *shdr = &info->sechdrs[i]; | 225 | Elf_Shdr *shdr = &info->sechdrs[i]; |
226 | /* Alloc bit cleared means "ignore it." */ | 226 | /* Alloc bit cleared means "ignore it." */ |
227 | if ((shdr->sh_flags & SHF_ALLOC) | 227 | if ((shdr->sh_flags & SHF_ALLOC) |
228 | && strcmp(info->secstrings + shdr->sh_name, name) == 0) | 228 | && strcmp(info->secstrings + shdr->sh_name, name) == 0) |
229 | return i; | 229 | return i; |
230 | } | 230 | } |
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* Find a module section, or NULL. */ | 234 | /* Find a module section, or NULL. */ |
235 | static void *section_addr(const struct load_info *info, const char *name) | 235 | static void *section_addr(const struct load_info *info, const char *name) |
236 | { | 236 | { |
237 | /* Section 0 has sh_addr 0. */ | 237 | /* Section 0 has sh_addr 0. */ |
238 | return (void *)info->sechdrs[find_sec(info, name)].sh_addr; | 238 | return (void *)info->sechdrs[find_sec(info, name)].sh_addr; |
239 | } | 239 | } |
240 | 240 | ||
241 | /* Find a module section, or NULL. Fill in number of "objects" in section. */ | 241 | /* Find a module section, or NULL. Fill in number of "objects" in section. */ |
242 | static void *section_objs(const struct load_info *info, | 242 | static void *section_objs(const struct load_info *info, |
243 | const char *name, | 243 | const char *name, |
244 | size_t object_size, | 244 | size_t object_size, |
245 | unsigned int *num) | 245 | unsigned int *num) |
246 | { | 246 | { |
247 | unsigned int sec = find_sec(info, name); | 247 | unsigned int sec = find_sec(info, name); |
248 | 248 | ||
249 | /* Section 0 has sh_addr 0 and sh_size 0. */ | 249 | /* Section 0 has sh_addr 0 and sh_size 0. */ |
250 | *num = info->sechdrs[sec].sh_size / object_size; | 250 | *num = info->sechdrs[sec].sh_size / object_size; |
251 | return (void *)info->sechdrs[sec].sh_addr; | 251 | return (void *)info->sechdrs[sec].sh_addr; |
252 | } | 252 | } |
253 | 253 | ||
254 | /* Provided by the linker */ | 254 | /* Provided by the linker */ |
255 | extern const struct kernel_symbol __start___ksymtab[]; | 255 | extern const struct kernel_symbol __start___ksymtab[]; |
256 | extern const struct kernel_symbol __stop___ksymtab[]; | 256 | extern const struct kernel_symbol __stop___ksymtab[]; |
257 | extern const struct kernel_symbol __start___ksymtab_gpl[]; | 257 | extern const struct kernel_symbol __start___ksymtab_gpl[]; |
258 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; | 258 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; |
259 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; | 259 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; |
260 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; | 260 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; |
261 | extern const unsigned long __start___kcrctab[]; | 261 | extern const unsigned long __start___kcrctab[]; |
262 | extern const unsigned long __start___kcrctab_gpl[]; | 262 | extern const unsigned long __start___kcrctab_gpl[]; |
263 | extern const unsigned long __start___kcrctab_gpl_future[]; | 263 | extern const unsigned long __start___kcrctab_gpl_future[]; |
264 | #ifdef CONFIG_UNUSED_SYMBOLS | 264 | #ifdef CONFIG_UNUSED_SYMBOLS |
265 | extern const struct kernel_symbol __start___ksymtab_unused[]; | 265 | extern const struct kernel_symbol __start___ksymtab_unused[]; |
266 | extern const struct kernel_symbol __stop___ksymtab_unused[]; | 266 | extern const struct kernel_symbol __stop___ksymtab_unused[]; |
267 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; | 267 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; |
268 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; | 268 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; |
269 | extern const unsigned long __start___kcrctab_unused[]; | 269 | extern const unsigned long __start___kcrctab_unused[]; |
270 | extern const unsigned long __start___kcrctab_unused_gpl[]; | 270 | extern const unsigned long __start___kcrctab_unused_gpl[]; |
271 | #endif | 271 | #endif |
272 | 272 | ||
273 | #ifndef CONFIG_MODVERSIONS | 273 | #ifndef CONFIG_MODVERSIONS |
274 | #define symversion(base, idx) NULL | 274 | #define symversion(base, idx) NULL |
275 | #else | 275 | #else |
276 | #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) | 276 | #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) |
277 | #endif | 277 | #endif |
278 | 278 | ||
279 | static bool each_symbol_in_section(const struct symsearch *arr, | 279 | static bool each_symbol_in_section(const struct symsearch *arr, |
280 | unsigned int arrsize, | 280 | unsigned int arrsize, |
281 | struct module *owner, | 281 | struct module *owner, |
282 | bool (*fn)(const struct symsearch *syms, | 282 | bool (*fn)(const struct symsearch *syms, |
283 | struct module *owner, | 283 | struct module *owner, |
284 | void *data), | 284 | void *data), |
285 | void *data) | 285 | void *data) |
286 | { | 286 | { |
287 | unsigned int j; | 287 | unsigned int j; |
288 | 288 | ||
289 | for (j = 0; j < arrsize; j++) { | 289 | for (j = 0; j < arrsize; j++) { |
290 | if (fn(&arr[j], owner, data)) | 290 | if (fn(&arr[j], owner, data)) |
291 | return true; | 291 | return true; |
292 | } | 292 | } |
293 | 293 | ||
294 | return false; | 294 | return false; |
295 | } | 295 | } |
296 | 296 | ||
297 | /* Returns true as soon as fn returns true, otherwise false. */ | 297 | /* Returns true as soon as fn returns true, otherwise false. */ |
298 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, | 298 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
299 | struct module *owner, | 299 | struct module *owner, |
300 | void *data), | 300 | void *data), |
301 | void *data) | 301 | void *data) |
302 | { | 302 | { |
303 | struct module *mod; | 303 | struct module *mod; |
304 | static const struct symsearch arr[] = { | 304 | static const struct symsearch arr[] = { |
305 | { __start___ksymtab, __stop___ksymtab, __start___kcrctab, | 305 | { __start___ksymtab, __stop___ksymtab, __start___kcrctab, |
306 | NOT_GPL_ONLY, false }, | 306 | NOT_GPL_ONLY, false }, |
307 | { __start___ksymtab_gpl, __stop___ksymtab_gpl, | 307 | { __start___ksymtab_gpl, __stop___ksymtab_gpl, |
308 | __start___kcrctab_gpl, | 308 | __start___kcrctab_gpl, |
309 | GPL_ONLY, false }, | 309 | GPL_ONLY, false }, |
310 | { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, | 310 | { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, |
311 | __start___kcrctab_gpl_future, | 311 | __start___kcrctab_gpl_future, |
312 | WILL_BE_GPL_ONLY, false }, | 312 | WILL_BE_GPL_ONLY, false }, |
313 | #ifdef CONFIG_UNUSED_SYMBOLS | 313 | #ifdef CONFIG_UNUSED_SYMBOLS |
314 | { __start___ksymtab_unused, __stop___ksymtab_unused, | 314 | { __start___ksymtab_unused, __stop___ksymtab_unused, |
315 | __start___kcrctab_unused, | 315 | __start___kcrctab_unused, |
316 | NOT_GPL_ONLY, true }, | 316 | NOT_GPL_ONLY, true }, |
317 | { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, | 317 | { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, |
318 | __start___kcrctab_unused_gpl, | 318 | __start___kcrctab_unused_gpl, |
319 | GPL_ONLY, true }, | 319 | GPL_ONLY, true }, |
320 | #endif | 320 | #endif |
321 | }; | 321 | }; |
322 | 322 | ||
323 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) | 323 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) |
324 | return true; | 324 | return true; |
325 | 325 | ||
326 | list_for_each_entry_rcu(mod, &modules, list) { | 326 | list_for_each_entry_rcu(mod, &modules, list) { |
327 | struct symsearch arr[] = { | 327 | struct symsearch arr[] = { |
328 | { mod->syms, mod->syms + mod->num_syms, mod->crcs, | 328 | { mod->syms, mod->syms + mod->num_syms, mod->crcs, |
329 | NOT_GPL_ONLY, false }, | 329 | NOT_GPL_ONLY, false }, |
330 | { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, | 330 | { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, |
331 | mod->gpl_crcs, | 331 | mod->gpl_crcs, |
332 | GPL_ONLY, false }, | 332 | GPL_ONLY, false }, |
333 | { mod->gpl_future_syms, | 333 | { mod->gpl_future_syms, |
334 | mod->gpl_future_syms + mod->num_gpl_future_syms, | 334 | mod->gpl_future_syms + mod->num_gpl_future_syms, |
335 | mod->gpl_future_crcs, | 335 | mod->gpl_future_crcs, |
336 | WILL_BE_GPL_ONLY, false }, | 336 | WILL_BE_GPL_ONLY, false }, |
337 | #ifdef CONFIG_UNUSED_SYMBOLS | 337 | #ifdef CONFIG_UNUSED_SYMBOLS |
338 | { mod->unused_syms, | 338 | { mod->unused_syms, |
339 | mod->unused_syms + mod->num_unused_syms, | 339 | mod->unused_syms + mod->num_unused_syms, |
340 | mod->unused_crcs, | 340 | mod->unused_crcs, |
341 | NOT_GPL_ONLY, true }, | 341 | NOT_GPL_ONLY, true }, |
342 | { mod->unused_gpl_syms, | 342 | { mod->unused_gpl_syms, |
343 | mod->unused_gpl_syms + mod->num_unused_gpl_syms, | 343 | mod->unused_gpl_syms + mod->num_unused_gpl_syms, |
344 | mod->unused_gpl_crcs, | 344 | mod->unused_gpl_crcs, |
345 | GPL_ONLY, true }, | 345 | GPL_ONLY, true }, |
346 | #endif | 346 | #endif |
347 | }; | 347 | }; |
348 | 348 | ||
349 | if (mod->state == MODULE_STATE_UNFORMED) | 349 | if (mod->state == MODULE_STATE_UNFORMED) |
350 | continue; | 350 | continue; |
351 | 351 | ||
352 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) | 352 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) |
353 | return true; | 353 | return true; |
354 | } | 354 | } |
355 | return false; | 355 | return false; |
356 | } | 356 | } |
357 | EXPORT_SYMBOL_GPL(each_symbol_section); | 357 | EXPORT_SYMBOL_GPL(each_symbol_section); |
358 | 358 | ||
359 | struct find_symbol_arg { | 359 | struct find_symbol_arg { |
360 | /* Input */ | 360 | /* Input */ |
361 | const char *name; | 361 | const char *name; |
362 | bool gplok; | 362 | bool gplok; |
363 | bool warn; | 363 | bool warn; |
364 | 364 | ||
365 | /* Output */ | 365 | /* Output */ |
366 | struct module *owner; | 366 | struct module *owner; |
367 | const unsigned long *crc; | 367 | const unsigned long *crc; |
368 | const struct kernel_symbol *sym; | 368 | const struct kernel_symbol *sym; |
369 | }; | 369 | }; |
370 | 370 | ||
371 | static bool check_symbol(const struct symsearch *syms, | 371 | static bool check_symbol(const struct symsearch *syms, |
372 | struct module *owner, | 372 | struct module *owner, |
373 | unsigned int symnum, void *data) | 373 | unsigned int symnum, void *data) |
374 | { | 374 | { |
375 | struct find_symbol_arg *fsa = data; | 375 | struct find_symbol_arg *fsa = data; |
376 | 376 | ||
377 | if (!fsa->gplok) { | 377 | if (!fsa->gplok) { |
378 | if (syms->licence == GPL_ONLY) | 378 | if (syms->licence == GPL_ONLY) |
379 | return false; | 379 | return false; |
380 | if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { | 380 | if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { |
381 | pr_warn("Symbol %s is being used by a non-GPL module, " | 381 | pr_warn("Symbol %s is being used by a non-GPL module, " |
382 | "which will not be allowed in the future\n", | 382 | "which will not be allowed in the future\n", |
383 | fsa->name); | 383 | fsa->name); |
384 | } | 384 | } |
385 | } | 385 | } |
386 | 386 | ||
387 | #ifdef CONFIG_UNUSED_SYMBOLS | 387 | #ifdef CONFIG_UNUSED_SYMBOLS |
388 | if (syms->unused && fsa->warn) { | 388 | if (syms->unused && fsa->warn) { |
389 | pr_warn("Symbol %s is marked as UNUSED, however this module is " | 389 | pr_warn("Symbol %s is marked as UNUSED, however this module is " |
390 | "using it.\n", fsa->name); | 390 | "using it.\n", fsa->name); |
391 | pr_warn("This symbol will go away in the future.\n"); | 391 | pr_warn("This symbol will go away in the future.\n"); |
392 | pr_warn("Please evalute if this is the right api to use and if " | 392 | pr_warn("Please evalute if this is the right api to use and if " |
393 | "it really is, submit a report the linux kernel " | 393 | "it really is, submit a report the linux kernel " |
394 | "mailinglist together with submitting your code for " | 394 | "mailinglist together with submitting your code for " |
395 | "inclusion.\n"); | 395 | "inclusion.\n"); |
396 | } | 396 | } |
397 | #endif | 397 | #endif |
398 | 398 | ||
399 | fsa->owner = owner; | 399 | fsa->owner = owner; |
400 | fsa->crc = symversion(syms->crcs, symnum); | 400 | fsa->crc = symversion(syms->crcs, symnum); |
401 | fsa->sym = &syms->start[symnum]; | 401 | fsa->sym = &syms->start[symnum]; |
402 | return true; | 402 | return true; |
403 | } | 403 | } |
404 | 404 | ||
405 | static int cmp_name(const void *va, const void *vb) | 405 | static int cmp_name(const void *va, const void *vb) |
406 | { | 406 | { |
407 | const char *a; | 407 | const char *a; |
408 | const struct kernel_symbol *b; | 408 | const struct kernel_symbol *b; |
409 | a = va; b = vb; | 409 | a = va; b = vb; |
410 | return strcmp(a, b->name); | 410 | return strcmp(a, b->name); |
411 | } | 411 | } |
412 | 412 | ||
413 | static bool find_symbol_in_section(const struct symsearch *syms, | 413 | static bool find_symbol_in_section(const struct symsearch *syms, |
414 | struct module *owner, | 414 | struct module *owner, |
415 | void *data) | 415 | void *data) |
416 | { | 416 | { |
417 | struct find_symbol_arg *fsa = data; | 417 | struct find_symbol_arg *fsa = data; |
418 | struct kernel_symbol *sym; | 418 | struct kernel_symbol *sym; |
419 | 419 | ||
420 | sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, | 420 | sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, |
421 | sizeof(struct kernel_symbol), cmp_name); | 421 | sizeof(struct kernel_symbol), cmp_name); |
422 | 422 | ||
423 | if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) | 423 | if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) |
424 | return true; | 424 | return true; |
425 | 425 | ||
426 | return false; | 426 | return false; |
427 | } | 427 | } |
428 | 428 | ||
429 | /* Find a symbol and return it, along with, (optional) crc and | 429 | /* Find a symbol and return it, along with, (optional) crc and |
430 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ | 430 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ |
431 | const struct kernel_symbol *find_symbol(const char *name, | 431 | const struct kernel_symbol *find_symbol(const char *name, |
432 | struct module **owner, | 432 | struct module **owner, |
433 | const unsigned long **crc, | 433 | const unsigned long **crc, |
434 | bool gplok, | 434 | bool gplok, |
435 | bool warn) | 435 | bool warn) |
436 | { | 436 | { |
437 | struct find_symbol_arg fsa; | 437 | struct find_symbol_arg fsa; |
438 | 438 | ||
439 | fsa.name = name; | 439 | fsa.name = name; |
440 | fsa.gplok = gplok; | 440 | fsa.gplok = gplok; |
441 | fsa.warn = warn; | 441 | fsa.warn = warn; |
442 | 442 | ||
443 | if (each_symbol_section(find_symbol_in_section, &fsa)) { | 443 | if (each_symbol_section(find_symbol_in_section, &fsa)) { |
444 | if (owner) | 444 | if (owner) |
445 | *owner = fsa.owner; | 445 | *owner = fsa.owner; |
446 | if (crc) | 446 | if (crc) |
447 | *crc = fsa.crc; | 447 | *crc = fsa.crc; |
448 | return fsa.sym; | 448 | return fsa.sym; |
449 | } | 449 | } |
450 | 450 | ||
451 | pr_debug("Failed to find symbol %s\n", name); | 451 | pr_debug("Failed to find symbol %s\n", name); |
452 | return NULL; | 452 | return NULL; |
453 | } | 453 | } |
454 | EXPORT_SYMBOL_GPL(find_symbol); | 454 | EXPORT_SYMBOL_GPL(find_symbol); |
455 | 455 | ||
456 | /* Search for module by name: must hold module_mutex. */ | 456 | /* Search for module by name: must hold module_mutex. */ |
457 | static struct module *find_module_all(const char *name, size_t len, | 457 | static struct module *find_module_all(const char *name, size_t len, |
458 | bool even_unformed) | 458 | bool even_unformed) |
459 | { | 459 | { |
460 | struct module *mod; | 460 | struct module *mod; |
461 | 461 | ||
462 | list_for_each_entry(mod, &modules, list) { | 462 | list_for_each_entry(mod, &modules, list) { |
463 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) | 463 | if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) |
464 | continue; | 464 | continue; |
465 | if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) | 465 | if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) |
466 | return mod; | 466 | return mod; |
467 | } | 467 | } |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
470 | 470 | ||
/*
 * Look up a module by NUL-terminated name, skipping modules that are
 * still MODULE_STATE_UNFORMED. Caller must hold module_mutex (see
 * find_module_all()).
 */
struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
476 | 476 | ||
477 | #ifdef CONFIG_SMP | 477 | #ifdef CONFIG_SMP |
478 | 478 | ||
/* SMP: return the module's static per-cpu area (set by percpu_modalloc). */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}
483 | 483 | ||
/*
 * Allocate the module's static per-cpu area from the reserved per-cpu
 * chunk, sized and aligned per the .data..percpu section header.
 * Returns 0 on success (or when the section is empty), -ENOMEM otherwise.
 */
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	/* No per-cpu data in this module: nothing to do. */
	if (!pcpusec->sh_size)
		return 0;

	/* Clamp: alignment beyond a page cannot be honoured. */
	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	/* Remember the size for is_module_percpu_address(). */
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}
507 | 507 | ||
/* Release the per-cpu area allocated by percpu_modalloc(). */
static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}
512 | 512 | ||
/* Section index of ".data..percpu", or 0 if the module has none. */
static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}
517 | 517 | ||
518 | static void percpu_modcopy(struct module *mod, | 518 | static void percpu_modcopy(struct module *mod, |
519 | const void *from, unsigned long size) | 519 | const void *from, unsigned long size) |
520 | { | 520 | { |
521 | int cpu; | 521 | int cpu; |
522 | 522 | ||
523 | for_each_possible_cpu(cpu) | 523 | for_each_possible_cpu(cpu) |
524 | memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); | 524 | memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); |
525 | } | 525 | } |
526 | 526 | ||
527 | /** | 527 | /** |
528 | * is_module_percpu_address - test whether address is from module static percpu | 528 | * is_module_percpu_address - test whether address is from module static percpu |
529 | * @addr: address to test | 529 | * @addr: address to test |
530 | * | 530 | * |
531 | * Test whether @addr belongs to module static percpu area. | 531 | * Test whether @addr belongs to module static percpu area. |
532 | * | 532 | * |
533 | * RETURNS: | 533 | * RETURNS: |
534 | * %true if @addr is from module static percpu area | 534 | * %true if @addr is from module static percpu area |
535 | */ | 535 | */ |
536 | bool is_module_percpu_address(unsigned long addr) | 536 | bool is_module_percpu_address(unsigned long addr) |
537 | { | 537 | { |
538 | struct module *mod; | 538 | struct module *mod; |
539 | unsigned int cpu; | 539 | unsigned int cpu; |
540 | 540 | ||
541 | preempt_disable(); | 541 | preempt_disable(); |
542 | 542 | ||
543 | list_for_each_entry_rcu(mod, &modules, list) { | 543 | list_for_each_entry_rcu(mod, &modules, list) { |
544 | if (mod->state == MODULE_STATE_UNFORMED) | 544 | if (mod->state == MODULE_STATE_UNFORMED) |
545 | continue; | 545 | continue; |
546 | if (!mod->percpu_size) | 546 | if (!mod->percpu_size) |
547 | continue; | 547 | continue; |
548 | for_each_possible_cpu(cpu) { | 548 | for_each_possible_cpu(cpu) { |
549 | void *start = per_cpu_ptr(mod->percpu, cpu); | 549 | void *start = per_cpu_ptr(mod->percpu, cpu); |
550 | 550 | ||
551 | if ((void *)addr >= start && | 551 | if ((void *)addr >= start && |
552 | (void *)addr < start + mod->percpu_size) { | 552 | (void *)addr < start + mod->percpu_size) { |
553 | preempt_enable(); | 553 | preempt_enable(); |
554 | return true; | 554 | return true; |
555 | } | 555 | } |
556 | } | 556 | } |
557 | } | 557 | } |
558 | 558 | ||
559 | preempt_enable(); | 559 | preempt_enable(); |
560 | return false; | 560 | return false; |
561 | } | 561 | } |
562 | 562 | ||
563 | #else /* ... !CONFIG_SMP */ | 563 | #else /* ... !CONFIG_SMP */ |
564 | 564 | ||
/* !SMP: modules carry no per-cpu area. */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
/* !SMP: reject any module that carries a non-empty per-cpu section. */
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
/* !SMP: nothing was allocated, nothing to free. */
static inline void percpu_modfree(struct module *mod)
{
}
/* !SMP: report "no per-cpu section" (index 0). */
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
/* !SMP: only legal with an empty per-cpu image. */
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
/* !SMP: no module per-cpu areas exist, so no address can match. */
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}
593 | 593 | ||
594 | #endif /* CONFIG_SMP */ | 594 | #endif /* CONFIG_SMP */ |
595 | 595 | ||
/*
 * MODINFO_ATTR - generate sysfs attribute plumbing for one string field
 * of struct module (used below for "version" and "srcversion"): a setup
 * hook that kstrdup()s the modinfo value, a show hook, an existence
 * test, and a free hook, bundled into a read-only module_attribute.
 */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};
622 | 622 | ||
/* Instantiate the attribute plumbing for the two standard string fields. */
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

/* Name of the most recently removed module; set by delete_module(). */
static char last_unloaded_module[MODULE_NAME_LEN+1];
627 | 627 | ||
628 | #ifdef CONFIG_MODULE_UNLOAD | 628 | #ifdef CONFIG_MODULE_UNLOAD |
629 | 629 | ||
630 | EXPORT_TRACEPOINT_SYMBOL(module_get); | 630 | EXPORT_TRACEPOINT_SYMBOL(module_get); |
631 | 631 | ||
632 | /* Init the unload section of the module. */ | 632 | /* Init the unload section of the module. */ |
633 | static int module_unload_init(struct module *mod) | 633 | static int module_unload_init(struct module *mod) |
634 | { | 634 | { |
635 | mod->refptr = alloc_percpu(struct module_ref); | 635 | mod->refptr = alloc_percpu(struct module_ref); |
636 | if (!mod->refptr) | 636 | if (!mod->refptr) |
637 | return -ENOMEM; | 637 | return -ENOMEM; |
638 | 638 | ||
639 | INIT_LIST_HEAD(&mod->source_list); | 639 | INIT_LIST_HEAD(&mod->source_list); |
640 | INIT_LIST_HEAD(&mod->target_list); | 640 | INIT_LIST_HEAD(&mod->target_list); |
641 | 641 | ||
642 | /* Hold reference count during initialization. */ | 642 | /* Hold reference count during initialization. */ |
643 | raw_cpu_write(mod->refptr->incs, 1); | 643 | raw_cpu_write(mod->refptr->incs, 1); |
644 | 644 | ||
645 | return 0; | 645 | return 0; |
646 | } | 646 | } |
647 | 647 | ||
648 | /* Does a already use b? */ | 648 | /* Does a already use b? */ |
649 | static int already_uses(struct module *a, struct module *b) | 649 | static int already_uses(struct module *a, struct module *b) |
650 | { | 650 | { |
651 | struct module_use *use; | 651 | struct module_use *use; |
652 | 652 | ||
653 | list_for_each_entry(use, &b->source_list, source_list) { | 653 | list_for_each_entry(use, &b->source_list, source_list) { |
654 | if (use->source == a) { | 654 | if (use->source == a) { |
655 | pr_debug("%s uses %s!\n", a->name, b->name); | 655 | pr_debug("%s uses %s!\n", a->name, b->name); |
656 | return 1; | 656 | return 1; |
657 | } | 657 | } |
658 | } | 658 | } |
659 | pr_debug("%s does not use %s!\n", a->name, b->name); | 659 | pr_debug("%s does not use %s!\n", a->name, b->name); |
660 | return 0; | 660 | return 0; |
661 | } | 661 | } |
662 | 662 | ||
663 | /* | 663 | /* |
664 | * Module a uses b | 664 | * Module a uses b |
665 | * - we add 'a' as a "source", 'b' as a "target" of module use | 665 | * - we add 'a' as a "source", 'b' as a "target" of module use |
666 | * - the module_use is added to the list of 'b' sources (so | 666 | * - the module_use is added to the list of 'b' sources (so |
667 | * 'b' can walk the list to see who sourced them), and of 'a' | 667 | * 'b' can walk the list to see who sourced them), and of 'a' |
668 | * targets (so 'a' can see what modules it targets). | 668 | * targets (so 'a' can see what modules it targets). |
669 | */ | 669 | */ |
670 | static int add_module_usage(struct module *a, struct module *b) | 670 | static int add_module_usage(struct module *a, struct module *b) |
671 | { | 671 | { |
672 | struct module_use *use; | 672 | struct module_use *use; |
673 | 673 | ||
674 | pr_debug("Allocating new usage for %s.\n", a->name); | 674 | pr_debug("Allocating new usage for %s.\n", a->name); |
675 | use = kmalloc(sizeof(*use), GFP_ATOMIC); | 675 | use = kmalloc(sizeof(*use), GFP_ATOMIC); |
676 | if (!use) { | 676 | if (!use) { |
677 | pr_warn("%s: out of memory loading\n", a->name); | 677 | pr_warn("%s: out of memory loading\n", a->name); |
678 | return -ENOMEM; | 678 | return -ENOMEM; |
679 | } | 679 | } |
680 | 680 | ||
681 | use->source = a; | 681 | use->source = a; |
682 | use->target = b; | 682 | use->target = b; |
683 | list_add(&use->source_list, &b->source_list); | 683 | list_add(&use->source_list, &b->source_list); |
684 | list_add(&use->target_list, &a->target_list); | 684 | list_add(&use->target_list, &a->target_list); |
685 | return 0; | 685 | return 0; |
686 | } | 686 | } |
687 | 687 | ||
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	/* No target module, or dependency already recorded: nothing to do. */
	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		/* Bookkeeping failed: drop the reference taken above. */
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
709 | 709 | ||
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	/* Drop every dependency this module took on other modules. */
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		/* Release the reference ref_module() took on the target. */
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}
728 | 728 | ||
729 | #ifdef CONFIG_MODULE_FORCE_UNLOAD | 729 | #ifdef CONFIG_MODULE_FORCE_UNLOAD |
/* Forced unload: allowed only if userspace passed O_TRUNC; taints kernel. */
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
737 | #else | 737 | #else |
/* Forced unload disabled in this configuration: never allow it. */
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
742 | #endif /* CONFIG_MODULE_FORCE_UNLOAD */ | 742 | #endif /* CONFIG_MODULE_FORCE_UNLOAD */ |
743 | 743 | ||
/* Arguments marshalled through stop_machine() to __try_stop_module(). */
struct stopref
{
	struct module *mod;	/* module being removed */
	int flags;		/* delete_module() flags (O_TRUNC => force) */
	int *forced;		/* out: non-zero if the unload was forced */
};
750 | 750 | ||
/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}
766 | 766 | ||
/*
 * Atomically (via stop_machine) check @mod's refcount and, if removable
 * or forced, mark it MODULE_STATE_GOING. Returns 0 or -EWOULDBLOCK.
 */
static int try_stop_module(struct module *mod, int flags, int *forced)
{
	struct stopref sref = { mod, flags, forced };

	return stop_machine(__try_stop_module, &sref, NULL);
}
773 | 773 | ||
/*
 * Current reference count: sum of per-cpu incs minus sum of per-cpu
 * decs. Decs are summed first; see the barrier comment below.
 */
unsigned long module_refcount(struct module *mod)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);
800 | 800 | ||
/* This exists whether we can unload or not */
/* (forward declaration; the definition is further down this file) */
static void free_module(struct module *mod);
803 | 803 | ||
/*
 * delete_module(2): remove the module named @name_user.
 *
 * Requires CAP_SYS_MODULE and modules not disabled. Fails with
 * -EWOULDBLOCK while other modules depend on it or its refcount is
 * non-zero (unless forced), and with -EBUSY while it is not LIVE or
 * has an init function but no exit function.
 */
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	/* Copy the name from userspace and ensure NUL termination. */
	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (!(flags & O_NONBLOCK))
		pr_warn("waiting module removal not supported: please upgrade\n");

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Module is now MODULE_STATE_GOING; safe to drop the mutex. */
	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	/* Flush any outstanding async work before tearing the module down. */
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
876 | 876 | ||
877 | static inline void print_unload_info(struct seq_file *m, struct module *mod) | 877 | static inline void print_unload_info(struct seq_file *m, struct module *mod) |
878 | { | 878 | { |
879 | struct module_use *use; | 879 | struct module_use *use; |
880 | int printed_something = 0; | 880 | int printed_something = 0; |
881 | 881 | ||
882 | seq_printf(m, " %lu ", module_refcount(mod)); | 882 | seq_printf(m, " %lu ", module_refcount(mod)); |
883 | 883 | ||
884 | /* Always include a trailing , so userspace can differentiate | 884 | /* Always include a trailing , so userspace can differentiate |
885 | between this and the old multi-field proc format. */ | 885 | between this and the old multi-field proc format. */ |
886 | list_for_each_entry(use, &mod->source_list, source_list) { | 886 | list_for_each_entry(use, &mod->source_list, source_list) { |
887 | printed_something = 1; | 887 | printed_something = 1; |
888 | seq_printf(m, "%s,", use->source->name); | 888 | seq_printf(m, "%s,", use->source->name); |
889 | } | 889 | } |
890 | 890 | ||
891 | if (mod->init != NULL && mod->exit == NULL) { | 891 | if (mod->init != NULL && mod->exit == NULL) { |
892 | printed_something = 1; | 892 | printed_something = 1; |
893 | seq_printf(m, "[permanent],"); | 893 | seq_printf(m, "[permanent],"); |
894 | } | 894 | } |
895 | 895 | ||
896 | if (!printed_something) | 896 | if (!printed_something) |
897 | seq_printf(m, "-"); | 897 | seq_printf(m, "-"); |
898 | } | 898 | } |
899 | 899 | ||
/* Drop the reference on the module that exports @symbol; BUGs if the
 * symbol cannot be found. */
void __symbol_put(const char *symbol)
{
	struct module *owner;

	/* find_symbol() needs preemption disabled (or module_mutex). */
	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
911 | 911 | ||
/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	/* Core kernel symbols carry no module reference to drop. */
	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
928 | 928 | ||
/* sysfs show hook: print the module's current reference count. */
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
}
934 | 934 | ||
/* Read-only "refcnt" module attribute backed by show_refcnt(). */
static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);
937 | 937 | ||
/* Unconditionally take a reference on @module (NULL is a no-op). */
void __module_get(struct module *module)
{
	if (module) {
		/* Keep the per-cpu increment on one CPU. */
		preempt_disable();
		__this_cpu_inc(module->refptr->incs);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);
948 | 948 | ||
/*
 * try_module_get - take a reference on @module unless it is on its way
 * out.  Returns true on success; also true for a NULL @module.
 */
bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();

		/* A dying module must not gain new users; only bump the
		 * per-cpu count while the module is still live. */
		if (likely(module_is_live(module))) {
			__this_cpu_inc(module->refptr->incs);
			trace_module_get(module, _RET_IP_);
		} else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
967 | 967 | ||
/*
 * module_put - release a reference taken with try_module_get() or
 * __module_get().  NULL is silently accepted.
 */
void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		/* decs is a separate per-cpu counter; module_refcount()
		 * derives the balance from incs - decs. */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
980 | 980 | ||
#else /* !CONFIG_MODULE_UNLOAD */
/* Without unload support there is no per-module refcounting or
 * dependency tracking; these helpers degenerate accordingly. */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

/* Record that @a uses @b: just pin @b, nothing to track. */
int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
1003 | 1003 | ||
1004 | static size_t module_flags_taint(struct module *mod, char *buf) | 1004 | static size_t module_flags_taint(struct module *mod, char *buf) |
1005 | { | 1005 | { |
1006 | size_t l = 0; | 1006 | size_t l = 0; |
1007 | 1007 | ||
1008 | if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE)) | 1008 | if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE)) |
1009 | buf[l++] = 'P'; | 1009 | buf[l++] = 'P'; |
1010 | if (mod->taints & (1 << TAINT_OOT_MODULE)) | 1010 | if (mod->taints & (1 << TAINT_OOT_MODULE)) |
1011 | buf[l++] = 'O'; | 1011 | buf[l++] = 'O'; |
1012 | if (mod->taints & (1 << TAINT_FORCED_MODULE)) | 1012 | if (mod->taints & (1 << TAINT_FORCED_MODULE)) |
1013 | buf[l++] = 'F'; | 1013 | buf[l++] = 'F'; |
1014 | if (mod->taints & (1 << TAINT_CRAP)) | 1014 | if (mod->taints & (1 << TAINT_CRAP)) |
1015 | buf[l++] = 'C'; | 1015 | buf[l++] = 'C'; |
1016 | if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) | 1016 | if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) |
1017 | buf[l++] = 'E'; | 1017 | buf[l++] = 'E'; |
1018 | /* | 1018 | /* |
1019 | * TAINT_FORCED_RMMOD: could be added. | 1019 | * TAINT_FORCED_RMMOD: could be added. |
1020 | * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't | 1020 | * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't |
1021 | * apply to modules. | 1021 | * apply to modules. |
1022 | */ | 1022 | */ |
1023 | return l; | 1023 | return l; |
1024 | } | 1024 | } |
1025 | 1025 | ||
/* sysfs: /sys/module/<name>/initstate -- textual module state. */
static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		/* NOTE(review): presumably a module in any other state
		 * never has a sysfs entry, so this is unreachable --
		 * confirm against module kobject setup. */
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);
1049 | 1049 | ||
1050 | static ssize_t store_uevent(struct module_attribute *mattr, | 1050 | static ssize_t store_uevent(struct module_attribute *mattr, |
1051 | struct module_kobject *mk, | 1051 | struct module_kobject *mk, |
1052 | const char *buffer, size_t count) | 1052 | const char *buffer, size_t count) |
1053 | { | 1053 | { |
1054 | enum kobject_action action; | 1054 | enum kobject_action action; |
1055 | 1055 | ||
1056 | if (kobject_action_type(buffer, count, &action) == 0) | 1056 | if (kobject_action_type(buffer, count, &action) == 0) |
1057 | kobject_uevent(&mk->kobj, action); | 1057 | kobject_uevent(&mk->kobj, action); |
1058 | return count; | 1058 | return count; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | struct module_attribute module_uevent = | 1061 | struct module_attribute module_uevent = |
1062 | __ATTR(uevent, 0200, NULL, store_uevent); | 1062 | __ATTR(uevent, 0200, NULL, store_uevent); |
1063 | 1063 | ||
1064 | static ssize_t show_coresize(struct module_attribute *mattr, | 1064 | static ssize_t show_coresize(struct module_attribute *mattr, |
1065 | struct module_kobject *mk, char *buffer) | 1065 | struct module_kobject *mk, char *buffer) |
1066 | { | 1066 | { |
1067 | return sprintf(buffer, "%u\n", mk->mod->core_size); | 1067 | return sprintf(buffer, "%u\n", mk->mod->core_size); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | static struct module_attribute modinfo_coresize = | 1070 | static struct module_attribute modinfo_coresize = |
1071 | __ATTR(coresize, 0444, show_coresize, NULL); | 1071 | __ATTR(coresize, 0444, show_coresize, NULL); |
1072 | 1072 | ||
1073 | static ssize_t show_initsize(struct module_attribute *mattr, | 1073 | static ssize_t show_initsize(struct module_attribute *mattr, |
1074 | struct module_kobject *mk, char *buffer) | 1074 | struct module_kobject *mk, char *buffer) |
1075 | { | 1075 | { |
1076 | return sprintf(buffer, "%u\n", mk->mod->init_size); | 1076 | return sprintf(buffer, "%u\n", mk->mod->init_size); |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | static struct module_attribute modinfo_initsize = | 1079 | static struct module_attribute modinfo_initsize = |
1080 | __ATTR(initsize, 0444, show_initsize, NULL); | 1080 | __ATTR(initsize, 0444, show_initsize, NULL); |
1081 | 1081 | ||
1082 | static ssize_t show_taint(struct module_attribute *mattr, | 1082 | static ssize_t show_taint(struct module_attribute *mattr, |
1083 | struct module_kobject *mk, char *buffer) | 1083 | struct module_kobject *mk, char *buffer) |
1084 | { | 1084 | { |
1085 | size_t l; | 1085 | size_t l; |
1086 | 1086 | ||
1087 | l = module_flags_taint(mk->mod, buffer); | 1087 | l = module_flags_taint(mk->mod, buffer); |
1088 | buffer[l++] = '\n'; | 1088 | buffer[l++] = '\n'; |
1089 | return l; | 1089 | return l; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | static struct module_attribute modinfo_taint = | 1092 | static struct module_attribute modinfo_taint = |
1093 | __ATTR(taint, 0444, show_taint, NULL); | 1093 | __ATTR(taint, 0444, show_taint, NULL); |
1094 | 1094 | ||
/*
 * Attributes created for every module under /sys/module/<name>/.
 * NULL-terminated; refcnt exists only when unloading is configured.
 */
static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};
1108 | 1108 | ||
/* The vermagic every loaded module must match (kernel version, SMP,
 * preempt flags, ...). */
static const char vermagic[] = VERMAGIC_STRING;

/*
 * Let a mismatching module in anyway if CONFIG_MODULE_FORCE_LOAD is set:
 * taint the kernel and the module and warn once.  Otherwise refuse with
 * -ENOEXEC.
 */
static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
1122 | 1122 | ||
1123 | #ifdef CONFIG_MODVERSIONS | 1123 | #ifdef CONFIG_MODVERSIONS |
1124 | /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ | 1124 | /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ |
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	/* Only vmlinux's own crcs (crc_owner == NULL) are relocated. */
	if (!crc_owner)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}
1134 | 1134 | ||
1135 | static int check_version(Elf_Shdr *sechdrs, | 1135 | static int check_version(Elf_Shdr *sechdrs, |
1136 | unsigned int versindex, | 1136 | unsigned int versindex, |
1137 | const char *symname, | 1137 | const char *symname, |
1138 | struct module *mod, | 1138 | struct module *mod, |
1139 | const unsigned long *crc, | 1139 | const unsigned long *crc, |
1140 | const struct module *crc_owner) | 1140 | const struct module *crc_owner) |
1141 | { | 1141 | { |
1142 | unsigned int i, num_versions; | 1142 | unsigned int i, num_versions; |
1143 | struct modversion_info *versions; | 1143 | struct modversion_info *versions; |
1144 | 1144 | ||
1145 | /* Exporting module didn't supply crcs? OK, we're already tainted. */ | 1145 | /* Exporting module didn't supply crcs? OK, we're already tainted. */ |
1146 | if (!crc) | 1146 | if (!crc) |
1147 | return 1; | 1147 | return 1; |
1148 | 1148 | ||
1149 | /* No versions at all? modprobe --force does this. */ | 1149 | /* No versions at all? modprobe --force does this. */ |
1150 | if (versindex == 0) | 1150 | if (versindex == 0) |
1151 | return try_to_force_load(mod, symname) == 0; | 1151 | return try_to_force_load(mod, symname) == 0; |
1152 | 1152 | ||
1153 | versions = (void *) sechdrs[versindex].sh_addr; | 1153 | versions = (void *) sechdrs[versindex].sh_addr; |
1154 | num_versions = sechdrs[versindex].sh_size | 1154 | num_versions = sechdrs[versindex].sh_size |
1155 | / sizeof(struct modversion_info); | 1155 | / sizeof(struct modversion_info); |
1156 | 1156 | ||
1157 | for (i = 0; i < num_versions; i++) { | 1157 | for (i = 0; i < num_versions; i++) { |
1158 | if (strcmp(versions[i].name, symname) != 0) | 1158 | if (strcmp(versions[i].name, symname) != 0) |
1159 | continue; | 1159 | continue; |
1160 | 1160 | ||
1161 | if (versions[i].crc == maybe_relocated(*crc, crc_owner)) | 1161 | if (versions[i].crc == maybe_relocated(*crc, crc_owner)) |
1162 | return 1; | 1162 | return 1; |
1163 | pr_debug("Found checksum %lX vs module %lX\n", | 1163 | pr_debug("Found checksum %lX vs module %lX\n", |
1164 | maybe_relocated(*crc, crc_owner), versions[i].crc); | 1164 | maybe_relocated(*crc, crc_owner), versions[i].crc); |
1165 | goto bad_version; | 1165 | goto bad_version; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | pr_warn("%s: no symbol version for %s\n", mod->name, symname); | 1168 | pr_warn("%s: no symbol version for %s\n", mod->name, symname); |
1169 | return 0; | 1169 | return 0; |
1170 | 1170 | ||
1171 | bad_version: | 1171 | bad_version: |
1172 | printk("%s: disagrees about version of symbol %s\n", | 1172 | printk("%s: disagrees about version of symbol %s\n", |
1173 | mod->name, symname); | 1173 | mod->name, symname); |
1174 | return 0; | 1174 | return 0; |
1175 | } | 1175 | } |
1176 | 1176 | ||
/*
 * Verify the module was built against a compatible "struct module"
 * layout, using the CRC of the kernel's module_layout symbol as proxy.
 */
static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}
1192 | 1192 | ||
/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	const char *a = amagic;
	const char *b = bmagic;

	/* CRCs already pin the ABI, so skip the leading version token
	 * (everything up to the first space). */
	if (has_crcs) {
		a += strcspn(a, " ");
		b += strcspn(b, " ");
	}
	return strcmp(a, b) == 0;
}
#else /* !CONFIG_MODVERSIONS */
/* Without CONFIG_MODVERSIONS every version check trivially succeeds and
 * vermagic strings are compared in full (no CRC escape hatch). */
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
1227 | 1227 | ||
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/* module_mutex keeps @owner stable until ref_module() pins it. */
	mutex_lock(&module_mutex);
	/* Proprietary modules may not see GPL-only exports. */
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	/* May fail with -EBUSY while @owner is still initializing; the
	 * caller (resolve_symbol_wait) retries on that. */
	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}
1264 | 1264 | ||
/*
 * Like resolve_symbol(), but when the owning module is still running its
 * init (-EBUSY) wait on module_wq -- up to 30s -- for it to finish.
 * Note the condition re-runs resolve_symbol() on each wakeup.
 */
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
			30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
1282 | 1282 | ||
1283 | /* | 1283 | /* |
1284 | * /sys/module/foo/sections stuff | 1284 | * /sys/module/foo/sections stuff |
1285 | * J. Corbet <corbet@lwn.net> | 1285 | * J. Corbet <corbet@lwn.net> |
1286 | */ | 1286 | */ |
1287 | #ifdef CONFIG_SYSFS | 1287 | #ifdef CONFIG_SYSFS |
1288 | 1288 | ||
1289 | #ifdef CONFIG_KALLSYMS | 1289 | #ifdef CONFIG_KALLSYMS |
1290 | static inline bool sect_empty(const Elf_Shdr *sect) | 1290 | static inline bool sect_empty(const Elf_Shdr *sect) |
1291 | { | 1291 | { |
1292 | return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; | 1292 | return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; |
1293 | } | 1293 | } |
1294 | 1294 | ||
/* One sysfs attribute per loaded ELF section. */
struct module_sect_attr
{
	struct module_attribute mattr;
	char *name;		/* kstrdup'd section name; freed in free_sect_attrs */
	unsigned long address;	/* section load address (sh_addr) */
};

/* Container for the whole "sections" attribute group; allocated as one
 * block with the attrs[] flexible tail (see add_sect_attrs). */
struct module_sect_attrs
{
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};
1308 | 1308 | ||
1309 | static ssize_t module_sect_show(struct module_attribute *mattr, | 1309 | static ssize_t module_sect_show(struct module_attribute *mattr, |
1310 | struct module_kobject *mk, char *buf) | 1310 | struct module_kobject *mk, char *buf) |
1311 | { | 1311 | { |
1312 | struct module_sect_attr *sattr = | 1312 | struct module_sect_attr *sattr = |
1313 | container_of(mattr, struct module_sect_attr, mattr); | 1313 | container_of(mattr, struct module_sect_attr, mattr); |
1314 | return sprintf(buf, "0x%pK\n", (void *)sattr->address); | 1314 | return sprintf(buf, "0x%pK\n", (void *)sattr->address); |
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | static void free_sect_attrs(struct module_sect_attrs *sect_attrs) | 1317 | static void free_sect_attrs(struct module_sect_attrs *sect_attrs) |
1318 | { | 1318 | { |
1319 | unsigned int section; | 1319 | unsigned int section; |
1320 | 1320 | ||
1321 | for (section = 0; section < sect_attrs->nsections; section++) | 1321 | for (section = 0; section < sect_attrs->nsections; section++) |
1322 | kfree(sect_attrs->attrs[section].name); | 1322 | kfree(sect_attrs->attrs[section].name); |
1323 | kfree(sect_attrs); | 1323 | kfree(sect_attrs); |
1324 | } | 1324 | } |
1325 | 1325 | ||
/*
 * Create /sys/module/<name>/sections/ with one attribute per loaded ELF
 * section.  Failures are non-fatal: on error everything is freed and
 * mod->sect_attrs stays NULL.
 */
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	/* One allocation, two regions: [0] the container + attrs[] array
	 * (aligned up), [1] the NULL-terminated struct attribute * list
	 * that the group points at. */
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
				      GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		/* Bump nsections only after the name is owned, so the
		 * error path frees exactly the names allocated so far. */
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}
1379 | 1379 | ||
1380 | static void remove_sect_attrs(struct module *mod) | 1380 | static void remove_sect_attrs(struct module *mod) |
1381 | { | 1381 | { |
1382 | if (mod->sect_attrs) { | 1382 | if (mod->sect_attrs) { |
1383 | sysfs_remove_group(&mod->mkobj.kobj, | 1383 | sysfs_remove_group(&mod->mkobj.kobj, |
1384 | &mod->sect_attrs->grp); | 1384 | &mod->sect_attrs->grp); |
1385 | /* We are positive that no one is using any sect attrs | 1385 | /* We are positive that no one is using any sect attrs |
1386 | * at this point. Deallocate immediately. */ | 1386 | * at this point. Deallocate immediately. */ |
1387 | free_sect_attrs(mod->sect_attrs); | 1387 | free_sect_attrs(mod->sect_attrs); |
1388 | mod->sect_attrs = NULL; | 1388 | mod->sect_attrs = NULL; |
1389 | } | 1389 | } |
1390 | } | 1390 | } |
1391 | 1391 | ||
1392 | /* | 1392 | /* |
1393 | * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. | 1393 | * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. |
1394 | */ | 1394 | */ |
1395 | 1395 | ||
/* Bookkeeping for /sys/module/<name>/notes: the directory kobject plus
 * one bin_attribute per SHT_NOTE section in the trailing array. */
struct module_notes_attrs {
	struct kobject *dir;		/* the "notes" directory */
	unsigned int notes;		/* entries in attrs[] */
	struct bin_attribute attrs[0];
};
1401 | 1401 | ||
/* sysfs read handler for one note section; bin_attr->private points at
 * the section's in-core contents (set up in add_notes_attrs). */
static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}
1412 | 1412 | ||
1413 | static void free_notes_attrs(struct module_notes_attrs *notes_attrs, | 1413 | static void free_notes_attrs(struct module_notes_attrs *notes_attrs, |
1414 | unsigned int i) | 1414 | unsigned int i) |
1415 | { | 1415 | { |
1416 | if (notes_attrs->dir) { | 1416 | if (notes_attrs->dir) { |
1417 | while (i-- > 0) | 1417 | while (i-- > 0) |
1418 | sysfs_remove_bin_file(notes_attrs->dir, | 1418 | sysfs_remove_bin_file(notes_attrs->dir, |
1419 | ¬es_attrs->attrs[i]); | 1419 | ¬es_attrs->attrs[i]); |
1420 | kobject_put(notes_attrs->dir); | 1420 | kobject_put(notes_attrs->dir); |
1421 | } | 1421 | } |
1422 | kfree(notes_attrs); | 1422 | kfree(notes_attrs); |
1423 | } | 1423 | } |
1424 | 1424 | ||
/*
 * Publish each loaded SHT_NOTE section under /sys/module/<name>/notes/.
 * Non-fatal on failure; mod->notes_attrs stays NULL then.
 */
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	/* 'loaded' indexes sect_attrs->attrs[], which holds one entry per
	 * non-empty section in the same order; it supplies the already
	 * kstrdup'd section name for the bin attribute. */
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	/* @i == number of bin files already created (dir-failure path has
	 * dir == NULL, so @i is ignored there). */
	free_notes_attrs(notes_attrs, i);
}
1483 | 1483 | ||
1484 | static void remove_notes_attrs(struct module *mod) | 1484 | static void remove_notes_attrs(struct module *mod) |
1485 | { | 1485 | { |
1486 | if (mod->notes_attrs) | 1486 | if (mod->notes_attrs) |
1487 | free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); | 1487 | free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); |
1488 | } | 1488 | } |
1489 | 1489 | ||
1490 | #else | 1490 | #else |
1491 | 1491 | ||
/*
 * CONFIG_KALLSYMS=n: section and notes attributes are not exported
 * through sysfs, so these hooks compile down to empty stubs and the
 * callers stay unconditional.
 */
static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
1509 | #endif /* CONFIG_KALLSYMS */ | 1509 | #endif /* CONFIG_KALLSYMS */ |
1510 | 1510 | ||
/*
 * For every module this one uses, drop a symlink named after us into
 * the target's "holders" directory, so userspace can see reverse
 * dependencies.  Only meaningful with CONFIG_MODULE_UNLOAD, where
 * target_list is maintained.
 */
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		/* Best effort: the return value is captured only to
		 * silence warn_unused_result; failure is ignored. */
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}
1525 | 1525 | ||
/*
 * Remove the "holders" symlinks created by add_usage_links().
 * Walks the same target_list under module_mutex.
 */
static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}
1537 | 1537 | ||
1538 | static int module_add_modinfo_attrs(struct module *mod) | 1538 | static int module_add_modinfo_attrs(struct module *mod) |
1539 | { | 1539 | { |
1540 | struct module_attribute *attr; | 1540 | struct module_attribute *attr; |
1541 | struct module_attribute *temp_attr; | 1541 | struct module_attribute *temp_attr; |
1542 | int error = 0; | 1542 | int error = 0; |
1543 | int i; | 1543 | int i; |
1544 | 1544 | ||
1545 | mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * | 1545 | mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * |
1546 | (ARRAY_SIZE(modinfo_attrs) + 1)), | 1546 | (ARRAY_SIZE(modinfo_attrs) + 1)), |
1547 | GFP_KERNEL); | 1547 | GFP_KERNEL); |
1548 | if (!mod->modinfo_attrs) | 1548 | if (!mod->modinfo_attrs) |
1549 | return -ENOMEM; | 1549 | return -ENOMEM; |
1550 | 1550 | ||
1551 | temp_attr = mod->modinfo_attrs; | 1551 | temp_attr = mod->modinfo_attrs; |
1552 | for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { | 1552 | for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { |
1553 | if (!attr->test || | 1553 | if (!attr->test || |
1554 | (attr->test && attr->test(mod))) { | 1554 | (attr->test && attr->test(mod))) { |
1555 | memcpy(temp_attr, attr, sizeof(*temp_attr)); | 1555 | memcpy(temp_attr, attr, sizeof(*temp_attr)); |
1556 | sysfs_attr_init(&temp_attr->attr); | 1556 | sysfs_attr_init(&temp_attr->attr); |
1557 | error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr); | 1557 | error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr); |
1558 | ++temp_attr; | 1558 | ++temp_attr; |
1559 | } | 1559 | } |
1560 | } | 1560 | } |
1561 | return error; | 1561 | return error; |
1562 | } | 1562 | } |
1563 | 1563 | ||
/*
 * Remove the per-module modinfo files created by
 * module_add_modinfo_attrs() and free the attribute array.  The array
 * is NUL-terminated (kzalloc + one spare slot), so a NULL attr.name
 * marks the end.
 */
static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj,&attr->attr);
		/* Per-attribute destructor, if any. */
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
1579 | 1579 | ||
/*
 * Drop the final reference on the module's kobject and wait until its
 * release has actually run.  The on-stack completion is published via
 * mkobj.kobj_completion; the ktype's release is expected to complete
 * it, so on return the sysfs directory is truly gone and the module
 * name can be reused by a fresh load.
 */
static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}
1587 | 1587 | ||
/*
 * Initialise and register /sys/module/<name>/ for a loading module.
 *
 * Fails with -EINVAL if module sysfs is not up yet, or if a kobject
 * with this name already exists (module already loaded or still being
 * torn down).  The KOBJ_ADD uevent is deliberately deferred until
 * mod_sysfs_setup() has populated the directory.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	/* Name collision check; kset_find_obj takes a reference. */
	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	/* The embedded kobject may hold stale bits from a previous
	 * load attempt of this struct; start from a clean slate. */
	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}
1620 | 1620 | ||
/*
 * Build the module's complete sysfs tree: the base kobject, the
 * "holders" directory, parameter files, modinfo files, usage links,
 * section and notes attributes -- then announce it with KOBJ_ADD.
 *
 * On failure the goto ladder unwinds in reverse creation order.
 * Returns 0 on success, negative errno on failure.
 */
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	/* These three are best-effort and cannot fail the load. */
	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	/* Uevent was deferred by mod_sysfs_init(); fire it now that
	 * the directory is fully populated. */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}
1662 | 1662 | ||
/*
 * Final sysfs release: notes, then section attributes, then the module
 * kobject itself.  mod_kobject_put() waits for the kobject release,
 * so the /sys/module/<name> directory is gone when this returns.
 */
static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}
1669 | 1669 | ||
1670 | #else /* !CONFIG_SYSFS */ | 1670 | #else /* !CONFIG_SYSFS */ |
1671 | 1671 | ||
/*
 * CONFIG_SYSFS=n: no module directories exist, so setup trivially
 * succeeds and the teardown helpers are no-ops, keeping callers
 * unconditional.
 */
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}
1691 | 1691 | ||
1692 | #endif /* CONFIG_SYSFS */ | 1692 | #endif /* CONFIG_SYSFS */ |
1693 | 1693 | ||
/*
 * Undo mod_sysfs_setup() for a dying module: usage links, modinfo
 * files, parameter files, the "drivers" and "holders" directories,
 * and finally the kobject itself via mod_sysfs_fini().
 */
static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	/* kobject_put(NULL) is a no-op, so missing dirs are fine. */
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
1703 | 1703 | ||
1704 | /* | 1704 | /* |
1705 | * unlink the module with the whole machine is stopped with interrupts off | 1705 | * unlink the module with the whole machine is stopped with interrupts off |
1706 | * - this defends against kallsyms not taking locks | 1706 | * - this defends against kallsyms not taking locks |
1707 | */ | 1707 | */ |
/* stop_machine() callback: runs with every other CPU halted and
 * interrupts off, so the module list and BUG table can be edited
 * without kallsyms or the oops path racing against us. */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}
1715 | 1715 | ||
1716 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | 1716 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX |
1717 | /* | 1717 | /* |
1718 | * LKM RO/NX protection: protect module's text/ro-data | 1718 | * LKM RO/NX protection: protect module's text/ro-data |
1719 | * from modification and any data from execution. | 1719 | * from modification and any data from execution. |
1720 | */ | 1720 | */ |
1721 | void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) | 1721 | void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) |
1722 | { | 1722 | { |
1723 | unsigned long begin_pfn = PFN_DOWN((unsigned long)start); | 1723 | unsigned long begin_pfn = PFN_DOWN((unsigned long)start); |
1724 | unsigned long end_pfn = PFN_DOWN((unsigned long)end); | 1724 | unsigned long end_pfn = PFN_DOWN((unsigned long)end); |
1725 | 1725 | ||
1726 | if (end_pfn > begin_pfn) | 1726 | if (end_pfn > begin_pfn) |
1727 | set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); | 1727 | set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); |
1728 | } | 1728 | } |
1729 | 1729 | ||
/*
 * Apply RO/NX protection to one module allocation laid out as
 * [text | rodata | writable data], i.e.:
 *   base .. base+text_size   : executable text
 *   base .. base+ro_size     : read-only region (text + rodata)
 *   base .. base+total_size  : whole allocation
 *
 * Rounding is asymmetric on purpose: RO rounds the end down (never
 * write-protect a page shared with writable data), NX rounds the start
 * up (never mark a page containing text non-executable).
 */
static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}
1759 | 1759 | ||
/*
 * Undo set_section_ro_nx() on the module core before freeing it:
 * restore X on the data region and RW on the text/rodata region so
 * the allocator can reuse the pages.
 */
static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}
1769 | 1769 | ||
/*
 * Same as unset_module_core_ro_nx(), but for the discardable init
 * region, which is freed separately after the module's init function
 * has run.
 */
static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}
1779 | 1779 | ||
/* Iterate through all modules and set each module's text as RW.
 * Used by callers (e.g. ftrace) that must patch module text in place.
 * Modules still in MODULE_STATE_UNFORMED are skipped: their text is
 * owned by the loader at that point and is handled there. */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}
1802 | 1802 | ||
/* Iterate through all modules and set each module's text as RO,
 * restoring protection after set_all_modules_text_rw().  UNFORMED
 * modules are skipped so a module still being loaded is not flipped
 * back to RO underneath the loader (see the ftrace_module_init()
 * ordering fix this file was merged with). */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
1825 | #else | 1825 | #else |
/* CONFIG_DEBUG_SET_MODULE_RONX=n: RO/NX protection is compiled out. */
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
1829 | #endif | 1829 | #endif |
1830 | 1830 | ||
/*
 * Default module memory release; weak so architectures with special
 * module address spaces can override it.
 */
void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}
1835 | 1835 | ||
/* Default no-op; weak hook for arch-specific per-module cleanup. */
void __weak module_arch_cleanup(struct module *mod)
{
}
1839 | 1839 | ||
/* Free a module, remove from lists, etc.
 *
 * Teardown order matters: sysfs first (no new users arrive that way),
 * then the module is marked UNFORMED so concurrent list walkers skip
 * it, and only after unlinking under stop_machine() is memory freed.
 */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that noone uses it while it's being deconstructed. */
	mod->state = MODULE_STATE_UNFORMED;

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	stop_machine(__unlink_module, mod, NULL);
	mutex_unlock(&module_mutex);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes: */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_free(mod, mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}
1885 | 1885 | ||
/*
 * Look up an exported symbol by name and pin its owning module so it
 * cannot be unloaded while the caller uses the address.  Returns the
 * symbol's address, or NULL if not found or the owner could not be
 * pinned.  preempt_disable() keeps the owner alive across the lookup
 * until the reference is taken.
 */
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	/* NOTE(review): sym is cleared when strong_try_module_get()
	 * returns nonzero -- presumably nonzero means "failed to take
	 * a reference"; confirm against that helper's definition. */
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
1900 | 1900 | ||
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * Every export class the module provides (normal, GPL, GPL-future and,
 * if configured, the "unused" variants) is checked against the global
 * namespace.  Returns 0 if all names are unique, -ENOEXEC on the first
 * duplicate.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	/* Table of (symbol array, count) pairs to scan. */
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
1937 | 1937 | ||
/* Change all symbols so that st_value encodes the pointer directly. */
/*
 * Walk the module's ELF symbol table and turn every st_value into a
 * final kernel address: undefined symbols are resolved against the
 * exporters, section-relative symbols get their section base (or the
 * percpu allocation) added.  Returns 0 or a negative errno; on error
 * the walk still continues so all problems are reported.
 */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	/* Symbol 0 is the mandatory ELF null entry; start at 1. */
	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen. */
			pr_debug("Common symbol: %s\n", name);
			printk("%s: please compile with -fno-common\n",
			       mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			/* May sleep waiting for another module's init. */
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
2001 | 2001 | ||
/*
 * Apply every REL/RELA relocation section whose target section is
 * allocated (SHF_ALLOC), via the arch hooks apply_relocate() /
 * apply_relocate_add().  Stops at the first failure and returns its
 * (negative) error code, otherwise 0.
 */
static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* sh_info of a reloc section is the section it patches. */
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}
2030 | 2030 | ||
/* Additional bytes needed by arch in front of individual sections */
/*
 * Weak default used by get_offset(); architectures override it when
 * they need per-section prefix space (e.g. for trampolines).
 */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}
2038 | 2038 | ||
/* Update size with this section: return offset. */
/*
 * Reserve room for @sechdr in the running region size @*size: add any
 * arch-required prefix, align to the section's sh_addralign (treating
 * 0 as 1), account for the section's bytes, and return the aligned
 * offset at which the section will live.
 */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	/* sh_addralign may legally be 0; ?: maps that to alignment 1. */
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
2050 | 2050 | ||
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	/*
	 * One mask pair per layout class: [0] = flags a section must
	 * have, [1] = flags it must NOT have.  Order defines placement
	 * order: text, ro-data, rw-data, small data.
	 */
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	/* ~0UL marks "not placed yet"; sh_entsize is reused as offset. */
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	/* First pass: everything except ".init*" goes into the core part. */
	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		/* Record class boundaries used later for RO/NX protection. */
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	/* Second pass: ".init*" sections; high bit flags the init part. */
	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
2130 | 2130 | ||
/*
 * Record the module's license: a missing or non-GPL-compatible license
 * taints both the module and the kernel (warning only on the first
 * proprietary module).
 */
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		/* Warn once; subsequent proprietary modules taint silently. */
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}
2144 | 2144 | ||
/* Parse tag=value strings from .modinfo section */
/*
 * Advance from @string to the start of the following NUL-terminated
 * string, skipping the current string and any NUL padding after it.
 * @*secsize holds the bytes remaining in the section and is decremented
 * once per byte consumed; returns NULL when the section runs out.
 */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Step over the current string's characters. */
	for (; *string; string++) {
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Step over the terminator and any zero padding. */
	for (; !*string; string++) {
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}
2163 | 2163 | ||
/*
 * Find "tag=value" in the .modinfo section and return a pointer to the
 * value (just past the '='), or NULL if the tag is absent.
 */
static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	/* .modinfo is a series of NUL-terminated "tag=value" strings. */
	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
2177 | 2177 | ||
/*
 * Call each registered modinfo attribute's ->setup() hook with the
 * matching .modinfo value (NULL when the tag is absent).
 * modinfo_attrs is a NULL-terminated table defined elsewhere in this file.
 */
static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}
2188 | 2188 | ||
/*
 * Release per-module state created by setup_modinfo(): call each
 * modinfo attribute's ->free() hook, if any.
 */
static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}
2199 | 2199 | ||
2200 | #ifdef CONFIG_KALLSYMS | 2200 | #ifdef CONFIG_KALLSYMS |
2201 | 2201 | ||
/* lookup symbol in given range of kernel_symbols */
/*
 * Binary search [start, stop) for @name; the export tables are sorted
 * by name at build time, which is what makes bsearch() valid here.
 */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}
2210 | 2210 | ||
2211 | static int is_exported(const char *name, unsigned long value, | 2211 | static int is_exported(const char *name, unsigned long value, |
2212 | const struct module *mod) | 2212 | const struct module *mod) |
2213 | { | 2213 | { |
2214 | const struct kernel_symbol *ks; | 2214 | const struct kernel_symbol *ks; |
2215 | if (!mod) | 2215 | if (!mod) |
2216 | ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); | 2216 | ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); |
2217 | else | 2217 | else |
2218 | ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); | 2218 | ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); |
2219 | return ks != NULL && ks->value == value; | 2219 | return ks != NULL && ks->value == value; |
2220 | } | 2220 | } |
2221 | 2221 | ||
/* As per nm */
/*
 * Classify an ELF symbol with the single-character codes nm(1) uses
 * (t=text, r=rodata, d=data, b=bss, s/g=small bss/data, a=absolute,
 * U=undefined, v/w=weak object/other, n=debug, ?=unknown), based on
 * the flags and type of its containing section.
 */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	/* Weak binding takes precedence over section classification. */
	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS)
		return 'a';
	/* Reserved indices (SHN_COMMON etc.) have no section to inspect. */
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	/* Allocated, non-bss sections: distinguish ro/small/plain data. */
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	/* SHT_NOBITS == bss-like; 's' for arch small-data bss. */
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug")) {
		return 'n';
	}
	return '?';
}
2262 | 2262 | ||
/*
 * Decide whether a symbol is kept in the permanent (core) kallsyms
 * table after the module's init part is freed: it must be named, live
 * in a valid allocated non-init section and -- unless KALLSYMS_ALL --
 * in an executable one.
 */
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum)
{
	const Elf_Shdr *sec;

	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    /* INIT_OFFSET_MASK in sh_entsize marks init-part sections. */
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}
2283 | 2283 | ||
/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
 */
/*
 * Reserve space in the module layout for symbol data: the full symtab
 * and strtab go at the end of the init part (freed after init), while
 * room for just the core symbols and their strings is appended to the
 * core part; the chosen offsets are recorded in info->symoffs/stroffs
 * for add_kallsyms() to fill in later.
 */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		/* i == 0 keeps the mandatory null symbol in the core set. */
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_size += strtab_size;

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
2327 | 2327 | ||
/*
 * Populate the module's kallsyms data: point mod->symtab/strtab at the
 * (init-part) copies, rewrite each st_info to its nm-style type code,
 * then copy the core-worthy symbols and their strings into the space
 * layout_symtab() reserved in the core image so they survive after the
 * init part is freed.
 */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	mod->symtab = (void *)symsec->sh_addr;
	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);

	mod->core_symtab = dst = mod->module_core + info->symoffs;
	mod->core_strtab = s = mod->module_core + info->stroffs;
	src = mod->symtab;
	/* Same selection as layout_symtab(), so the reserved space fits. */
	for (ndst = i = 0; i < mod->num_symtab; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			dst[ndst] = src[i];
			dst[ndst++].st_name = s - mod->core_strtab;
			s += strlcpy(s, &mod->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_num_syms = ndst;
}
2359 | #else | 2359 | #else |
/* !CONFIG_KALLSYMS stub: no symbol-table space to reserve. */
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}
2363 | 2363 | ||
/* !CONFIG_KALLSYMS stub: no symbol data to populate. */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
2367 | #endif /* CONFIG_KALLSYMS */ | 2367 | #endif /* CONFIG_KALLSYMS */ |
2368 | 2368 | ||
/*
 * Register the module's _ddebug descriptor table with the dynamic
 * debug core (no-op when @debug is NULL or DYNAMIC_DEBUG is off);
 * registration failure is only logged, not propagated.
 */
static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}
2379 | 2379 | ||
2380 | static void dynamic_debug_remove(struct _ddebug *debug) | 2380 | static void dynamic_debug_remove(struct _ddebug *debug) |
2381 | { | 2381 | { |
2382 | if (debug) | 2382 | if (debug) |
2383 | ddebug_remove_module(debug->modname); | 2383 | ddebug_remove_module(debug->modname); |
2384 | } | 2384 | } |
2385 | 2385 | ||
/*
 * Weak default allocator for module text/data: executable vmalloc
 * memory.  Architectures override this to place modules within
 * relocation range of the kernel image.
 */
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}
2390 | 2390 | ||
/*
 * module_alloc() wrapper that also widens the global
 * [module_addr_min, module_addr_max] range (under module_mutex) so
 * address-to-module lookups can quickly reject non-module addresses.
 */
static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		mutex_lock(&module_mutex);
		/* Update module bounds. */
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
		mutex_unlock(&module_mutex);
	}
	return ret;
}
2406 | 2406 | ||
#ifdef CONFIG_DEBUG_KMEMLEAK
/*
 * Tell kmemleak which parts of the module to scan for pointers to
 * tracked allocations: the struct module itself plus every allocated,
 * writable, non-executable section (i.e. the data sections).
 */
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that's not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
/* kmemleak disabled: nothing to register. */
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif
2433 | 2433 | ||
#ifdef CONFIG_MODULE_SIG
/*
 * Verify the signature appended to the module image, if any.
 * On success info->sig_ok is set and info->len is shrunk so that the
 * signature and marker are no longer part of the image.
 */
static int module_sig_check(struct load_info *info)
{
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;
	int err = -ENOKEY;

	/* A signed module ends with the signature and the marker string. */
	if (info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING,
		   markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* In FIPS mode any verification failure is fatal. */
	if (err < 0 && fips_enabled)
		panic("Module verification failed with error %d in FIPS mode\n",
		      err);

	/* A merely-missing signature only matters when enforcement is on. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */
2468 | 2468 | ||
2469 | /* Sanity checks against invalid binaries, wrong arch, weird elf version. */ | 2469 | /* Sanity checks against invalid binaries, wrong arch, weird elf version. */ |
2470 | static int elf_header_check(struct load_info *info) | 2470 | static int elf_header_check(struct load_info *info) |
2471 | { | 2471 | { |
2472 | if (info->len < sizeof(*(info->hdr))) | 2472 | if (info->len < sizeof(*(info->hdr))) |
2473 | return -ENOEXEC; | 2473 | return -ENOEXEC; |
2474 | 2474 | ||
2475 | if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 | 2475 | if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 |
2476 | || info->hdr->e_type != ET_REL | 2476 | || info->hdr->e_type != ET_REL |
2477 | || !elf_check_arch(info->hdr) | 2477 | || !elf_check_arch(info->hdr) |
2478 | || info->hdr->e_shentsize != sizeof(Elf_Shdr)) | 2478 | || info->hdr->e_shentsize != sizeof(Elf_Shdr)) |
2479 | return -ENOEXEC; | 2479 | return -ENOEXEC; |
2480 | 2480 | ||
2481 | if (info->hdr->e_shoff >= info->len | 2481 | if (info->hdr->e_shoff >= info->len |
2482 | || (info->hdr->e_shnum * sizeof(Elf_Shdr) > | 2482 | || (info->hdr->e_shnum * sizeof(Elf_Shdr) > |
2483 | info->len - info->hdr->e_shoff)) | 2483 | info->len - info->hdr->e_shoff)) |
2484 | return -ENOEXEC; | 2484 | return -ENOEXEC; |
2485 | 2485 | ||
2486 | return 0; | 2486 | return 0; |
2487 | } | 2487 | } |
2488 | 2488 | ||
2489 | /* Sets info->hdr and info->len. */ | 2489 | /* Sets info->hdr and info->len. */ |
2490 | static int copy_module_from_user(const void __user *umod, unsigned long len, | 2490 | static int copy_module_from_user(const void __user *umod, unsigned long len, |
2491 | struct load_info *info) | 2491 | struct load_info *info) |
2492 | { | 2492 | { |
2493 | int err; | 2493 | int err; |
2494 | 2494 | ||
2495 | info->len = len; | 2495 | info->len = len; |
2496 | if (info->len < sizeof(*(info->hdr))) | 2496 | if (info->len < sizeof(*(info->hdr))) |
2497 | return -ENOEXEC; | 2497 | return -ENOEXEC; |
2498 | 2498 | ||
2499 | err = security_kernel_module_from_file(NULL); | 2499 | err = security_kernel_module_from_file(NULL); |
2500 | if (err) | 2500 | if (err) |
2501 | return err; | 2501 | return err; |
2502 | 2502 | ||
2503 | /* Suck in entire file: we'll want most of it. */ | 2503 | /* Suck in entire file: we'll want most of it. */ |
2504 | info->hdr = vmalloc(info->len); | 2504 | info->hdr = vmalloc(info->len); |
2505 | if (!info->hdr) | 2505 | if (!info->hdr) |
2506 | return -ENOMEM; | 2506 | return -ENOMEM; |
2507 | 2507 | ||
2508 | if (copy_from_user(info->hdr, umod, info->len) != 0) { | 2508 | if (copy_from_user(info->hdr, umod, info->len) != 0) { |
2509 | vfree(info->hdr); | 2509 | vfree(info->hdr); |
2510 | return -EFAULT; | 2510 | return -EFAULT; |
2511 | } | 2511 | } |
2512 | 2512 | ||
2513 | return 0; | 2513 | return 0; |
2514 | } | 2514 | } |
2515 | 2515 | ||
/*
 * Sets info->hdr and info->len.
 *
 * Reads the whole file behind @fd into a vmalloc'd buffer.  On success
 * info->len is the number of bytes actually read (which may be less than
 * the stat size if the file hit EOF early).  Returns 0 or a -errno; the
 * buffer is freed on every error path, so the caller owns it only on
 * success.
 */
static int copy_module_from_fd(int fd, struct load_info *info)
{
	struct fd f = fdget(fd);
	int err;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -ENOEXEC;

	/* Let the security module veto loading from this file. */
	err = security_kernel_module_from_file(f.file);
	if (err)
		goto out;

	err = vfs_getattr(&f.file->f_path, &stat);
	if (err)
		goto out;

	if (stat.size > INT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		err = -EINVAL;
		goto out;
	}

	info->hdr = vmalloc(stat.size);
	if (!info->hdr) {
		err = -ENOMEM;
		goto out;
	}

	/* kernel_read() may return short reads; loop until EOF or done. */
	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(info->hdr);
			err = bytes;
			goto out;
		}
		if (bytes == 0)
			break;
		pos += bytes;
	}
	/* err is 0 here (vfs_getattr succeeded and no read failed). */
	info->len = pos;

out:
	fdput(f);
	return err;
}
2572 | 2572 | ||
/* Free the temporary image allocated by copy_module_from_user()/_fd(). */
static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}
2577 | 2577 | ||
/*
 * Validate every section against the image length and point each
 * sh_addr at the section's data inside the temporary (vmalloc'd) copy.
 * Also records, and de-allocates, the .modinfo and __versions sections.
 */
static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/*
		 * Reject sections whose file data extends past the image.
		 * NOTE(review): sh_offset + sh_size could wrap around on a
		 * crafted image — consider an explicit overflow check.
		 */
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	/*
	 * Clearing SHF_ALLOC keeps these out of the final image.
	 * NOTE(review): when a section is absent, find_sec() presumably
	 * returns 0 and this clears flags on the null section (harmless) —
	 * confirm against find_sec()'s contract.
	 */
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}
2614 | 2614 | ||
/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;	/* first symbol table wins */
		}
	}

	/* The struct module lives in its own section in the object file. */
	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	/* index.sym is only set by the loop above; 0 means no SHT_SYMTAB. */
	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}
2670 | 2670 | ||
2671 | static int check_modinfo(struct module *mod, struct load_info *info, int flags) | 2671 | static int check_modinfo(struct module *mod, struct load_info *info, int flags) |
2672 | { | 2672 | { |
2673 | const char *modmagic = get_modinfo(info, "vermagic"); | 2673 | const char *modmagic = get_modinfo(info, "vermagic"); |
2674 | int err; | 2674 | int err; |
2675 | 2675 | ||
2676 | if (flags & MODULE_INIT_IGNORE_VERMAGIC) | 2676 | if (flags & MODULE_INIT_IGNORE_VERMAGIC) |
2677 | modmagic = NULL; | 2677 | modmagic = NULL; |
2678 | 2678 | ||
2679 | /* This is allowed: modprobe --force will invalidate it. */ | 2679 | /* This is allowed: modprobe --force will invalidate it. */ |
2680 | if (!modmagic) { | 2680 | if (!modmagic) { |
2681 | err = try_to_force_load(mod, "bad vermagic"); | 2681 | err = try_to_force_load(mod, "bad vermagic"); |
2682 | if (err) | 2682 | if (err) |
2683 | return err; | 2683 | return err; |
2684 | } else if (!same_magic(modmagic, vermagic, info->index.vers)) { | 2684 | } else if (!same_magic(modmagic, vermagic, info->index.vers)) { |
2685 | pr_err("%s: version magic '%s' should be '%s'\n", | 2685 | pr_err("%s: version magic '%s' should be '%s'\n", |
2686 | mod->name, modmagic, vermagic); | 2686 | mod->name, modmagic, vermagic); |
2687 | return -ENOEXEC; | 2687 | return -ENOEXEC; |
2688 | } | 2688 | } |
2689 | 2689 | ||
2690 | if (!get_modinfo(info, "intree")) | 2690 | if (!get_modinfo(info, "intree")) |
2691 | add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); | 2691 | add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); |
2692 | 2692 | ||
2693 | if (get_modinfo(info, "staging")) { | 2693 | if (get_modinfo(info, "staging")) { |
2694 | add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); | 2694 | add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); |
2695 | pr_warn("%s: module is from the staging directory, the quality " | 2695 | pr_warn("%s: module is from the staging directory, the quality " |
2696 | "is unknown, you have been warned.\n", mod->name); | 2696 | "is unknown, you have been warned.\n", mod->name); |
2697 | } | 2697 | } |
2698 | 2698 | ||
2699 | /* Set up license info based on the info section */ | 2699 | /* Set up license info based on the info section */ |
2700 | set_license(mod, get_modinfo(info, "license")); | 2700 | set_license(mod, get_modinfo(info, "license")); |
2701 | 2701 | ||
2702 | return 0; | 2702 | return 0; |
2703 | } | 2703 | } |
2704 | 2704 | ||
/*
 * Locate all the well-known sections in the image and record their
 * addresses/element counts in *mod (or *info for the dynamic-debug
 * table).  section_objs()/section_addr() return NULL when a section is
 * absent, so missing optional sections simply leave NULL pointers and
 * zero counts behind.
 */
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	/* Constructors may come from either .ctors or .init_array ... */
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
				sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
		       mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					sizeof(*mod->jump_entries),
					&mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	/* Dynamic-debug descriptors; consumed later from *info. */
	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}
2787 | 2787 | ||
/*
 * Allocate the final core and init memory regions and copy every
 * SHF_ALLOC section from the temporary image into them, updating each
 * sh_addr to its final address.  Returns 0 or -ENOMEM.
 */
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc_update_bounds(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	if (mod->init_size) {
		ptr = module_alloc_update_bounds(mod->init_size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			/* Don't leak the core region allocated above. */
			module_free(mod, mod->module_core);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_size);
		mod->module_init = ptr;
	} else
		mod->module_init = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * sh_entsize is read here as the section's offset within
		 * the core or init region, with INIT_OFFSET_MASK selecting
		 * the region — presumably stored by the earlier layout
		 * pass; confirm against layout_sections().
		 */
		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		/* SHT_NOBITS (.bss-style) has no file data; stays zeroed. */
		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
2850 | 2850 | ||
2851 | static int check_module_license_and_versions(struct module *mod) | 2851 | static int check_module_license_and_versions(struct module *mod) |
2852 | { | 2852 | { |
2853 | /* | 2853 | /* |
2854 | * ndiswrapper is under GPL by itself, but loads proprietary modules. | 2854 | * ndiswrapper is under GPL by itself, but loads proprietary modules. |
2855 | * Don't use add_taint_module(), as it would prevent ndiswrapper from | 2855 | * Don't use add_taint_module(), as it would prevent ndiswrapper from |
2856 | * using GPL-only symbols it needs. | 2856 | * using GPL-only symbols it needs. |
2857 | */ | 2857 | */ |
2858 | if (strcmp(mod->name, "ndiswrapper") == 0) | 2858 | if (strcmp(mod->name, "ndiswrapper") == 0) |
2859 | add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); | 2859 | add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); |
2860 | 2860 | ||
2861 | /* driverloader was caught wrongly pretending to be under GPL */ | 2861 | /* driverloader was caught wrongly pretending to be under GPL */ |
2862 | if (strcmp(mod->name, "driverloader") == 0) | 2862 | if (strcmp(mod->name, "driverloader") == 0) |
2863 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, | 2863 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, |
2864 | LOCKDEP_NOW_UNRELIABLE); | 2864 | LOCKDEP_NOW_UNRELIABLE); |
2865 | 2865 | ||
2866 | /* lve claims to be GPL but upstream won't provide source */ | 2866 | /* lve claims to be GPL but upstream won't provide source */ |
2867 | if (strcmp(mod->name, "lve") == 0) | 2867 | if (strcmp(mod->name, "lve") == 0) |
2868 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, | 2868 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, |
2869 | LOCKDEP_NOW_UNRELIABLE); | 2869 | LOCKDEP_NOW_UNRELIABLE); |
2870 | 2870 | ||
2871 | #ifdef CONFIG_MODVERSIONS | 2871 | #ifdef CONFIG_MODVERSIONS |
2872 | if ((mod->num_syms && !mod->crcs) | 2872 | if ((mod->num_syms && !mod->crcs) |
2873 | || (mod->num_gpl_syms && !mod->gpl_crcs) | 2873 | || (mod->num_gpl_syms && !mod->gpl_crcs) |
2874 | || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) | 2874 | || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) |
2875 | #ifdef CONFIG_UNUSED_SYMBOLS | 2875 | #ifdef CONFIG_UNUSED_SYMBOLS |
2876 | || (mod->num_unused_syms && !mod->unused_crcs) | 2876 | || (mod->num_unused_syms && !mod->unused_crcs) |
2877 | || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) | 2877 | || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) |
2878 | #endif | 2878 | #endif |
2879 | ) { | 2879 | ) { |
2880 | return try_to_force_load(mod, | 2880 | return try_to_force_load(mod, |
2881 | "no versions for exported symbols"); | 2881 | "no versions for exported symbols"); |
2882 | } | 2882 | } |
2883 | #endif | 2883 | #endif |
2884 | return 0; | 2884 | return 0; |
2885 | } | 2885 | } |
2886 | 2886 | ||
/*
 * Flush the instruction cache over the module's init and core regions,
 * temporarily widening the address limit so the flush runs in kernel
 * context.
 */
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}
2909 | 2909 | ||
/*
 * Default no-op hook: architectures that need to adjust section contents
 * or sizes before layout override this __weak definition.
 */
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
2917 | 2917 | ||
2918 | static struct module *layout_and_allocate(struct load_info *info, int flags) | 2918 | static struct module *layout_and_allocate(struct load_info *info, int flags) |
2919 | { | 2919 | { |
2920 | /* Module within temporary copy. */ | 2920 | /* Module within temporary copy. */ |
2921 | struct module *mod; | 2921 | struct module *mod; |
2922 | int err; | 2922 | int err; |
2923 | 2923 | ||
2924 | mod = setup_load_info(info, flags); | 2924 | mod = setup_load_info(info, flags); |
2925 | if (IS_ERR(mod)) | 2925 | if (IS_ERR(mod)) |
2926 | return mod; | 2926 | return mod; |
2927 | 2927 | ||
2928 | err = check_modinfo(mod, info, flags); | 2928 | err = check_modinfo(mod, info, flags); |
2929 | if (err) | 2929 | if (err) |
2930 | return ERR_PTR(err); | 2930 | return ERR_PTR(err); |
2931 | 2931 | ||
2932 | /* Allow arches to frob section contents and sizes. */ | 2932 | /* Allow arches to frob section contents and sizes. */ |
2933 | err = module_frob_arch_sections(info->hdr, info->sechdrs, | 2933 | err = module_frob_arch_sections(info->hdr, info->sechdrs, |
2934 | info->secstrings, mod); | 2934 | info->secstrings, mod); |
2935 | if (err < 0) | 2935 | if (err < 0) |
2936 | return ERR_PTR(err); | 2936 | return ERR_PTR(err); |
2937 | 2937 | ||
2938 | /* We will do a special allocation for per-cpu sections later. */ | 2938 | /* We will do a special allocation for per-cpu sections later. */ |
2939 | info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; | 2939 | info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; |
2940 | 2940 | ||
2941 | /* Determine total sizes, and put offsets in sh_entsize. For now | 2941 | /* Determine total sizes, and put offsets in sh_entsize. For now |
2942 | this is done generically; there doesn't appear to be any | 2942 | this is done generically; there doesn't appear to be any |
2943 | special cases for the architectures. */ | 2943 | special cases for the architectures. */ |
2944 | layout_sections(mod, info); | 2944 | layout_sections(mod, info); |
2945 | layout_symtab(mod, info); | 2945 | layout_symtab(mod, info); |
2946 | 2946 | ||
2947 | /* Allocate and move to the final place */ | 2947 | /* Allocate and move to the final place */ |
2948 | err = move_module(mod, info); | 2948 | err = move_module(mod, info); |
2949 | if (err) | 2949 | if (err) |
2950 | return ERR_PTR(err); | 2950 | return ERR_PTR(err); |
2951 | 2951 | ||
2952 | /* Module has been copied to its final place now: return it. */ | 2952 | /* Module has been copied to its final place now: return it. */ |
2953 | mod = (void *)info->sechdrs[info->index.mod].sh_addr; | 2953 | mod = (void *)info->sechdrs[info->index.mod].sh_addr; |
2954 | kmemleak_load_module(mod, info); | 2954 | kmemleak_load_module(mod, info); |
2955 | return mod; | 2955 | return mod; |
2956 | } | 2956 | } |
2957 | 2957 | ||
/*
 * Undo layout_and_allocate(): release per-cpu and module memory.
 * mod is no longer valid after this!
 */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_free(mod, mod->module_init);
	module_free(mod, mod->module_core);
}
2965 | 2965 | ||
/*
 * Default no-op hook: architectures override this __weak definition to
 * do arch-specific finalization after relocation.
 */
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
2972 | 2972 | ||
2973 | static int post_relocation(struct module *mod, const struct load_info *info) | 2973 | static int post_relocation(struct module *mod, const struct load_info *info) |
2974 | { | 2974 | { |
2975 | /* Sort exception table now relocations are done. */ | 2975 | /* Sort exception table now relocations are done. */ |
2976 | sort_extable(mod->extable, mod->extable + mod->num_exentries); | 2976 | sort_extable(mod->extable, mod->extable + mod->num_exentries); |
2977 | 2977 | ||
2978 | /* Copy relocated percpu area over. */ | 2978 | /* Copy relocated percpu area over. */ |
2979 | percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, | 2979 | percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, |
2980 | info->sechdrs[info->index.pcpu].sh_size); | 2980 | info->sechdrs[info->index.pcpu].sh_size); |
2981 | 2981 | ||
2982 | /* Setup kallsyms-specific fields. */ | 2982 | /* Setup kallsyms-specific fields. */ |
2983 | add_kallsyms(mod, info); | 2983 | add_kallsyms(mod, info); |
2984 | 2984 | ||
2985 | /* Arch-specific module finalizing. */ | 2985 | /* Arch-specific module finalizing. */ |
2986 | return module_finalize(info->hdr, info->sechdrs, mod); | 2986 | return module_finalize(info->hdr, info->sechdrs, mod); |
2987 | } | 2987 | } |
2988 | 2988 | ||
2989 | /* Is this module of this name done loading? No locks held. */ | 2989 | /* Is this module of this name done loading? No locks held. */ |
2990 | static bool finished_loading(const char *name) | 2990 | static bool finished_loading(const char *name) |
2991 | { | 2991 | { |
2992 | struct module *mod; | 2992 | struct module *mod; |
2993 | bool ret; | 2993 | bool ret; |
2994 | 2994 | ||
2995 | mutex_lock(&module_mutex); | 2995 | mutex_lock(&module_mutex); |
2996 | mod = find_module_all(name, strlen(name), true); | 2996 | mod = find_module_all(name, strlen(name), true); |
2997 | ret = !mod || mod->state == MODULE_STATE_LIVE | 2997 | ret = !mod || mod->state == MODULE_STATE_LIVE |
2998 | || mod->state == MODULE_STATE_GOING; | 2998 | || mod->state == MODULE_STATE_GOING; |
2999 | mutex_unlock(&module_mutex); | 2999 | mutex_unlock(&module_mutex); |
3000 | 3000 | ||
3001 | return ret; | 3001 | return ret; |
3002 | } | 3002 | } |
3003 | 3003 | ||
/* Call module constructors (no-op unless CONFIG_CONSTRUCTORS). */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long idx;

	for (idx = 0; idx < mod->num_ctors; idx++)
		mod->ctors[idx]();
#endif
}
3014 | 3014 | ||
/*
 * This is where the real work happens: protect the module's text, run
 * its constructors and init function, transition it to LIVE, then free
 * the init sections.  On init failure the module is torn down entirely.
 */
static int do_init_module(struct module *mod)
{
	int ret = 0;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	blocking_notifier_call_chain(&module_notify_list,
			MODULE_STATE_COMING, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
				mod->core_text_size,
				mod->core_ro_size,
				mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
				mod->init_text_size,
				mod->init_ro_size,
				mod->init_size);

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		/*
		 * Init routine failed: abort.  Try to protect us from
		 * buggy refcounters: mark GOING and wait a grace period
		 * before freeing so concurrent readers see the state.
		 */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		module_put(mod);
		blocking_notifier_call_chain(&module_notify_list,
					     MODULE_STATE_GOING, mod);
		free_module(mod);
		/* Wake anyone waiting in add_unformed_module(). */
		wake_up_all(&module_wq);
		return ret;
	}
	if (ret > 0) {
		/* Positive returns are tolerated but flagged loudly. */
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by perfomring async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (current->flags & PF_USED_ASYNC)
		async_synchronize_full();

	/* Init is done: drop the init sections under module_mutex. */
	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch kallsyms over to the core-only symbol/string tables. */
	mod->num_symtab = mod->core_num_syms;
	mod->symtab = mod->core_symtab;
	mod->strtab = mod->core_strtab;
#endif
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;
}
3110 | 3110 | ||
3111 | static int may_init_module(void) | 3111 | static int may_init_module(void) |
3112 | { | 3112 | { |
3113 | if (!capable(CAP_SYS_MODULE) || modules_disabled) | 3113 | if (!capable(CAP_SYS_MODULE) || modules_disabled) |
3114 | return -EPERM; | 3114 | return -EPERM; |
3115 | 3115 | ||
3116 | return 0; | 3116 | return 0; |
3117 | } | 3117 | } |
3118 | 3118 | ||
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	/* Not visible to strong_try_module_get() etc. until COMING. */
	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			/* Other loader finished (or died); re-check the list. */
			goto again;
		}
		/* A live (or dying) module of the same name already exists. */
		err = -EEXIST;
		goto out;
	}
	list_add_rcu(&mod->list, &modules);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}
3156 | 3156 | ||
/*
 * Final checks before the module becomes visible as COMING: verify its
 * exports are unique and register its bug table.  All under module_mutex.
 */
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;

out:
	mutex_unlock(&module_mutex);
	return err;
}
3179 | 3179 | ||
/*
 * parse_args() callback for parameters the module doesn't declare.
 * Unknown parameters are warned about but never fail the load.
 */
static int unknown_module_param_cb(char *param, char *val, const char *modname)
{
	/* The magic 'dyndbg' pseudo-parameter is consumed by dynamic debug. */
	if (ddebug_dyndbg_module_param_cb(param, val, modname) != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
3188 | 3188 | ||
/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 *
 * On failure, resources are released in reverse order of acquisition via
 * the goto-cleanup chain at the bottom.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;

	err = module_sig_check(info);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		/* Unsigned modules taint the kernel but still load. */
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(info->debug, info->num_debug);

	/*
	 * Ftrace init must be called in the MODULE_STATE_UNFORMED state,
	 * i.e. before complete_formation() below moves us to COMING, so
	 * ftrace's kernel-text RW/RO conversions skip this module while
	 * its mcount sites are still being converted to nops.
	 */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
			 -32768, 32767, unknown_module_param_cb);
	if (err < 0)
		goto bug_cleanup;

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto bug_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);
 ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	wake_up_all(&module_wq);
	mutex_unlock(&module_mutex);
 free_module:
	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}
3325 | 3328 | ||
/*
 * init_module(2): load a module from a userspace memory image.
 * The image is copied in full before load_module() parses it.
 */
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	/* load_module() owns and frees the copied image from here on. */
	return load_module(&info, uargs, 0);
}
3345 | 3348 | ||
/*
 * finit_module(2): load a module from a file descriptor.
 * flags may relax modversion/vermagic checking; anything else is rejected.
 */
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = copy_module_from_fd(fd, &info);
	if (err)
		return err;

	/* load_module() owns and frees the copied image from here on. */
	return load_module(&info, uargs, flags);
}
3367 | 3370 | ||
/* Does addr fall inside the half-open region [start, start + size)? */
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	void *p = (void *)addr;

	return p >= start && p < start + size;
}
3372 | 3375 | ||
3373 | #ifdef CONFIG_KALLSYMS | 3376 | #ifdef CONFIG_KALLSYMS |
3374 | /* | 3377 | /* |
3375 | * This ignores the intensely annoying "mapping symbols" found | 3378 | * This ignores the intensely annoying "mapping symbols" found |
3376 | * in ARM ELF files: $a, $t and $d. | 3379 | * in ARM ELF files: $a, $t and $d. |
3377 | */ | 3380 | */ |
3378 | static inline int is_arm_mapping_symbol(const char *str) | 3381 | static inline int is_arm_mapping_symbol(const char *str) |
3379 | { | 3382 | { |
3380 | return str[0] == '$' && strchr("atd", str[1]) | 3383 | return str[0] == '$' && strchr("atd", str[1]) |
3381 | && (str[2] == '\0' || str[2] == '.'); | 3384 | && (str[2] == '\0' || str[2] == '.'); |
3382 | } | 3385 | } |
3383 | 3386 | ||
3384 | static const char *get_ksymbol(struct module *mod, | 3387 | static const char *get_ksymbol(struct module *mod, |
3385 | unsigned long addr, | 3388 | unsigned long addr, |
3386 | unsigned long *size, | 3389 | unsigned long *size, |
3387 | unsigned long *offset) | 3390 | unsigned long *offset) |
3388 | { | 3391 | { |
3389 | unsigned int i, best = 0; | 3392 | unsigned int i, best = 0; |
3390 | unsigned long nextval; | 3393 | unsigned long nextval; |
3391 | 3394 | ||
3392 | /* At worse, next value is at end of module */ | 3395 | /* At worse, next value is at end of module */ |
3393 | if (within_module_init(addr, mod)) | 3396 | if (within_module_init(addr, mod)) |
3394 | nextval = (unsigned long)mod->module_init+mod->init_text_size; | 3397 | nextval = (unsigned long)mod->module_init+mod->init_text_size; |
3395 | else | 3398 | else |
3396 | nextval = (unsigned long)mod->module_core+mod->core_text_size; | 3399 | nextval = (unsigned long)mod->module_core+mod->core_text_size; |
3397 | 3400 | ||
3398 | /* Scan for closest preceding symbol, and next symbol. (ELF | 3401 | /* Scan for closest preceding symbol, and next symbol. (ELF |
3399 | starts real symbols at 1). */ | 3402 | starts real symbols at 1). */ |
3400 | for (i = 1; i < mod->num_symtab; i++) { | 3403 | for (i = 1; i < mod->num_symtab; i++) { |
3401 | if (mod->symtab[i].st_shndx == SHN_UNDEF) | 3404 | if (mod->symtab[i].st_shndx == SHN_UNDEF) |
3402 | continue; | 3405 | continue; |
3403 | 3406 | ||
3404 | /* We ignore unnamed symbols: they're uninformative | 3407 | /* We ignore unnamed symbols: they're uninformative |
3405 | * and inserted at a whim. */ | 3408 | * and inserted at a whim. */ |
3406 | if (mod->symtab[i].st_value <= addr | 3409 | if (mod->symtab[i].st_value <= addr |
3407 | && mod->symtab[i].st_value > mod->symtab[best].st_value | 3410 | && mod->symtab[i].st_value > mod->symtab[best].st_value |
3408 | && *(mod->strtab + mod->symtab[i].st_name) != '\0' | 3411 | && *(mod->strtab + mod->symtab[i].st_name) != '\0' |
3409 | && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) | 3412 | && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) |
3410 | best = i; | 3413 | best = i; |
3411 | if (mod->symtab[i].st_value > addr | 3414 | if (mod->symtab[i].st_value > addr |
3412 | && mod->symtab[i].st_value < nextval | 3415 | && mod->symtab[i].st_value < nextval |
3413 | && *(mod->strtab + mod->symtab[i].st_name) != '\0' | 3416 | && *(mod->strtab + mod->symtab[i].st_name) != '\0' |
3414 | && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) | 3417 | && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) |
3415 | nextval = mod->symtab[i].st_value; | 3418 | nextval = mod->symtab[i].st_value; |
3416 | } | 3419 | } |
3417 | 3420 | ||
3418 | if (!best) | 3421 | if (!best) |
3419 | return NULL; | 3422 | return NULL; |
3420 | 3423 | ||
3421 | if (size) | 3424 | if (size) |
3422 | *size = nextval - mod->symtab[best].st_value; | 3425 | *size = nextval - mod->symtab[best].st_value; |
3423 | if (offset) | 3426 | if (offset) |
3424 | *offset = addr - mod->symtab[best].st_value; | 3427 | *offset = addr - mod->symtab[best].st_value; |
3425 | return mod->strtab + mod->symtab[best].st_name; | 3428 | return mod->strtab + mod->symtab[best].st_name; |
3426 | } | 3429 | } |
3427 | 3430 | ||
3428 | /* For kallsyms to ask for address resolution. NULL means not found. Careful | 3431 | /* For kallsyms to ask for address resolution. NULL means not found. Careful |
3429 | * not to lock to avoid deadlock on oopses, simply disable preemption. */ | 3432 | * not to lock to avoid deadlock on oopses, simply disable preemption. */ |
3430 | const char *module_address_lookup(unsigned long addr, | 3433 | const char *module_address_lookup(unsigned long addr, |
3431 | unsigned long *size, | 3434 | unsigned long *size, |
3432 | unsigned long *offset, | 3435 | unsigned long *offset, |
3433 | char **modname, | 3436 | char **modname, |
3434 | char *namebuf) | 3437 | char *namebuf) |
3435 | { | 3438 | { |
3436 | struct module *mod; | 3439 | struct module *mod; |
3437 | const char *ret = NULL; | 3440 | const char *ret = NULL; |
3438 | 3441 | ||
3439 | preempt_disable(); | 3442 | preempt_disable(); |
3440 | list_for_each_entry_rcu(mod, &modules, list) { | 3443 | list_for_each_entry_rcu(mod, &modules, list) { |
3441 | if (mod->state == MODULE_STATE_UNFORMED) | 3444 | if (mod->state == MODULE_STATE_UNFORMED) |
3442 | continue; | 3445 | continue; |
3443 | if (within_module_init(addr, mod) || | 3446 | if (within_module_init(addr, mod) || |
3444 | within_module_core(addr, mod)) { | 3447 | within_module_core(addr, mod)) { |
3445 | if (modname) | 3448 | if (modname) |
3446 | *modname = mod->name; | 3449 | *modname = mod->name; |
3447 | ret = get_ksymbol(mod, addr, size, offset); | 3450 | ret = get_ksymbol(mod, addr, size, offset); |
3448 | break; | 3451 | break; |
3449 | } | 3452 | } |
3450 | } | 3453 | } |
3451 | /* Make a copy in here where it's safe */ | 3454 | /* Make a copy in here where it's safe */ |
3452 | if (ret) { | 3455 | if (ret) { |
3453 | strncpy(namebuf, ret, KSYM_NAME_LEN - 1); | 3456 | strncpy(namebuf, ret, KSYM_NAME_LEN - 1); |
3454 | ret = namebuf; | 3457 | ret = namebuf; |
3455 | } | 3458 | } |
3456 | preempt_enable(); | 3459 | preempt_enable(); |
3457 | return ret; | 3460 | return ret; |
3458 | } | 3461 | } |
3459 | 3462 | ||
3460 | int lookup_module_symbol_name(unsigned long addr, char *symname) | 3463 | int lookup_module_symbol_name(unsigned long addr, char *symname) |
3461 | { | 3464 | { |
3462 | struct module *mod; | 3465 | struct module *mod; |
3463 | 3466 | ||
3464 | preempt_disable(); | 3467 | preempt_disable(); |
3465 | list_for_each_entry_rcu(mod, &modules, list) { | 3468 | list_for_each_entry_rcu(mod, &modules, list) { |
3466 | if (mod->state == MODULE_STATE_UNFORMED) | 3469 | if (mod->state == MODULE_STATE_UNFORMED) |
3467 | continue; | 3470 | continue; |
3468 | if (within_module_init(addr, mod) || | 3471 | if (within_module_init(addr, mod) || |
3469 | within_module_core(addr, mod)) { | 3472 | within_module_core(addr, mod)) { |
3470 | const char *sym; | 3473 | const char *sym; |
3471 | 3474 | ||
3472 | sym = get_ksymbol(mod, addr, NULL, NULL); | 3475 | sym = get_ksymbol(mod, addr, NULL, NULL); |
3473 | if (!sym) | 3476 | if (!sym) |
3474 | goto out; | 3477 | goto out; |
3475 | strlcpy(symname, sym, KSYM_NAME_LEN); | 3478 | strlcpy(symname, sym, KSYM_NAME_LEN); |
3476 | preempt_enable(); | 3479 | preempt_enable(); |
3477 | return 0; | 3480 | return 0; |
3478 | } | 3481 | } |
3479 | } | 3482 | } |
3480 | out: | 3483 | out: |
3481 | preempt_enable(); | 3484 | preempt_enable(); |
3482 | return -ERANGE; | 3485 | return -ERANGE; |
3483 | } | 3486 | } |
3484 | 3487 | ||
3485 | int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, | 3488 | int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, |
3486 | unsigned long *offset, char *modname, char *name) | 3489 | unsigned long *offset, char *modname, char *name) |
3487 | { | 3490 | { |
3488 | struct module *mod; | 3491 | struct module *mod; |
3489 | 3492 | ||
3490 | preempt_disable(); | 3493 | preempt_disable(); |
3491 | list_for_each_entry_rcu(mod, &modules, list) { | 3494 | list_for_each_entry_rcu(mod, &modules, list) { |
3492 | if (mod->state == MODULE_STATE_UNFORMED) | 3495 | if (mod->state == MODULE_STATE_UNFORMED) |
3493 | continue; | 3496 | continue; |
3494 | if (within_module_init(addr, mod) || | 3497 | if (within_module_init(addr, mod) || |
3495 | within_module_core(addr, mod)) { | 3498 | within_module_core(addr, mod)) { |
3496 | const char *sym; | 3499 | const char *sym; |
3497 | 3500 | ||
3498 | sym = get_ksymbol(mod, addr, size, offset); | 3501 | sym = get_ksymbol(mod, addr, size, offset); |
3499 | if (!sym) | 3502 | if (!sym) |
3500 | goto out; | 3503 | goto out; |
3501 | if (modname) | 3504 | if (modname) |
3502 | strlcpy(modname, mod->name, MODULE_NAME_LEN); | 3505 | strlcpy(modname, mod->name, MODULE_NAME_LEN); |
3503 | if (name) | 3506 | if (name) |
3504 | strlcpy(name, sym, KSYM_NAME_LEN); | 3507 | strlcpy(name, sym, KSYM_NAME_LEN); |
3505 | preempt_enable(); | 3508 | preempt_enable(); |
3506 | return 0; | 3509 | return 0; |
3507 | } | 3510 | } |
3508 | } | 3511 | } |
3509 | out: | 3512 | out: |
3510 | preempt_enable(); | 3513 | preempt_enable(); |
3511 | return -ERANGE; | 3514 | return -ERANGE; |
3512 | } | 3515 | } |
3513 | 3516 | ||
3514 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | 3517 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, |
3515 | char *name, char *module_name, int *exported) | 3518 | char *name, char *module_name, int *exported) |
3516 | { | 3519 | { |
3517 | struct module *mod; | 3520 | struct module *mod; |
3518 | 3521 | ||
3519 | preempt_disable(); | 3522 | preempt_disable(); |
3520 | list_for_each_entry_rcu(mod, &modules, list) { | 3523 | list_for_each_entry_rcu(mod, &modules, list) { |
3521 | if (mod->state == MODULE_STATE_UNFORMED) | 3524 | if (mod->state == MODULE_STATE_UNFORMED) |
3522 | continue; | 3525 | continue; |
3523 | if (symnum < mod->num_symtab) { | 3526 | if (symnum < mod->num_symtab) { |
3524 | *value = mod->symtab[symnum].st_value; | 3527 | *value = mod->symtab[symnum].st_value; |
3525 | *type = mod->symtab[symnum].st_info; | 3528 | *type = mod->symtab[symnum].st_info; |
3526 | strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, | 3529 | strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, |
3527 | KSYM_NAME_LEN); | 3530 | KSYM_NAME_LEN); |
3528 | strlcpy(module_name, mod->name, MODULE_NAME_LEN); | 3531 | strlcpy(module_name, mod->name, MODULE_NAME_LEN); |
3529 | *exported = is_exported(name, *value, mod); | 3532 | *exported = is_exported(name, *value, mod); |
3530 | preempt_enable(); | 3533 | preempt_enable(); |
3531 | return 0; | 3534 | return 0; |
3532 | } | 3535 | } |
3533 | symnum -= mod->num_symtab; | 3536 | symnum -= mod->num_symtab; |
3534 | } | 3537 | } |
3535 | preempt_enable(); | 3538 | preempt_enable(); |
3536 | return -ERANGE; | 3539 | return -ERANGE; |
3537 | } | 3540 | } |
3538 | 3541 | ||
3539 | static unsigned long mod_find_symname(struct module *mod, const char *name) | 3542 | static unsigned long mod_find_symname(struct module *mod, const char *name) |
3540 | { | 3543 | { |
3541 | unsigned int i; | 3544 | unsigned int i; |
3542 | 3545 | ||
3543 | for (i = 0; i < mod->num_symtab; i++) | 3546 | for (i = 0; i < mod->num_symtab; i++) |
3544 | if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && | 3547 | if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && |
3545 | mod->symtab[i].st_info != 'U') | 3548 | mod->symtab[i].st_info != 'U') |
3546 | return mod->symtab[i].st_value; | 3549 | return mod->symtab[i].st_value; |
3547 | return 0; | 3550 | return 0; |
3548 | } | 3551 | } |
3549 | 3552 | ||
3550 | /* Look for this name: can be of form module:name. */ | 3553 | /* Look for this name: can be of form module:name. */ |
3551 | unsigned long module_kallsyms_lookup_name(const char *name) | 3554 | unsigned long module_kallsyms_lookup_name(const char *name) |
3552 | { | 3555 | { |
3553 | struct module *mod; | 3556 | struct module *mod; |
3554 | char *colon; | 3557 | char *colon; |
3555 | unsigned long ret = 0; | 3558 | unsigned long ret = 0; |
3556 | 3559 | ||
3557 | /* Don't lock: we're in enough trouble already. */ | 3560 | /* Don't lock: we're in enough trouble already. */ |
3558 | preempt_disable(); | 3561 | preempt_disable(); |
3559 | if ((colon = strchr(name, ':')) != NULL) { | 3562 | if ((colon = strchr(name, ':')) != NULL) { |
3560 | if ((mod = find_module_all(name, colon - name, false)) != NULL) | 3563 | if ((mod = find_module_all(name, colon - name, false)) != NULL) |
3561 | ret = mod_find_symname(mod, colon+1); | 3564 | ret = mod_find_symname(mod, colon+1); |
3562 | } else { | 3565 | } else { |
3563 | list_for_each_entry_rcu(mod, &modules, list) { | 3566 | list_for_each_entry_rcu(mod, &modules, list) { |
3564 | if (mod->state == MODULE_STATE_UNFORMED) | 3567 | if (mod->state == MODULE_STATE_UNFORMED) |
3565 | continue; | 3568 | continue; |
3566 | if ((ret = mod_find_symname(mod, name)) != 0) | 3569 | if ((ret = mod_find_symname(mod, name)) != 0) |
3567 | break; | 3570 | break; |
3568 | } | 3571 | } |
3569 | } | 3572 | } |
3570 | preempt_enable(); | 3573 | preempt_enable(); |
3571 | return ret; | 3574 | return ret; |
3572 | } | 3575 | } |
3573 | 3576 | ||
3574 | int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | 3577 | int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, |
3575 | struct module *, unsigned long), | 3578 | struct module *, unsigned long), |
3576 | void *data) | 3579 | void *data) |
3577 | { | 3580 | { |
3578 | struct module *mod; | 3581 | struct module *mod; |
3579 | unsigned int i; | 3582 | unsigned int i; |
3580 | int ret; | 3583 | int ret; |
3581 | 3584 | ||
3582 | list_for_each_entry(mod, &modules, list) { | 3585 | list_for_each_entry(mod, &modules, list) { |
3583 | if (mod->state == MODULE_STATE_UNFORMED) | 3586 | if (mod->state == MODULE_STATE_UNFORMED) |
3584 | continue; | 3587 | continue; |
3585 | for (i = 0; i < mod->num_symtab; i++) { | 3588 | for (i = 0; i < mod->num_symtab; i++) { |
3586 | ret = fn(data, mod->strtab + mod->symtab[i].st_name, | 3589 | ret = fn(data, mod->strtab + mod->symtab[i].st_name, |
3587 | mod, mod->symtab[i].st_value); | 3590 | mod, mod->symtab[i].st_value); |
3588 | if (ret != 0) | 3591 | if (ret != 0) |
3589 | return ret; | 3592 | return ret; |
3590 | } | 3593 | } |
3591 | } | 3594 | } |
3592 | return 0; | 3595 | return 0; |
3593 | } | 3596 | } |
3594 | #endif /* CONFIG_KALLSYMS */ | 3597 | #endif /* CONFIG_KALLSYMS */ |
3595 | 3598 | ||
3596 | static char *module_flags(struct module *mod, char *buf) | 3599 | static char *module_flags(struct module *mod, char *buf) |
3597 | { | 3600 | { |
3598 | int bx = 0; | 3601 | int bx = 0; |
3599 | 3602 | ||
3600 | BUG_ON(mod->state == MODULE_STATE_UNFORMED); | 3603 | BUG_ON(mod->state == MODULE_STATE_UNFORMED); |
3601 | if (mod->taints || | 3604 | if (mod->taints || |
3602 | mod->state == MODULE_STATE_GOING || | 3605 | mod->state == MODULE_STATE_GOING || |
3603 | mod->state == MODULE_STATE_COMING) { | 3606 | mod->state == MODULE_STATE_COMING) { |
3604 | buf[bx++] = '('; | 3607 | buf[bx++] = '('; |
3605 | bx += module_flags_taint(mod, buf + bx); | 3608 | bx += module_flags_taint(mod, buf + bx); |
3606 | /* Show a - for module-is-being-unloaded */ | 3609 | /* Show a - for module-is-being-unloaded */ |
3607 | if (mod->state == MODULE_STATE_GOING) | 3610 | if (mod->state == MODULE_STATE_GOING) |
3608 | buf[bx++] = '-'; | 3611 | buf[bx++] = '-'; |
3609 | /* Show a + for module-is-being-loaded */ | 3612 | /* Show a + for module-is-being-loaded */ |
3610 | if (mod->state == MODULE_STATE_COMING) | 3613 | if (mod->state == MODULE_STATE_COMING) |
3611 | buf[bx++] = '+'; | 3614 | buf[bx++] = '+'; |
3612 | buf[bx++] = ')'; | 3615 | buf[bx++] = ')'; |
3613 | } | 3616 | } |
3614 | buf[bx] = '\0'; | 3617 | buf[bx] = '\0'; |
3615 | 3618 | ||
3616 | return buf; | 3619 | return buf; |
3617 | } | 3620 | } |
3618 | 3621 | ||
3619 | #ifdef CONFIG_PROC_FS | 3622 | #ifdef CONFIG_PROC_FS |
/* Called by the /proc file system to return a list of modules. */
/* seq_file start: take module_mutex so the list is stable for the walk;
 * released in m_stop(). */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}
3626 | 3629 | ||
/* seq_file next: advance to the following entry in the module list. */
static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}
3631 | 3634 | ||
/* seq_file stop: drop module_mutex taken in m_start(). */
static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}
3636 | 3639 | ||
3637 | static int m_show(struct seq_file *m, void *p) | 3640 | static int m_show(struct seq_file *m, void *p) |
3638 | { | 3641 | { |
3639 | struct module *mod = list_entry(p, struct module, list); | 3642 | struct module *mod = list_entry(p, struct module, list); |
3640 | char buf[8]; | 3643 | char buf[8]; |
3641 | 3644 | ||
3642 | /* We always ignore unformed modules. */ | 3645 | /* We always ignore unformed modules. */ |
3643 | if (mod->state == MODULE_STATE_UNFORMED) | 3646 | if (mod->state == MODULE_STATE_UNFORMED) |
3644 | return 0; | 3647 | return 0; |
3645 | 3648 | ||
3646 | seq_printf(m, "%s %u", | 3649 | seq_printf(m, "%s %u", |
3647 | mod->name, mod->init_size + mod->core_size); | 3650 | mod->name, mod->init_size + mod->core_size); |
3648 | print_unload_info(m, mod); | 3651 | print_unload_info(m, mod); |
3649 | 3652 | ||
3650 | /* Informative for users. */ | 3653 | /* Informative for users. */ |
3651 | seq_printf(m, " %s", | 3654 | seq_printf(m, " %s", |
3652 | mod->state == MODULE_STATE_GOING ? "Unloading": | 3655 | mod->state == MODULE_STATE_GOING ? "Unloading": |
3653 | mod->state == MODULE_STATE_COMING ? "Loading": | 3656 | mod->state == MODULE_STATE_COMING ? "Loading": |
3654 | "Live"); | 3657 | "Live"); |
3655 | /* Used by oprofile and other similar tools. */ | 3658 | /* Used by oprofile and other similar tools. */ |
3656 | seq_printf(m, " 0x%pK", mod->module_core); | 3659 | seq_printf(m, " 0x%pK", mod->module_core); |
3657 | 3660 | ||
3658 | /* Taints info */ | 3661 | /* Taints info */ |
3659 | if (mod->taints) | 3662 | if (mod->taints) |
3660 | seq_printf(m, " %s", module_flags(mod, buf)); | 3663 | seq_printf(m, " %s", module_flags(mod, buf)); |
3661 | 3664 | ||
3662 | seq_printf(m, "\n"); | 3665 | seq_printf(m, "\n"); |
3663 | return 0; | 3666 | return 0; |
3664 | } | 3667 | } |
3665 | 3668 | ||
/* Format: modulename size refcount deps address

   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.
*/
/* seq_file operations backing /proc/modules. */
static const struct seq_operations modules_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = m_show
};
3677 | 3680 | ||
/* open() handler for /proc/modules: attach the seq_file iterator. */
static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}
3682 | 3685 | ||
/* File operations for /proc/modules; all reads go through seq_file. */
static const struct file_operations proc_modules_operations = {
	.open = modules_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
3689 | 3692 | ||
/* Create the /proc/modules entry at boot.  Return value of proc_create()
 * is deliberately ignored: a missing /proc entry is not fatal. */
static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
3696 | #endif | 3699 | #endif |
3697 | 3700 | ||
3698 | /* Given an address, look for it in the module exception tables. */ | 3701 | /* Given an address, look for it in the module exception tables. */ |
3699 | const struct exception_table_entry *search_module_extables(unsigned long addr) | 3702 | const struct exception_table_entry *search_module_extables(unsigned long addr) |
3700 | { | 3703 | { |
3701 | const struct exception_table_entry *e = NULL; | 3704 | const struct exception_table_entry *e = NULL; |
3702 | struct module *mod; | 3705 | struct module *mod; |
3703 | 3706 | ||
3704 | preempt_disable(); | 3707 | preempt_disable(); |
3705 | list_for_each_entry_rcu(mod, &modules, list) { | 3708 | list_for_each_entry_rcu(mod, &modules, list) { |
3706 | if (mod->state == MODULE_STATE_UNFORMED) | 3709 | if (mod->state == MODULE_STATE_UNFORMED) |
3707 | continue; | 3710 | continue; |
3708 | if (mod->num_exentries == 0) | 3711 | if (mod->num_exentries == 0) |
3709 | continue; | 3712 | continue; |
3710 | 3713 | ||
3711 | e = search_extable(mod->extable, | 3714 | e = search_extable(mod->extable, |
3712 | mod->extable + mod->num_exentries - 1, | 3715 | mod->extable + mod->num_exentries - 1, |
3713 | addr); | 3716 | addr); |
3714 | if (e) | 3717 | if (e) |
3715 | break; | 3718 | break; |
3716 | } | 3719 | } |
3717 | preempt_enable(); | 3720 | preempt_enable(); |
3718 | 3721 | ||
3719 | /* Now, if we found one, we are running inside it now, hence | 3722 | /* Now, if we found one, we are running inside it now, hence |
3720 | we cannot unload the module, hence no refcnt needed. */ | 3723 | we cannot unload the module, hence no refcnt needed. */ |
3721 | return e; | 3724 | return e; |
3722 | } | 3725 | } |
3723 | 3726 | ||
3724 | /* | 3727 | /* |
3725 | * is_module_address - is this address inside a module? | 3728 | * is_module_address - is this address inside a module? |
3726 | * @addr: the address to check. | 3729 | * @addr: the address to check. |
3727 | * | 3730 | * |
3728 | * See is_module_text_address() if you simply want to see if the address | 3731 | * See is_module_text_address() if you simply want to see if the address |
3729 | * is code (not data). | 3732 | * is code (not data). |
3730 | */ | 3733 | */ |
3731 | bool is_module_address(unsigned long addr) | 3734 | bool is_module_address(unsigned long addr) |
3732 | { | 3735 | { |
3733 | bool ret; | 3736 | bool ret; |
3734 | 3737 | ||
3735 | preempt_disable(); | 3738 | preempt_disable(); |
3736 | ret = __module_address(addr) != NULL; | 3739 | ret = __module_address(addr) != NULL; |
3737 | preempt_enable(); | 3740 | preempt_enable(); |
3738 | 3741 | ||
3739 | return ret; | 3742 | return ret; |
3740 | } | 3743 | } |
3741 | 3744 | ||
3742 | /* | 3745 | /* |
3743 | * __module_address - get the module which contains an address. | 3746 | * __module_address - get the module which contains an address. |
3744 | * @addr: the address. | 3747 | * @addr: the address. |
3745 | * | 3748 | * |
3746 | * Must be called with preempt disabled or module mutex held so that | 3749 | * Must be called with preempt disabled or module mutex held so that |
3747 | * module doesn't get freed during this. | 3750 | * module doesn't get freed during this. |
3748 | */ | 3751 | */ |
3749 | struct module *__module_address(unsigned long addr) | 3752 | struct module *__module_address(unsigned long addr) |
3750 | { | 3753 | { |
3751 | struct module *mod; | 3754 | struct module *mod; |
3752 | 3755 | ||
3753 | if (addr < module_addr_min || addr > module_addr_max) | 3756 | if (addr < module_addr_min || addr > module_addr_max) |
3754 | return NULL; | 3757 | return NULL; |
3755 | 3758 | ||
3756 | list_for_each_entry_rcu(mod, &modules, list) { | 3759 | list_for_each_entry_rcu(mod, &modules, list) { |
3757 | if (mod->state == MODULE_STATE_UNFORMED) | 3760 | if (mod->state == MODULE_STATE_UNFORMED) |
3758 | continue; | 3761 | continue; |
3759 | if (within_module_core(addr, mod) | 3762 | if (within_module_core(addr, mod) |
3760 | || within_module_init(addr, mod)) | 3763 | || within_module_init(addr, mod)) |
3761 | return mod; | 3764 | return mod; |
3762 | } | 3765 | } |
3763 | return NULL; | 3766 | return NULL; |
3764 | } | 3767 | } |
3765 | EXPORT_SYMBOL_GPL(__module_address); | 3768 | EXPORT_SYMBOL_GPL(__module_address); |
3766 | 3769 | ||
3767 | /* | 3770 | /* |
3768 | * is_module_text_address - is this address inside module code? | 3771 | * is_module_text_address - is this address inside module code? |
3769 | * @addr: the address to check. | 3772 | * @addr: the address to check. |
3770 | * | 3773 | * |
3771 | * See is_module_address() if you simply want to see if the address is | 3774 | * See is_module_address() if you simply want to see if the address is |
3772 | * anywhere in a module. See kernel_text_address() for testing if an | 3775 | * anywhere in a module. See kernel_text_address() for testing if an |
3773 | * address corresponds to kernel or module code. | 3776 | * address corresponds to kernel or module code. |
3774 | */ | 3777 | */ |
3775 | bool is_module_text_address(unsigned long addr) | 3778 | bool is_module_text_address(unsigned long addr) |
3776 | { | 3779 | { |
3777 | bool ret; | 3780 | bool ret; |
3778 | 3781 | ||
3779 | preempt_disable(); | 3782 | preempt_disable(); |
3780 | ret = __module_text_address(addr) != NULL; | 3783 | ret = __module_text_address(addr) != NULL; |
3781 | preempt_enable(); | 3784 | preempt_enable(); |
3782 | 3785 | ||
3783 | return ret; | 3786 | return ret; |
3784 | } | 3787 | } |
3785 | 3788 | ||
3786 | /* | 3789 | /* |
3787 | * __module_text_address - get the module whose code contains an address. | 3790 | * __module_text_address - get the module whose code contains an address. |
3788 | * @addr: the address. | 3791 | * @addr: the address. |
3789 | * | 3792 | * |
3790 | * Must be called with preempt disabled or module mutex held so that | 3793 | * Must be called with preempt disabled or module mutex held so that |
3791 | * module doesn't get freed during this. | 3794 | * module doesn't get freed during this. |
3792 | */ | 3795 | */ |
3793 | struct module *__module_text_address(unsigned long addr) | 3796 | struct module *__module_text_address(unsigned long addr) |
3794 | { | 3797 | { |
3795 | struct module *mod = __module_address(addr); | 3798 | struct module *mod = __module_address(addr); |
3796 | if (mod) { | 3799 | if (mod) { |
3797 | /* Make sure it's within the text section. */ | 3800 | /* Make sure it's within the text section. */ |
3798 | if (!within(addr, mod->module_init, mod->init_text_size) | 3801 | if (!within(addr, mod->module_init, mod->init_text_size) |
3799 | && !within(addr, mod->module_core, mod->core_text_size)) | 3802 | && !within(addr, mod->module_core, mod->core_text_size)) |
3800 | mod = NULL; | 3803 | mod = NULL; |
3801 | } | 3804 | } |
3802 | return mod; | 3805 | return mod; |
3803 | } | 3806 | } |
3804 | EXPORT_SYMBOL_GPL(__module_text_address); | 3807 | EXPORT_SYMBOL_GPL(__module_text_address); |
3805 | 3808 | ||
3806 | /* Don't grab lock, we're oopsing. */ | 3809 | /* Don't grab lock, we're oopsing. */ |
3807 | void print_modules(void) | 3810 | void print_modules(void) |
3808 | { | 3811 | { |
3809 | struct module *mod; | 3812 | struct module *mod; |
3810 | char buf[8]; | 3813 | char buf[8]; |
3811 | 3814 | ||
3812 | printk(KERN_DEFAULT "Modules linked in:"); | 3815 | printk(KERN_DEFAULT "Modules linked in:"); |
3813 | /* Most callers should already have preempt disabled, but make sure */ | 3816 | /* Most callers should already have preempt disabled, but make sure */ |
3814 | preempt_disable(); | 3817 | preempt_disable(); |
3815 | list_for_each_entry_rcu(mod, &modules, list) { | 3818 | list_for_each_entry_rcu(mod, &modules, list) { |
3816 | if (mod->state == MODULE_STATE_UNFORMED) | 3819 | if (mod->state == MODULE_STATE_UNFORMED) |
3817 | continue; | 3820 | continue; |
3818 | pr_cont(" %s%s", mod->name, module_flags(mod, buf)); | 3821 | pr_cont(" %s%s", mod->name, module_flags(mod, buf)); |
3819 | } | 3822 | } |
3820 | preempt_enable(); | 3823 | preempt_enable(); |
3821 | if (last_unloaded_module[0]) | 3824 | if (last_unloaded_module[0]) |
3822 | pr_cont(" [last unloaded: %s]", last_unloaded_module); | 3825 | pr_cont(" [last unloaded: %s]", last_unloaded_module); |
3823 | pr_cont("\n"); | 3826 | pr_cont("\n"); |
3824 | } | 3827 | } |
3825 | 3828 | ||
#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
	/* Intentionally empty: only this prototype's version CRC matters,
	 * and it changes whenever the layout of the listed structures does. */
}
EXPORT_SYMBOL(module_layout);
#endif
3838 | 3841 |
kernel/trace/ftrace.c
1 | /* | 1 | /* |
2 | * Infrastructure for profiling code inserted by 'gcc -pg'. | 2 | * Infrastructure for profiling code inserted by 'gcc -pg'. |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> | 5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> |
6 | * | 6 | * |
7 | * Originally ported from the -rt patch by: | 7 | * Originally ported from the -rt patch by: |
8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | 8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> |
9 | * | 9 | * |
10 | * Based on code in the latency_tracer, that is: | 10 | * Based on code in the latency_tracer, that is: |
11 | * | 11 | * |
12 | * Copyright (C) 2004-2006 Ingo Molnar | 12 | * Copyright (C) 2004-2006 Ingo Molnar |
13 | * Copyright (C) 2004 Nadia Yvette Chambers | 13 | * Copyright (C) 2004 Nadia Yvette Chambers |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/stop_machine.h> | 16 | #include <linux/stop_machine.h> |
17 | #include <linux/clocksource.h> | 17 | #include <linux/clocksource.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
22 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
23 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <linux/bsearch.h> | 25 | #include <linux/bsearch.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/ftrace.h> | 27 | #include <linux/ftrace.h> |
28 | #include <linux/sysctl.h> | 28 | #include <linux/sysctl.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
31 | #include <linux/sort.h> | 31 | #include <linux/sort.h> |
32 | #include <linux/list.h> | 32 | #include <linux/list.h> |
33 | #include <linux/hash.h> | 33 | #include <linux/hash.h> |
34 | #include <linux/rcupdate.h> | 34 | #include <linux/rcupdate.h> |
35 | 35 | ||
36 | #include <trace/events/sched.h> | 36 | #include <trace/events/sched.h> |
37 | 37 | ||
38 | #include <asm/setup.h> | 38 | #include <asm/setup.h> |
39 | 39 | ||
40 | #include "trace_output.h" | 40 | #include "trace_output.h" |
41 | #include "trace_stat.h" | 41 | #include "trace_stat.h" |
42 | 42 | ||
/*
 * FTRACE_WARN_ON(cond) - warn and shut ftrace down on an anomaly.
 *
 * Statement expression: evaluates @cond exactly once and yields its
 * value, so it can itself be used inside an if () condition.  When the
 * condition is true a WARN backtrace is printed and ftrace_kill() is
 * invoked, as a failed sanity check here means tracing state can no
 * longer be trusted.
 */
#define FTRACE_WARN_ON(cond)			\
({						\
	int ___r = cond;			\
	if (WARN_ON(___r))			\
		ftrace_kill();			\
	___r;					\
})

/* Like FTRACE_WARN_ON(), but the backtrace is printed only once. */
#define FTRACE_WARN_ON_ONCE(cond)		\
({						\
	int ___r = cond;			\
	if (WARN_ON_ONCE(___r))			\
		ftrace_kill();			\
	___r;					\
})
58 | 58 | ||
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
/* default and maximum sizes of the dynamically sized filter hashes */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

/*
 * GLOBAL and CONTROL are mutually exclusive; this mask is used by
 * __register_ftrace_function() to reject an ops that sets both.
 */
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

/*
 * Static initializer for ops->regex_lock.  The field only exists with
 * CONFIG_DYNAMIC_FTRACE, so this expands to nothing otherwise.
 */
#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)	\
	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif
73 | 73 | ||
/*
 * Sentinel terminating every ftrace_ops list.  A real ops with a stub
 * callback (rather than NULL) lets list walkers avoid a NULL check on
 * each iteration.
 */
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* previous value of ftrace_enabled, to detect on/off transitions */
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
/* One node on ftrace_pids per PID selected for tracing. */
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Serializes updates to the ops lists (see ftrace_nr_registered_ops()). */
static DEFINE_MUTEX(ftrace_lock);

/* The three ops lists; each is terminated by &ftrace_list_end. */
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
/* The callback the mcount trampoline actually invokes. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
/* Placeholder ops representing the global/control lists on ftrace_ops_list. */
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
122 | 122 | ||
/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 * Pairs with do_for_each_ftrace_op(); the body between the two macros
 * runs at least once (on the sentinel the loop then terminates).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
142 | 142 | ||
/*
 * Lazily initialize an ftrace_ops on first use: set up its regex_lock
 * and mark it INITIALIZED so this runs only once per ops.  A no-op
 * without CONFIG_DYNAMIC_FTRACE (no regex_lock field to set up).
 */
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	mutex_init(&ops->regex_lock);
	ops->flags |= FTRACE_OPS_FL_INITIALIZED;
#endif
}
152 | 152 | ||
153 | /** | 153 | /** |
154 | * ftrace_nr_registered_ops - return number of ops registered | 154 | * ftrace_nr_registered_ops - return number of ops registered |
155 | * | 155 | * |
156 | * Returns the number of ftrace_ops registered and tracing functions | 156 | * Returns the number of ftrace_ops registered and tracing functions |
157 | */ | 157 | */ |
158 | int ftrace_nr_registered_ops(void) | 158 | int ftrace_nr_registered_ops(void) |
159 | { | 159 | { |
160 | struct ftrace_ops *ops; | 160 | struct ftrace_ops *ops; |
161 | int cnt = 0; | 161 | int cnt = 0; |
162 | 162 | ||
163 | mutex_lock(&ftrace_lock); | 163 | mutex_lock(&ftrace_lock); |
164 | 164 | ||
165 | for (ops = ftrace_ops_list; | 165 | for (ops = ftrace_ops_list; |
166 | ops != &ftrace_list_end; ops = ops->next) | 166 | ops != &ftrace_list_end; ops = ops->next) |
167 | cnt++; | 167 | cnt++; |
168 | 168 | ||
169 | mutex_unlock(&ftrace_lock); | 169 | mutex_unlock(&ftrace_lock); |
170 | 170 | ||
171 | return cnt; | 171 | return cnt; |
172 | } | 172 | } |
173 | 173 | ||
/*
 * List callback for the global ops: invoke every ops on
 * ftrace_global_list for this (ip, parent_ip) hit.  Guarded by the
 * per-context recursion counter so a traced function called from one
 * of the callbacks cannot re-enter this loop.
 */
static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	/* Bail out (silently) if we are already tracing in this context. */
	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
	if (bit < 0)
		return;

	do_for_each_ftrace_op(op, ftrace_global_list) {
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);

	trace_clear_recursion(bit);
}
190 | 190 | ||
191 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | 191 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, |
192 | struct ftrace_ops *op, struct pt_regs *regs) | 192 | struct ftrace_ops *op, struct pt_regs *regs) |
193 | { | 193 | { |
194 | if (!test_tsk_trace_trace(current)) | 194 | if (!test_tsk_trace_trace(current)) |
195 | return; | 195 | return; |
196 | 196 | ||
197 | ftrace_pid_function(ip, parent_ip, op, regs); | 197 | ftrace_pid_function(ip, parent_ip, op, regs); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void set_ftrace_pid_function(ftrace_func_t func) | 200 | static void set_ftrace_pid_function(ftrace_func_t func) |
201 | { | 201 | { |
202 | /* do not set ftrace_pid_function to itself! */ | 202 | /* do not set ftrace_pid_function to itself! */ |
203 | if (func != ftrace_pid_func) | 203 | if (func != ftrace_pid_func) |
204 | ftrace_pid_function = func; | 204 | ftrace_pid_function = func; |
205 | } | 205 | } |
206 | 206 | ||
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before other CPUs observe the new
 * function pointers, as nothing here synchronizes with running
 * tracers.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
218 | 218 | ||
/* Set the per-CPU "disabled" flag of a control ops on every possible CPU. */
static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}
226 | 226 | ||
227 | static int control_ops_alloc(struct ftrace_ops *ops) | 227 | static int control_ops_alloc(struct ftrace_ops *ops) |
228 | { | 228 | { |
229 | int __percpu *disabled; | 229 | int __percpu *disabled; |
230 | 230 | ||
231 | disabled = alloc_percpu(int); | 231 | disabled = alloc_percpu(int); |
232 | if (!disabled) | 232 | if (!disabled) |
233 | return -ENOMEM; | 233 | return -ENOMEM; |
234 | 234 | ||
235 | ops->disabled = disabled; | 235 | ops->disabled = disabled; |
236 | control_ops_disable_all(ops); | 236 | control_ops_disable_all(ops); |
237 | return 0; | 237 | return 0; |
238 | } | 238 | } |
239 | 239 | ||
/*
 * Recompute global_ops.func/.private from the current contents of
 * ftrace_global_list: call a sole entry directly, or fall back to the
 * list-iterating callback, with the PID filter layered on top if
 * set_ftrace_pid is in use.
 * NOTE(review): appears to rely on the caller serializing updates
 * (ftrace_lock) — confirm against callers outside this chunk.
 */
static void update_global_ops(void)
{
	ftrace_func_t func = ftrace_global_list_func;
	void *private = NULL;

	/* The list has its own recursion protection. */
	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end) {
		func = ftrace_global_list->func;
		private = ftrace_global_list->private;
		/*
		 * As we are calling the function directly.
		 * If it does not have recursion protection,
		 * the function_trace_op needs to be updated
		 * accordingly.
		 */
		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
	}

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
	global_ops.private = private;
}
276 | 276 | ||
/* Work callback scheduled on each CPU purely for its synchronizing effect. */
static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}
287 | 287 | ||
/* IPI callback used to force a read barrier on every CPU. */
static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}
293 | 293 | ||
/* Defined later when the graph tracer is built; otherwise a no-op stub. */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif
299 | 299 | ||
/*
 * Recompute and install ftrace_trace_function (and function_trace_op)
 * from the current ftrace_ops_list.  A single recursion-safe,
 * non-dynamic ops is called directly; anything else goes through the
 * list function.  The !CONFIG_DYNAMIC_FTRACE path below is extremely
 * ordering-sensitive: do not reorder its statements.
 */
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		if (ftrace_ops_list == &global_ops)
			set_function_trace_op = ftrace_global_list;
		else
			set_function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	update_function_graph_func();

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coorditate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
375 | 375 | ||
/*
 * Push @ops onto the front of @list.  Publication order matters:
 * ops->next must be valid before concurrent walkers can see @ops, hence
 * the rcu_assign_pointer() (which provides the needed write barrier).
 */
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
387 | 387 | ||
/*
 * Unlink @ops from @list.  Returns 0 on success, -1 if @ops was not
 * found.  Readers may be traversing concurrently, so removal is a
 * single pointer splice; removed entries are never freed (see the
 * comment above do_for_each_ftrace_op()).
 */
static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	/* Find the link pointing at ops. */
	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
411 | 411 | ||
412 | static void add_ftrace_list_ops(struct ftrace_ops **list, | 412 | static void add_ftrace_list_ops(struct ftrace_ops **list, |
413 | struct ftrace_ops *main_ops, | 413 | struct ftrace_ops *main_ops, |
414 | struct ftrace_ops *ops) | 414 | struct ftrace_ops *ops) |
415 | { | 415 | { |
416 | int first = *list == &ftrace_list_end; | 416 | int first = *list == &ftrace_list_end; |
417 | add_ftrace_ops(list, ops); | 417 | add_ftrace_ops(list, ops); |
418 | if (first) | 418 | if (first) |
419 | add_ftrace_ops(&ftrace_ops_list, main_ops); | 419 | add_ftrace_ops(&ftrace_ops_list, main_ops); |
420 | } | 420 | } |
421 | 421 | ||
422 | static int remove_ftrace_list_ops(struct ftrace_ops **list, | 422 | static int remove_ftrace_list_ops(struct ftrace_ops **list, |
423 | struct ftrace_ops *main_ops, | 423 | struct ftrace_ops *main_ops, |
424 | struct ftrace_ops *ops) | 424 | struct ftrace_ops *ops) |
425 | { | 425 | { |
426 | int ret = remove_ftrace_ops(list, ops); | 426 | int ret = remove_ftrace_ops(list, ops); |
427 | if (!ret && *list == &ftrace_list_end) | 427 | if (!ret && *list == &ftrace_list_end) |
428 | ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); | 428 | ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); |
429 | return ret; | 429 | return ret; |
430 | } | 430 | } |
431 | 431 | ||
/*
 * Validate @ops and add it to the appropriate list (global, control,
 * or the primary ftrace_ops_list), then refresh the live trace
 * function if tracing is enabled.
 *
 * Returns 0 on success; -EINVAL for a deleted ops, for the internal
 * global_ops, or for an invalid flag combination; -EBUSY if already
 * enabled; -ENOMEM if the control ops per-CPU state cannot be
 * allocated.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	/* An ops outside core kernel data (e.g. in a module) is dynamic. */
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
479 | 479 | ||
/*
 * Remove @ops from whichever list it was registered on and refresh the
 * live trace function if tracing is enabled.
 *
 * Returns 0 on success; -EBUSY if @ops was never enabled; -EINVAL for
 * the internal global_ops; or the negative result of the list removal.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
509 | 509 | ||
510 | static void ftrace_update_pid_func(void) | 510 | static void ftrace_update_pid_func(void) |
511 | { | 511 | { |
512 | /* Only do something if we are tracing something */ | 512 | /* Only do something if we are tracing something */ |
513 | if (ftrace_trace_function == ftrace_stub) | 513 | if (ftrace_trace_function == ftrace_stub) |
514 | return; | 514 | return; |
515 | 515 | ||
516 | update_ftrace_function(); | 516 | update_ftrace_function(); |
517 | } | 517 | } |
518 | 518 | ||
519 | #ifdef CONFIG_FUNCTION_PROFILER | 519 | #ifdef CONFIG_FUNCTION_PROFILER |
/*
 * One per-function profiling record.  Records are linked into per-cpu
 * hash buckets via @node and keyed by the function address @ip.
 */
struct ftrace_profile {
	struct hlist_node		node;		/* hash-bucket linkage */
	unsigned long			ip;		/* function entry address */
	unsigned long			counter;	/* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;		/* total time accumulated */
	unsigned long long		time_squared;	/* sum of time^2, for stddev */
#endif
};
529 | 529 | ||
/*
 * A page-sized chunk of profile records.  Pages are chained through
 * @next; @index counts how many @records slots are in use on this page.
 */
struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];	/* flexible array, fills the page */
};
535 | 535 | ||
/*
 * Per-cpu profiler state: the record hash, the record page list and the
 * tracer_stat instance registered for this cpu's debugfs stat file.
 */
struct ftrace_profile_stat {
	atomic_t		disabled;	/* !=0: recursion guard (see ftrace_profile_alloc) */
	struct hlist_head	*hash;		/* ip -> ftrace_profile buckets */
	struct ftrace_profile_page *pages;	/* page records are currently taken from */
	struct ftrace_profile_page *start;	/* head of the page list */
	struct tracer_stat	stat;		/* per-cpu stat-tracer registration */
};
543 | 543 | ||
/* Bytes available for records on one profile page (page minus header). */
#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

/* Number of ftrace_profile records that fit on one page. */
#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

/* Non-zero while the function profiler is collecting hits. */
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

/* Per-cpu profiler state; see struct ftrace_profile_stat. */
static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
559 | 559 | ||
/*
 * Advance the stat iterator to the next record that has been hit.
 *
 * Records are packed into page-sized chunks, so the owning page header
 * can be recovered by masking the record address down to a page
 * boundary.  idx == 0 means "start with the current record".
 * Returns NULL when the page list is exhausted.
 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	/* Page headers are page-aligned; mask the record address to find one. */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		/* Skip records that were never hit. */
		if (!rec->counter)
			goto again;
	}

	return rec;
}
583 | 583 | ||
584 | static void *function_stat_start(struct tracer_stat *trace) | 584 | static void *function_stat_start(struct tracer_stat *trace) |
585 | { | 585 | { |
586 | struct ftrace_profile_stat *stat = | 586 | struct ftrace_profile_stat *stat = |
587 | container_of(trace, struct ftrace_profile_stat, stat); | 587 | container_of(trace, struct ftrace_profile_stat, stat); |
588 | 588 | ||
589 | if (!stat || !stat->start) | 589 | if (!stat || !stat->start) |
590 | return NULL; | 590 | return NULL; |
591 | 591 | ||
592 | return function_stat_next(&stat->start->records[0], 0); | 592 | return function_stat_next(&stat->start->records[0], 0); |
593 | } | 593 | } |
594 | 594 | ||
595 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 595 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
596 | /* function graph compares on total time */ | 596 | /* function graph compares on total time */ |
597 | static int function_stat_cmp(void *p1, void *p2) | 597 | static int function_stat_cmp(void *p1, void *p2) |
598 | { | 598 | { |
599 | struct ftrace_profile *a = p1; | 599 | struct ftrace_profile *a = p1; |
600 | struct ftrace_profile *b = p2; | 600 | struct ftrace_profile *b = p2; |
601 | 601 | ||
602 | if (a->time < b->time) | 602 | if (a->time < b->time) |
603 | return -1; | 603 | return -1; |
604 | if (a->time > b->time) | 604 | if (a->time > b->time) |
605 | return 1; | 605 | return 1; |
606 | else | 606 | else |
607 | return 0; | 607 | return 0; |
608 | } | 608 | } |
609 | #else | 609 | #else |
610 | /* not function graph compares against hits */ | 610 | /* not function graph compares against hits */ |
611 | static int function_stat_cmp(void *p1, void *p2) | 611 | static int function_stat_cmp(void *p1, void *p2) |
612 | { | 612 | { |
613 | struct ftrace_profile *a = p1; | 613 | struct ftrace_profile *a = p1; |
614 | struct ftrace_profile *b = p2; | 614 | struct ftrace_profile *b = p2; |
615 | 615 | ||
616 | if (a->counter < b->counter) | 616 | if (a->counter < b->counter) |
617 | return -1; | 617 | return -1; |
618 | if (a->counter > b->counter) | 618 | if (a->counter > b->counter) |
619 | return 1; | 619 | return 1; |
620 | else | 620 | else |
621 | return 0; | 621 | return 0; |
622 | } | 622 | } |
623 | #endif | 623 | #endif |
624 | 624 | ||
/*
 * Emit the column headers for the per-cpu function profile stat file.
 * The strings contain no format specifiers, so use seq_puts() rather
 * than seq_printf(): simpler and skips format-string processing
 * (the same cleanup later applied upstream).
 */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		 "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		 "  --------                               ---\n");
#endif
	return 0;
}
638 | 638 | ||
/*
 * Show one function's profile line: symbol name, hit count and — with
 * the graph tracer — total/average/stddev times.  Runs under the stat
 * seq_file iteration and takes ftrace_profile_lock so the record cannot
 * be wiped by a concurrent profile reset while we format it.
 */
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Shared scratch buffer; safe because we hold ftrace_profile_lock. */
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	/* do_div() works in place: avg = time / counter. */
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
697 | 697 | ||
698 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 698 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
699 | { | 699 | { |
700 | struct ftrace_profile_page *pg; | 700 | struct ftrace_profile_page *pg; |
701 | 701 | ||
702 | pg = stat->pages = stat->start; | 702 | pg = stat->pages = stat->start; |
703 | 703 | ||
704 | while (pg) { | 704 | while (pg) { |
705 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); | 705 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); |
706 | pg->index = 0; | 706 | pg->index = 0; |
707 | pg = pg->next; | 707 | pg = pg->next; |
708 | } | 708 | } |
709 | 709 | ||
710 | memset(stat->hash, 0, | 710 | memset(stat->hash, 0, |
711 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | 711 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); |
712 | } | 712 | } |
713 | 713 | ||
/*
 * Preallocate the per-cpu record pages, sized to hold one record per
 * known function (ftrace_update_tot_cnt with dynamic ftrace, a 20k
 * estimate otherwise).  Idempotent: a second call returns 0 without
 * allocating.  Returns -ENOMEM on failure with every page released.
 */
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	/* First page is already allocated above, hence i = 1. */
	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	/* Unwind: free every page allocated so far. */
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
769 | 769 | ||
770 | static int ftrace_profile_init_cpu(int cpu) | 770 | static int ftrace_profile_init_cpu(int cpu) |
771 | { | 771 | { |
772 | struct ftrace_profile_stat *stat; | 772 | struct ftrace_profile_stat *stat; |
773 | int size; | 773 | int size; |
774 | 774 | ||
775 | stat = &per_cpu(ftrace_profile_stats, cpu); | 775 | stat = &per_cpu(ftrace_profile_stats, cpu); |
776 | 776 | ||
777 | if (stat->hash) { | 777 | if (stat->hash) { |
778 | /* If the profile is already created, simply reset it */ | 778 | /* If the profile is already created, simply reset it */ |
779 | ftrace_profile_reset(stat); | 779 | ftrace_profile_reset(stat); |
780 | return 0; | 780 | return 0; |
781 | } | 781 | } |
782 | 782 | ||
783 | /* | 783 | /* |
784 | * We are profiling all functions, but usually only a few thousand | 784 | * We are profiling all functions, but usually only a few thousand |
785 | * functions are hit. We'll make a hash of 1024 items. | 785 | * functions are hit. We'll make a hash of 1024 items. |
786 | */ | 786 | */ |
787 | size = FTRACE_PROFILE_HASH_SIZE; | 787 | size = FTRACE_PROFILE_HASH_SIZE; |
788 | 788 | ||
789 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); | 789 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); |
790 | 790 | ||
791 | if (!stat->hash) | 791 | if (!stat->hash) |
792 | return -ENOMEM; | 792 | return -ENOMEM; |
793 | 793 | ||
794 | /* Preallocate the function profiling pages */ | 794 | /* Preallocate the function profiling pages */ |
795 | if (ftrace_profile_pages_init(stat) < 0) { | 795 | if (ftrace_profile_pages_init(stat) < 0) { |
796 | kfree(stat->hash); | 796 | kfree(stat->hash); |
797 | stat->hash = NULL; | 797 | stat->hash = NULL; |
798 | return -ENOMEM; | 798 | return -ENOMEM; |
799 | } | 799 | } |
800 | 800 | ||
801 | return 0; | 801 | return 0; |
802 | } | 802 | } |
803 | 803 | ||
804 | static int ftrace_profile_init(void) | 804 | static int ftrace_profile_init(void) |
805 | { | 805 | { |
806 | int cpu; | 806 | int cpu; |
807 | int ret = 0; | 807 | int ret = 0; |
808 | 808 | ||
809 | for_each_possible_cpu(cpu) { | 809 | for_each_possible_cpu(cpu) { |
810 | ret = ftrace_profile_init_cpu(cpu); | 810 | ret = ftrace_profile_init_cpu(cpu); |
811 | if (ret) | 811 | if (ret) |
812 | break; | 812 | break; |
813 | } | 813 | } |
814 | 814 | ||
815 | return ret; | 815 | return ret; |
816 | } | 816 | } |
817 | 817 | ||
818 | /* interrupts must be disabled */ | 818 | /* interrupts must be disabled */ |
819 | static struct ftrace_profile * | 819 | static struct ftrace_profile * |
820 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) | 820 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) |
821 | { | 821 | { |
822 | struct ftrace_profile *rec; | 822 | struct ftrace_profile *rec; |
823 | struct hlist_head *hhd; | 823 | struct hlist_head *hhd; |
824 | unsigned long key; | 824 | unsigned long key; |
825 | 825 | ||
826 | key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); | 826 | key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); |
827 | hhd = &stat->hash[key]; | 827 | hhd = &stat->hash[key]; |
828 | 828 | ||
829 | if (hlist_empty(hhd)) | 829 | if (hlist_empty(hhd)) |
830 | return NULL; | 830 | return NULL; |
831 | 831 | ||
832 | hlist_for_each_entry_rcu_notrace(rec, hhd, node) { | 832 | hlist_for_each_entry_rcu_notrace(rec, hhd, node) { |
833 | if (rec->ip == ip) | 833 | if (rec->ip == ip) |
834 | return rec; | 834 | return rec; |
835 | } | 835 | } |
836 | 836 | ||
837 | return NULL; | 837 | return NULL; |
838 | } | 838 | } |
839 | 839 | ||
840 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, | 840 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, |
841 | struct ftrace_profile *rec) | 841 | struct ftrace_profile *rec) |
842 | { | 842 | { |
843 | unsigned long key; | 843 | unsigned long key; |
844 | 844 | ||
845 | key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); | 845 | key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); |
846 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); | 846 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); |
847 | } | 847 | } |
848 | 848 | ||
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns NULL if an NMI is already in here, if all preallocated pages
 * are full, or the existing record if one appeared meanwhile.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	/* Current page exhausted: advance to the next preallocated page. */
	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;	/* out of records; drop this hit */
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
884 | 884 | ||
/*
 * ftrace callback for the flat (non-graph) profiler: count one hit for
 * @ip in this cpu's records.  Disables irqs around the per-cpu state so
 * the lookup/alloc/increment sequence is not torn by an interrupt.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	/* Re-check under irq-off: the profiler may have been disabled. */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
913 | 913 | ||
914 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 914 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
915 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | 915 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
916 | { | 916 | { |
917 | function_profile_call(trace->func, 0, NULL, NULL); | 917 | function_profile_call(trace->func, 0, NULL, NULL); |
918 | return 1; | 918 | return 1; |
919 | } | 919 | } |
920 | 920 | ||
/*
 * Graph-tracer return hook: fold this call's duration into the
 * function's time and time_squared accumulators.  Unless
 * TRACE_ITER_GRAPH_TIME is set, time spent in children (tracked in
 * ret_stack[].subtime) is subtracted so only "self" time is recorded.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		/* Clamp at zero rather than underflowing the unsigned value. */
		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
963 | 963 | ||
/* Attach the profiler through the function graph tracer. */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

/* Detach the graph-tracer based profiler. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
974 | #else | 974 | #else |
/* No graph tracer: hook the profiler in as a plain ftrace_ops callback. */
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(ftrace_profile_ops)
};

/* Attach the profiler as a regular function trace callback. */
static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

/* Detach the function-trace based profiler. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
990 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 990 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
991 | 991 | ||
/*
 * debugfs write handler for "function_profile_enabled": parse a
 * boolean from userspace and start or stop the profiler accordingly.
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;	/* normalize: any non-zero value means "enable" */

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {	/* only act on a state change */
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;	/* report the error to the writer */
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			/* Clear the flag first so callbacks stop recording. */
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like an synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
1036 | 1036 | ||
1037 | static ssize_t | 1037 | static ssize_t |
1038 | ftrace_profile_read(struct file *filp, char __user *ubuf, | 1038 | ftrace_profile_read(struct file *filp, char __user *ubuf, |
1039 | size_t cnt, loff_t *ppos) | 1039 | size_t cnt, loff_t *ppos) |
1040 | { | 1040 | { |
1041 | char buf[64]; /* big enough to hold a number */ | 1041 | char buf[64]; /* big enough to hold a number */ |
1042 | int r; | 1042 | int r; |
1043 | 1043 | ||
1044 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | 1044 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); |
1045 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 1045 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
1046 | } | 1046 | } |
1047 | 1047 | ||
/* File operations for the "function_profile_enabled" debugfs control. */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
1054 | 1054 | ||
/*
 * used to initialize the real stat files: template copied into each
 * cpu's ftrace_profile_stat by ftrace_profile_debugfs(), hence __initdata.
 */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
1064 | 1064 | ||
/*
 * Create the per-cpu "function<N>" stat files and the global
 * "function_profile_enabled" control under the tracing debugfs dir.
 * Failures WARN and bail out, but are otherwise non-fatal at boot.
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;	/* copy the template */
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;		/* name owned by the stat file forever */
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
1107 | 1107 | ||
1108 | #else /* CONFIG_FUNCTION_PROFILER */ | 1108 | #else /* CONFIG_FUNCTION_PROFILER */ |
/* Stub: nothing to create when CONFIG_FUNCTION_PROFILER is not enabled. */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
1112 | #endif /* CONFIG_FUNCTION_PROFILER */ | 1112 | #endif /* CONFIG_FUNCTION_PROFILER */ |
1113 | 1113 | ||
1114 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1114 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1115 | 1115 | ||
1116 | #ifdef CONFIG_DYNAMIC_FTRACE | 1116 | #ifdef CONFIG_DYNAMIC_FTRACE |
1117 | 1117 | ||
1118 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 1118 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
1119 | # error Dynamic ftrace depends on MCOUNT_RECORD | 1119 | # error Dynamic ftrace depends on MCOUNT_RECORD |
1120 | #endif | 1120 | #endif |
1121 | 1121 | ||
1122 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; | 1122 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; |
1123 | 1123 | ||
/*
 * One function probe instance.  Hung off ftrace_func_hash (declared
 * above) via @node -- NOTE(review): hash usage is outside this chunk,
 * confirm at the probe registration sites.
 */
struct ftrace_func_probe {
	struct hlist_node node;		/* link in ftrace_func_hash */
	struct ftrace_probe_ops *ops;	/* callbacks for this probe */
	unsigned long flags;
	unsigned long ip;		/* address the probe is attached to */
	void *data;			/* opaque data handed to @ops */
	struct list_head free_list;	/* presumably for batched release -- confirm at free sites */
};
1132 | 1132 | ||
/*
 * A single function address stored in a struct ftrace_hash.  Bucketed
 * by hash_long(ip) -- see ftrace_lookup_ip() and __add_hash_entry().
 */
struct ftrace_func_entry {
	struct hlist_node hlist;	/* link in ftrace_hash->buckets[] */
	unsigned long ip;		/* function address this entry represents */
};
1137 | 1137 | ||
/*
 * Hash table of function addresses (struct ftrace_func_entry).
 * Instances are published with rcu_assign_pointer() and torn down
 * after a grace period -- see free_ftrace_hash_rcu().
 */
struct ftrace_hash {
	unsigned long size_bits;	/* log2 of the number of buckets */
	struct hlist_head *buckets;	/* array of 1 << size_bits bucket heads */
	unsigned long count;		/* total entries across all buckets */
	struct rcu_head rcu;		/* deferred free via call_rcu_sched() */
};
1144 | 1144 | ||
1145 | /* | 1145 | /* |
1146 | * We make these constant because no one should touch them, | 1146 | * We make these constant because no one should touch them, |
1147 | * but they are used as the default "empty hash", to avoid allocating | 1147 | * but they are used as the default "empty hash", to avoid allocating |
1148 | * it all the time. These are in a read only section such that if | 1148 | * it all the time. These are in a read only section such that if |
1149 | * anyone does try to modify it, it will cause an exception. | 1149 | * anyone does try to modify it, it will cause an exception. |
1150 | */ | 1150 | */ |
1151 | static const struct hlist_head empty_buckets[1]; | 1151 | static const struct hlist_head empty_buckets[1]; |
1152 | static const struct ftrace_hash empty_hash = { | 1152 | static const struct ftrace_hash empty_hash = { |
1153 | .buckets = (struct hlist_head *)empty_buckets, | 1153 | .buckets = (struct hlist_head *)empty_buckets, |
1154 | }; | 1154 | }; |
1155 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | 1155 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
1156 | 1156 | ||
/*
 * The global ftrace_ops: starts out with a stub callback and empty
 * filter/notrace hashes (i.e. trace everything, exclude nothing).
 */
static struct ftrace_ops global_ops = {
	.func = ftrace_stub,
	.notrace_hash = EMPTY_HASH,
	.filter_hash = EMPTY_HASH,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(global_ops)
};
1164 | 1164 | ||
/*
 * A batch of dyn_ftrace records.  Pages are chained through ->next
 * starting at ftrace_pages_start (declared below).
 */
struct ftrace_page {
	struct ftrace_page *next;	/* next page in the list */
	struct dyn_ftrace *records;	/* array backing this page */
	int index;			/* number of records in use */
	int size;			/* presumably the capacity in records -- confirm at alloc sites */
};
1171 | 1171 | ||
/* How many dyn_ftrace records fit in one page. */
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT 10000

/* Head of the record-page list; ftrace_pages presumably tracks the current/last page -- confirm at allocation sites. */
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
1180 | 1180 | ||
1181 | static bool ftrace_hash_empty(struct ftrace_hash *hash) | 1181 | static bool ftrace_hash_empty(struct ftrace_hash *hash) |
1182 | { | 1182 | { |
1183 | return !hash || !hash->count; | 1183 | return !hash || !hash->count; |
1184 | } | 1184 | } |
1185 | 1185 | ||
1186 | static struct ftrace_func_entry * | 1186 | static struct ftrace_func_entry * |
1187 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | 1187 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) |
1188 | { | 1188 | { |
1189 | unsigned long key; | 1189 | unsigned long key; |
1190 | struct ftrace_func_entry *entry; | 1190 | struct ftrace_func_entry *entry; |
1191 | struct hlist_head *hhd; | 1191 | struct hlist_head *hhd; |
1192 | 1192 | ||
1193 | if (ftrace_hash_empty(hash)) | 1193 | if (ftrace_hash_empty(hash)) |
1194 | return NULL; | 1194 | return NULL; |
1195 | 1195 | ||
1196 | if (hash->size_bits > 0) | 1196 | if (hash->size_bits > 0) |
1197 | key = hash_long(ip, hash->size_bits); | 1197 | key = hash_long(ip, hash->size_bits); |
1198 | else | 1198 | else |
1199 | key = 0; | 1199 | key = 0; |
1200 | 1200 | ||
1201 | hhd = &hash->buckets[key]; | 1201 | hhd = &hash->buckets[key]; |
1202 | 1202 | ||
1203 | hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { | 1203 | hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { |
1204 | if (entry->ip == ip) | 1204 | if (entry->ip == ip) |
1205 | return entry; | 1205 | return entry; |
1206 | } | 1206 | } |
1207 | return NULL; | 1207 | return NULL; |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | static void __add_hash_entry(struct ftrace_hash *hash, | 1210 | static void __add_hash_entry(struct ftrace_hash *hash, |
1211 | struct ftrace_func_entry *entry) | 1211 | struct ftrace_func_entry *entry) |
1212 | { | 1212 | { |
1213 | struct hlist_head *hhd; | 1213 | struct hlist_head *hhd; |
1214 | unsigned long key; | 1214 | unsigned long key; |
1215 | 1215 | ||
1216 | if (hash->size_bits) | 1216 | if (hash->size_bits) |
1217 | key = hash_long(entry->ip, hash->size_bits); | 1217 | key = hash_long(entry->ip, hash->size_bits); |
1218 | else | 1218 | else |
1219 | key = 0; | 1219 | key = 0; |
1220 | 1220 | ||
1221 | hhd = &hash->buckets[key]; | 1221 | hhd = &hash->buckets[key]; |
1222 | hlist_add_head(&entry->hlist, hhd); | 1222 | hlist_add_head(&entry->hlist, hhd); |
1223 | hash->count++; | 1223 | hash->count++; |
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | 1226 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) |
1227 | { | 1227 | { |
1228 | struct ftrace_func_entry *entry; | 1228 | struct ftrace_func_entry *entry; |
1229 | 1229 | ||
1230 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 1230 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
1231 | if (!entry) | 1231 | if (!entry) |
1232 | return -ENOMEM; | 1232 | return -ENOMEM; |
1233 | 1233 | ||
1234 | entry->ip = ip; | 1234 | entry->ip = ip; |
1235 | __add_hash_entry(hash, entry); | 1235 | __add_hash_entry(hash, entry); |
1236 | 1236 | ||
1237 | return 0; | 1237 | return 0; |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | static void | 1240 | static void |
1241 | free_hash_entry(struct ftrace_hash *hash, | 1241 | free_hash_entry(struct ftrace_hash *hash, |
1242 | struct ftrace_func_entry *entry) | 1242 | struct ftrace_func_entry *entry) |
1243 | { | 1243 | { |
1244 | hlist_del(&entry->hlist); | 1244 | hlist_del(&entry->hlist); |
1245 | kfree(entry); | 1245 | kfree(entry); |
1246 | hash->count--; | 1246 | hash->count--; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | static void | 1249 | static void |
1250 | remove_hash_entry(struct ftrace_hash *hash, | 1250 | remove_hash_entry(struct ftrace_hash *hash, |
1251 | struct ftrace_func_entry *entry) | 1251 | struct ftrace_func_entry *entry) |
1252 | { | 1252 | { |
1253 | hlist_del(&entry->hlist); | 1253 | hlist_del(&entry->hlist); |
1254 | hash->count--; | 1254 | hash->count--; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | static void ftrace_hash_clear(struct ftrace_hash *hash) | 1257 | static void ftrace_hash_clear(struct ftrace_hash *hash) |
1258 | { | 1258 | { |
1259 | struct hlist_head *hhd; | 1259 | struct hlist_head *hhd; |
1260 | struct hlist_node *tn; | 1260 | struct hlist_node *tn; |
1261 | struct ftrace_func_entry *entry; | 1261 | struct ftrace_func_entry *entry; |
1262 | int size = 1 << hash->size_bits; | 1262 | int size = 1 << hash->size_bits; |
1263 | int i; | 1263 | int i; |
1264 | 1264 | ||
1265 | if (!hash->count) | 1265 | if (!hash->count) |
1266 | return; | 1266 | return; |
1267 | 1267 | ||
1268 | for (i = 0; i < size; i++) { | 1268 | for (i = 0; i < size; i++) { |
1269 | hhd = &hash->buckets[i]; | 1269 | hhd = &hash->buckets[i]; |
1270 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) | 1270 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) |
1271 | free_hash_entry(hash, entry); | 1271 | free_hash_entry(hash, entry); |
1272 | } | 1272 | } |
1273 | FTRACE_WARN_ON(hash->count); | 1273 | FTRACE_WARN_ON(hash->count); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | static void free_ftrace_hash(struct ftrace_hash *hash) | 1276 | static void free_ftrace_hash(struct ftrace_hash *hash) |
1277 | { | 1277 | { |
1278 | if (!hash || hash == EMPTY_HASH) | 1278 | if (!hash || hash == EMPTY_HASH) |
1279 | return; | 1279 | return; |
1280 | ftrace_hash_clear(hash); | 1280 | ftrace_hash_clear(hash); |
1281 | kfree(hash->buckets); | 1281 | kfree(hash->buckets); |
1282 | kfree(hash); | 1282 | kfree(hash); |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) | 1285 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) |
1286 | { | 1286 | { |
1287 | struct ftrace_hash *hash; | 1287 | struct ftrace_hash *hash; |
1288 | 1288 | ||
1289 | hash = container_of(rcu, struct ftrace_hash, rcu); | 1289 | hash = container_of(rcu, struct ftrace_hash, rcu); |
1290 | free_ftrace_hash(hash); | 1290 | free_ftrace_hash(hash); |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | 1293 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) |
1294 | { | 1294 | { |
1295 | if (!hash || hash == EMPTY_HASH) | 1295 | if (!hash || hash == EMPTY_HASH) |
1296 | return; | 1296 | return; |
1297 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | 1297 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); |
1298 | } | 1298 | } |
1299 | 1299 | ||
/*
 * ftrace_free_filter - release the filter and notrace hashes of @ops.
 *
 * Note this frees immediately (unlike free_ftrace_hash_rcu()), so the
 * ops must no longer be reachable by the tracer -- presumably callers
 * unregister first; confirm at call sites.
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->filter_hash);
	free_ftrace_hash(ops->notrace_hash);
}
1306 | 1306 | ||
1307 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | 1307 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) |
1308 | { | 1308 | { |
1309 | struct ftrace_hash *hash; | 1309 | struct ftrace_hash *hash; |
1310 | int size; | 1310 | int size; |
1311 | 1311 | ||
1312 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | 1312 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); |
1313 | if (!hash) | 1313 | if (!hash) |
1314 | return NULL; | 1314 | return NULL; |
1315 | 1315 | ||
1316 | size = 1 << size_bits; | 1316 | size = 1 << size_bits; |
1317 | hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); | 1317 | hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); |
1318 | 1318 | ||
1319 | if (!hash->buckets) { | 1319 | if (!hash->buckets) { |
1320 | kfree(hash); | 1320 | kfree(hash); |
1321 | return NULL; | 1321 | return NULL; |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | hash->size_bits = size_bits; | 1324 | hash->size_bits = size_bits; |
1325 | 1325 | ||
1326 | return hash; | 1326 | return hash; |
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | static struct ftrace_hash * | 1329 | static struct ftrace_hash * |
1330 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | 1330 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) |
1331 | { | 1331 | { |
1332 | struct ftrace_func_entry *entry; | 1332 | struct ftrace_func_entry *entry; |
1333 | struct ftrace_hash *new_hash; | 1333 | struct ftrace_hash *new_hash; |
1334 | int size; | 1334 | int size; |
1335 | int ret; | 1335 | int ret; |
1336 | int i; | 1336 | int i; |
1337 | 1337 | ||
1338 | new_hash = alloc_ftrace_hash(size_bits); | 1338 | new_hash = alloc_ftrace_hash(size_bits); |
1339 | if (!new_hash) | 1339 | if (!new_hash) |
1340 | return NULL; | 1340 | return NULL; |
1341 | 1341 | ||
1342 | /* Empty hash? */ | 1342 | /* Empty hash? */ |
1343 | if (ftrace_hash_empty(hash)) | 1343 | if (ftrace_hash_empty(hash)) |
1344 | return new_hash; | 1344 | return new_hash; |
1345 | 1345 | ||
1346 | size = 1 << hash->size_bits; | 1346 | size = 1 << hash->size_bits; |
1347 | for (i = 0; i < size; i++) { | 1347 | for (i = 0; i < size; i++) { |
1348 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | 1348 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
1349 | ret = add_hash_entry(new_hash, entry->ip); | 1349 | ret = add_hash_entry(new_hash, entry->ip); |
1350 | if (ret < 0) | 1350 | if (ret < 0) |
1351 | goto free_hash; | 1351 | goto free_hash; |
1352 | } | 1352 | } |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | FTRACE_WARN_ON(new_hash->count != hash->count); | 1355 | FTRACE_WARN_ON(new_hash->count != hash->count); |
1356 | 1356 | ||
1357 | return new_hash; | 1357 | return new_hash; |
1358 | 1358 | ||
1359 | free_hash: | 1359 | free_hash: |
1360 | free_ftrace_hash(new_hash); | 1360 | free_ftrace_hash(new_hash); |
1361 | return NULL; | 1361 | return NULL; |
1362 | } | 1362 | } |
1363 | 1363 | ||
/* Defined further down; needed here by ftrace_hash_move(). */
static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1368 | 1368 | ||
/*
 * ftrace_hash_move - replace *dst with the contents of @src
 * @ops:    the ops whose hash is being replaced
 * @enable: which hash: passed through as the filter_hash flag of
 *          ftrace_hash_rec_disable()/enable()
 * @dst:    location of the published hash pointer (RCU protected)
 * @src:    entries to move; this function empties @src
 *
 * The record accounting for the old hash is removed before the swap
 * and re-added afterwards; the old *dst is freed after a grace period.
 * Returns 0 on success, -ENOMEM if the new hash cannot be allocated
 * (in which case *dst is left unchanged but records are re-enabled).
 */
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable(ops, enable);

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		/* still need to update the function records */
		ret = 0;
		goto out;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	ret = -ENOMEM;
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		goto out;

	/* Move (not copy) the entries from @src into the new hash. */
	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	/* Publish the new hash, then free the old one after readers drain. */
	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ret = 0;
out:
	/*
	 * Enable regardless of ret:
	 * On success, we enable the new hash.
	 * On failure, we re-enable the original hash.
	 */
	ftrace_hash_rec_enable(ops, enable);

	return ret;
}
1440 | 1440 | ||
1441 | /* | 1441 | /* |
1442 | * Test the hashes for this ops to see if we want to call | 1442 | * Test the hashes for this ops to see if we want to call |
1443 | * the ops->func or not. | 1443 | * the ops->func or not. |
1444 | * | 1444 | * |
1445 | * It's a match if the ip is in the ops->filter_hash or | 1445 | * It's a match if the ip is in the ops->filter_hash or |
1446 | * the filter_hash does not exist or is empty, | 1446 | * the filter_hash does not exist or is empty, |
1447 | * AND | 1447 | * AND |
1448 | * the ip is not in the ops->notrace_hash. | 1448 | * the ip is not in the ops->notrace_hash. |
1449 | * | 1449 | * |
1450 | * This needs to be called with preemption disabled as | 1450 | * This needs to be called with preemption disabled as |
1451 | * the hashes are freed with call_rcu_sched(). | 1451 | * the hashes are freed with call_rcu_sched(). |
1452 | */ | 1452 | */ |
1453 | static int | 1453 | static int |
1454 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | 1454 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
1455 | { | 1455 | { |
1456 | struct ftrace_hash *filter_hash; | 1456 | struct ftrace_hash *filter_hash; |
1457 | struct ftrace_hash *notrace_hash; | 1457 | struct ftrace_hash *notrace_hash; |
1458 | int ret; | 1458 | int ret; |
1459 | 1459 | ||
1460 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | 1460 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
1461 | /* | 1461 | /* |
1462 | * There's a small race when adding ops that the ftrace handler | 1462 | * There's a small race when adding ops that the ftrace handler |
1463 | * that wants regs, may be called without them. We can not | 1463 | * that wants regs, may be called without them. We can not |
1464 | * allow that handler to be called if regs is NULL. | 1464 | * allow that handler to be called if regs is NULL. |
1465 | */ | 1465 | */ |
1466 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) | 1466 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) |
1467 | return 0; | 1467 | return 0; |
1468 | #endif | 1468 | #endif |
1469 | 1469 | ||
1470 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); | 1470 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); |
1471 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); | 1471 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); |
1472 | 1472 | ||
1473 | if ((ftrace_hash_empty(filter_hash) || | 1473 | if ((ftrace_hash_empty(filter_hash) || |
1474 | ftrace_lookup_ip(filter_hash, ip)) && | 1474 | ftrace_lookup_ip(filter_hash, ip)) && |
1475 | (ftrace_hash_empty(notrace_hash) || | 1475 | (ftrace_hash_empty(notrace_hash) || |
1476 | !ftrace_lookup_ip(notrace_hash, ip))) | 1476 | !ftrace_lookup_ip(notrace_hash, ip))) |
1477 | ret = 1; | 1477 | ret = 1; |
1478 | else | 1478 | else |
1479 | ret = 0; | 1479 | ret = 0; |
1480 | 1480 | ||
1481 | return ret; | 1481 | return ret; |
1482 | } | 1482 | } |
1483 | 1483 | ||
1484 | /* | 1484 | /* |
1485 | * This is a double for. Do not use 'break' to break out of the loop, | 1485 | * This is a double for. Do not use 'break' to break out of the loop, |
1486 | * you must use a goto. | 1486 | * you must use a goto. |
1487 | */ | 1487 | */ |
1488 | #define do_for_each_ftrace_rec(pg, rec) \ | 1488 | #define do_for_each_ftrace_rec(pg, rec) \ |
1489 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | 1489 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ |
1490 | int _____i; \ | 1490 | int _____i; \ |
1491 | for (_____i = 0; _____i < pg->index; _____i++) { \ | 1491 | for (_____i = 0; _____i < pg->index; _____i++) { \ |
1492 | rec = &pg->records[_____i]; | 1492 | rec = &pg->records[_____i]; |
1493 | 1493 | ||
1494 | #define while_for_each_ftrace_rec() \ | 1494 | #define while_for_each_ftrace_rec() \ |
1495 | } \ | 1495 | } \ |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | 1498 | ||
/*
 * bsearch() comparator for dyn_ftrace records.  The key's ->ip/->flags
 * are overloaded to hold a [start, end] address range (see
 * ftrace_location_range()); a record "matches" (returns 0) when that
 * range overlaps [rec->ip, rec->ip + MCOUNT_INSN_SIZE).
 */
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* key->flags is the range end: range ends before this record */
	if (key->flags < rec->ip)
		return -1;
	/* key->ip is the range start: range begins after this record */
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
1510 | 1510 | ||
/*
 * Search all record pages for an entry whose mcount slot
 * [ip, ip + MCOUNT_INSN_SIZE) overlaps [start, end] (end inclusive).
 * Returns the record's ip, or 0 if none is found.
 *
 * Uses bsearch(), so it relies on the records within each page being
 * sorted by ip, with records[0]/records[index - 1] bounding the page.
 */
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		/* Skip pages whose address span cannot contain the range */
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}
1533 | 1533 | ||
1534 | /** | 1534 | /** |
1535 | * ftrace_location - return true if the ip giving is a traced location | 1535 | * ftrace_location - return true if the ip giving is a traced location |
1536 | * @ip: the instruction pointer to check | 1536 | * @ip: the instruction pointer to check |
1537 | * | 1537 | * |
1538 | * Returns rec->ip if @ip given is a pointer to a ftrace location. | 1538 | * Returns rec->ip if @ip given is a pointer to a ftrace location. |
1539 | * That is, the instruction that is either a NOP or call to | 1539 | * That is, the instruction that is either a NOP or call to |
1540 | * the function tracer. It checks the ftrace internal tables to | 1540 | * the function tracer. It checks the ftrace internal tables to |
1541 | * determine if the address belongs or not. | 1541 | * determine if the address belongs or not. |
1542 | */ | 1542 | */ |
1543 | unsigned long ftrace_location(unsigned long ip) | 1543 | unsigned long ftrace_location(unsigned long ip) |
1544 | { | 1544 | { |
1545 | return ftrace_location_range(ip, ip); | 1545 | return ftrace_location_range(ip, ip); |
1546 | } | 1546 | } |
1547 | 1547 | ||
1548 | /** | 1548 | /** |
1549 | * ftrace_text_reserved - return true if range contains an ftrace location | 1549 | * ftrace_text_reserved - return true if range contains an ftrace location |
1550 | * @start: start of range to search | 1550 | * @start: start of range to search |
1551 | * @end: end of range to search (inclusive). @end points to the last byte to check. | 1551 | * @end: end of range to search (inclusive). @end points to the last byte to check. |
1552 | * | 1552 | * |
1553 | * Returns 1 if @start and @end contains a ftrace location. | 1553 | * Returns 1 if @start and @end contains a ftrace location. |
1554 | * That is, the instruction that is either a NOP or call to | 1554 | * That is, the instruction that is either a NOP or call to |
1555 | * the function tracer. It checks the ftrace internal tables to | 1555 | * the function tracer. It checks the ftrace internal tables to |
1556 | * determine if the address belongs or not. | 1556 | * determine if the address belongs or not. |
1557 | */ | 1557 | */ |
1558 | int ftrace_text_reserved(const void *start, const void *end) | 1558 | int ftrace_text_reserved(const void *start, const void *end) |
1559 | { | 1559 | { |
1560 | unsigned long ret; | 1560 | unsigned long ret; |
1561 | 1561 | ||
1562 | ret = ftrace_location_range((unsigned long)start, | 1562 | ret = ftrace_location_range((unsigned long)start, |
1563 | (unsigned long)end); | 1563 | (unsigned long)end); |
1564 | 1564 | ||
1565 | return (int)!!ret; | 1565 | return (int)!!ret; |
1566 | } | 1566 | } |
1567 | 1567 | ||
1568 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | 1568 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, |
1569 | int filter_hash, | 1569 | int filter_hash, |
1570 | bool inc) | 1570 | bool inc) |
1571 | { | 1571 | { |
1572 | struct ftrace_hash *hash; | 1572 | struct ftrace_hash *hash; |
1573 | struct ftrace_hash *other_hash; | 1573 | struct ftrace_hash *other_hash; |
1574 | struct ftrace_page *pg; | 1574 | struct ftrace_page *pg; |
1575 | struct dyn_ftrace *rec; | 1575 | struct dyn_ftrace *rec; |
1576 | int count = 0; | 1576 | int count = 0; |
1577 | int all = 0; | 1577 | int all = 0; |
1578 | 1578 | ||
1579 | /* Only update if the ops has been registered */ | 1579 | /* Only update if the ops has been registered */ |
1580 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | 1580 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
1581 | return; | 1581 | return; |
1582 | 1582 | ||
1583 | /* | 1583 | /* |
1584 | * In the filter_hash case: | 1584 | * In the filter_hash case: |
1585 | * If the count is zero, we update all records. | 1585 | * If the count is zero, we update all records. |
1586 | * Otherwise we just update the items in the hash. | 1586 | * Otherwise we just update the items in the hash. |
1587 | * | 1587 | * |
1588 | * In the notrace_hash case: | 1588 | * In the notrace_hash case: |
1589 | * We enable the update in the hash. | 1589 | * We enable the update in the hash. |
1590 | * As disabling notrace means enabling the tracing, | 1590 | * As disabling notrace means enabling the tracing, |
1591 | * and enabling notrace means disabling, the inc variable | 1591 | * and enabling notrace means disabling, the inc variable |
1592 | * gets inversed. | 1592 | * gets inversed. |
1593 | */ | 1593 | */ |
1594 | if (filter_hash) { | 1594 | if (filter_hash) { |
1595 | hash = ops->filter_hash; | 1595 | hash = ops->filter_hash; |
1596 | other_hash = ops->notrace_hash; | 1596 | other_hash = ops->notrace_hash; |
1597 | if (ftrace_hash_empty(hash)) | 1597 | if (ftrace_hash_empty(hash)) |
1598 | all = 1; | 1598 | all = 1; |
1599 | } else { | 1599 | } else { |
1600 | inc = !inc; | 1600 | inc = !inc; |
1601 | hash = ops->notrace_hash; | 1601 | hash = ops->notrace_hash; |
1602 | other_hash = ops->filter_hash; | 1602 | other_hash = ops->filter_hash; |
1603 | /* | 1603 | /* |
1604 | * If the notrace hash has no items, | 1604 | * If the notrace hash has no items, |
1605 | * then there's nothing to do. | 1605 | * then there's nothing to do. |
1606 | */ | 1606 | */ |
1607 | if (ftrace_hash_empty(hash)) | 1607 | if (ftrace_hash_empty(hash)) |
1608 | return; | 1608 | return; |
1609 | } | 1609 | } |
1610 | 1610 | ||
1611 | do_for_each_ftrace_rec(pg, rec) { | 1611 | do_for_each_ftrace_rec(pg, rec) { |
1612 | int in_other_hash = 0; | 1612 | int in_other_hash = 0; |
1613 | int in_hash = 0; | 1613 | int in_hash = 0; |
1614 | int match = 0; | 1614 | int match = 0; |
1615 | 1615 | ||
1616 | if (all) { | 1616 | if (all) { |
1617 | /* | 1617 | /* |
1618 | * Only the filter_hash affects all records. | 1618 | * Only the filter_hash affects all records. |
1619 | * Update if the record is not in the notrace hash. | 1619 | * Update if the record is not in the notrace hash. |
1620 | */ | 1620 | */ |
1621 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) | 1621 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) |
1622 | match = 1; | 1622 | match = 1; |
1623 | } else { | 1623 | } else { |
1624 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); | 1624 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); |
1625 | in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); | 1625 | in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); |
1626 | 1626 | ||
			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are
			 * disabled by this hash.
			 */
1630 | if (filter_hash && in_hash && !in_other_hash) | 1630 | if (filter_hash && in_hash && !in_other_hash) |
1631 | match = 1; | 1631 | match = 1; |
1632 | else if (!filter_hash && in_hash && | 1632 | else if (!filter_hash && in_hash && |
1633 | (in_other_hash || ftrace_hash_empty(other_hash))) | 1633 | (in_other_hash || ftrace_hash_empty(other_hash))) |
1634 | match = 1; | 1634 | match = 1; |
1635 | } | 1635 | } |
1636 | if (!match) | 1636 | if (!match) |
1637 | continue; | 1637 | continue; |
1638 | 1638 | ||
1639 | if (inc) { | 1639 | if (inc) { |
1640 | rec->flags++; | 1640 | rec->flags++; |
1641 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | 1641 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) |
1642 | return; | 1642 | return; |
1643 | /* | 1643 | /* |
1644 | * If any ops wants regs saved for this function | 1644 | * If any ops wants regs saved for this function |
1645 | * then all ops will get saved regs. | 1645 | * then all ops will get saved regs. |
1646 | */ | 1646 | */ |
1647 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | 1647 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
1648 | rec->flags |= FTRACE_FL_REGS; | 1648 | rec->flags |= FTRACE_FL_REGS; |
1649 | } else { | 1649 | } else { |
1650 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | 1650 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) |
1651 | return; | 1651 | return; |
1652 | rec->flags--; | 1652 | rec->flags--; |
1653 | } | 1653 | } |
1654 | count++; | 1654 | count++; |
1655 | /* Shortcut, if we handled all records, we are done. */ | 1655 | /* Shortcut, if we handled all records, we are done. */ |
1656 | if (!all && count == hash->count) | 1656 | if (!all && count == hash->count) |
1657 | return; | 1657 | return; |
1658 | } while_for_each_ftrace_rec(); | 1658 | } while_for_each_ftrace_rec(); |
1659 | } | 1659 | } |
1660 | 1660 | ||
/*
 * Decrement the ref count on every record matched by @ops' hashes
 * (drives __ftrace_hash_rec_update() with inc == 0; @filter_hash
 * selects whether the filter or notrace hash leads the match).
 */
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}
1666 | 1666 | ||
/*
 * Increment the ref count on every record matched by @ops' hashes
 * (drives __ftrace_hash_rec_update() with inc == 1; @filter_hash
 * selects whether the filter or notrace hash leads the match).
 */
static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}
1672 | 1672 | ||
1673 | static void print_ip_ins(const char *fmt, unsigned char *p) | 1673 | static void print_ip_ins(const char *fmt, unsigned char *p) |
1674 | { | 1674 | { |
1675 | int i; | 1675 | int i; |
1676 | 1676 | ||
1677 | printk(KERN_CONT "%s", fmt); | 1677 | printk(KERN_CONT "%s", fmt); |
1678 | 1678 | ||
1679 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | 1679 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) |
1680 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | 1680 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); |
1681 | } | 1681 | } |
1682 | 1682 | ||
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		/* Show the bytes actually found at the call site */
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
1721 | 1721 | ||
/*
 * Work out what (if anything) must be done to the call site @rec to
 * match its current ref count and REGS state.  If @update is set, the
 * ENABLED/REGS_EN bookkeeping bits in rec->flags are brought up to
 * date; otherwise the record is left untouched and only the required
 * action is reported (the "test" mode used by ftrace_test_record()).
 *
 * Returns one of the FTRACE_UPDATE_* actions for the arch code.
 */
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, then
	 * do not ignore this record. Set flags to fail the compare against
	 * ENABLED.
	 */
	if (flag &&
	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
		flag |= FTRACE_FL_REGS;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			/* Keep REGS_EN in sync with the requested REGS state */
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise, if the EN flag is set, then return
		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
		 *   from the non-save regs, to a save regs function.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;
		else if (rec->flags & FTRACE_FL_REGS_EN)
			return FTRACE_UPDATE_MODIFY_CALL_REGS;
		else
			return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!(rec->flags & ~FTRACE_FL_MASK))
			rec->flags = 0;
		else
			/* Just disable the record (keep REGS state) */
			rec->flags &= ~FTRACE_FL_ENABLED;
	}

	return FTRACE_UPDATE_MAKE_NOP;
}
1796 | 1796 | ||
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 *
 * This is ftrace_check_record() with update set: the record's
 * bookkeeping flags are modified as well as the action returned.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}
1809 | 1809 | ||
/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 *
 * This is ftrace_check_record() without update: the record is left
 * untouched and only the required action is returned.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
1823 | 1823 | ||
/*
 * Bring a single call site @rec in line with @enable and perform the
 * matching arch patch (make call, make nop, or modify call).
 * Returns 0 on success or the arch error code for ftrace_bug().
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	/* Records wanting saved regs call the regs-saving trampoline */
	if (rec->flags & FTRACE_FL_REGS)
		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
	else
		ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);

	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
		/* Converting between the regs and non-regs trampolines */
		if (rec->flags & FTRACE_FL_REGS)
			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
		else
			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;

		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
1860 | 1860 | ||
1861 | void __weak ftrace_replace_code(int enable) | 1861 | void __weak ftrace_replace_code(int enable) |
1862 | { | 1862 | { |
1863 | struct dyn_ftrace *rec; | 1863 | struct dyn_ftrace *rec; |
1864 | struct ftrace_page *pg; | 1864 | struct ftrace_page *pg; |
1865 | int failed; | 1865 | int failed; |
1866 | 1866 | ||
1867 | if (unlikely(ftrace_disabled)) | 1867 | if (unlikely(ftrace_disabled)) |
1868 | return; | 1868 | return; |
1869 | 1869 | ||
1870 | do_for_each_ftrace_rec(pg, rec) { | 1870 | do_for_each_ftrace_rec(pg, rec) { |
1871 | failed = __ftrace_replace_code(rec, enable); | 1871 | failed = __ftrace_replace_code(rec, enable); |
1872 | if (failed) { | 1872 | if (failed) { |
1873 | ftrace_bug(failed, rec->ip); | 1873 | ftrace_bug(failed, rec->ip); |
1874 | /* Stop processing */ | 1874 | /* Stop processing */ |
1875 | return; | 1875 | return; |
1876 | } | 1876 | } |
1877 | } while_for_each_ftrace_rec(); | 1877 | } while_for_each_ftrace_rec(); |
1878 | } | 1878 | } |
1879 | 1879 | ||
/* Cursor for walking all dyn_ftrace records page by page */
struct ftrace_rec_iter {
	struct ftrace_page	*pg;	/* current page of records */
	int			index;	/* index into pg->records */
};
1884 | 1884 | ||
1885 | /** | 1885 | /** |
1886 | * ftrace_rec_iter_start, start up iterating over traced functions | 1886 | * ftrace_rec_iter_start, start up iterating over traced functions |
1887 | * | 1887 | * |
1888 | * Returns an iterator handle that is used to iterate over all | 1888 | * Returns an iterator handle that is used to iterate over all |
1889 | * the records that represent address locations where functions | 1889 | * the records that represent address locations where functions |
1890 | * are traced. | 1890 | * are traced. |
1891 | * | 1891 | * |
1892 | * May return NULL if no records are available. | 1892 | * May return NULL if no records are available. |
1893 | */ | 1893 | */ |
1894 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) | 1894 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) |
1895 | { | 1895 | { |
1896 | /* | 1896 | /* |
1897 | * We only use a single iterator. | 1897 | * We only use a single iterator. |
1898 | * Protected by the ftrace_lock mutex. | 1898 | * Protected by the ftrace_lock mutex. |
1899 | */ | 1899 | */ |
1900 | static struct ftrace_rec_iter ftrace_rec_iter; | 1900 | static struct ftrace_rec_iter ftrace_rec_iter; |
1901 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; | 1901 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; |
1902 | 1902 | ||
1903 | iter->pg = ftrace_pages_start; | 1903 | iter->pg = ftrace_pages_start; |
1904 | iter->index = 0; | 1904 | iter->index = 0; |
1905 | 1905 | ||
1906 | /* Could have empty pages */ | 1906 | /* Could have empty pages */ |
1907 | while (iter->pg && !iter->pg->index) | 1907 | while (iter->pg && !iter->pg->index) |
1908 | iter->pg = iter->pg->next; | 1908 | iter->pg = iter->pg->next; |
1909 | 1909 | ||
1910 | if (!iter->pg) | 1910 | if (!iter->pg) |
1911 | return NULL; | 1911 | return NULL; |
1912 | 1912 | ||
1913 | return iter; | 1913 | return iter; |
1914 | } | 1914 | } |
1915 | 1915 | ||
1916 | /** | 1916 | /** |
1917 | * ftrace_rec_iter_next, get the next record to process. | 1917 | * ftrace_rec_iter_next, get the next record to process. |
1918 | * @iter: The handle to the iterator. | 1918 | * @iter: The handle to the iterator. |
1919 | * | 1919 | * |
1920 | * Returns the next iterator after the given iterator @iter. | 1920 | * Returns the next iterator after the given iterator @iter. |
1921 | */ | 1921 | */ |
1922 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) | 1922 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) |
1923 | { | 1923 | { |
1924 | iter->index++; | 1924 | iter->index++; |
1925 | 1925 | ||
1926 | if (iter->index >= iter->pg->index) { | 1926 | if (iter->index >= iter->pg->index) { |
1927 | iter->pg = iter->pg->next; | 1927 | iter->pg = iter->pg->next; |
1928 | iter->index = 0; | 1928 | iter->index = 0; |
1929 | 1929 | ||
1930 | /* Could have empty pages */ | 1930 | /* Could have empty pages */ |
1931 | while (iter->pg && !iter->pg->index) | 1931 | while (iter->pg && !iter->pg->index) |
1932 | iter->pg = iter->pg->next; | 1932 | iter->pg = iter->pg->next; |
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | if (!iter->pg) | 1935 | if (!iter->pg) |
1936 | return NULL; | 1936 | return NULL; |
1937 | 1937 | ||
1938 | return iter; | 1938 | return iter; |
1939 | } | 1939 | } |
1940 | 1940 | ||
/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
1951 | 1951 | ||
1952 | static int | 1952 | static int |
1953 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | 1953 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
1954 | { | 1954 | { |
1955 | unsigned long ip; | 1955 | unsigned long ip; |
1956 | int ret; | 1956 | int ret; |
1957 | 1957 | ||
1958 | ip = rec->ip; | 1958 | ip = rec->ip; |
1959 | 1959 | ||
1960 | if (unlikely(ftrace_disabled)) | 1960 | if (unlikely(ftrace_disabled)) |
1961 | return 0; | 1961 | return 0; |
1962 | 1962 | ||
1963 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 1963 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
1964 | if (ret) { | 1964 | if (ret) { |
1965 | ftrace_bug(ret, ip); | 1965 | ftrace_bug(ret, ip); |
1966 | return 0; | 1966 | return 0; |
1967 | } | 1967 | } |
1968 | return 1; | 1968 | return 1; |
1969 | } | 1969 | } |
1970 | 1970 | ||
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.  Returns 0 on success;
 * a non-zero return aborts the code modification.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}
1979 | 1979 | ||
/*
 * archs can override this function if they must do something
 * after the modifying code is performed.  Returns 0 on success.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
1988 | 1988 | ||
/*
 * Apply the changes requested by @command (a mask of FTRACE_* bits)
 * to the traced call sites and the ftrace trampoline function.
 */
void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		/*
		 * Publish the new trace op before switching away from
		 * the list func: the write barrier plus IPI make sure
		 * other CPUs see function_trace_op first.
		 */
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}
2032 | 2032 | ||
/* stop_machine() callback: @data points to the ftrace command mask */
static int __ftrace_modify_code(void *data)
{
	ftrace_modify_all_code(*(int *)data);

	return 0;
}
2041 | 2041 | ||
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method,
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
2053 | 2053 | ||
/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}
2065 | 2065 | ||
/*
 * Patch the kernel text as requested by @command, bracketed by the
 * arch prepare/post-process hooks and with the function tracer
 * suppressed for the duration of the modification.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;
	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine.
	 */
	function_trace_stop++;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	function_trace_stop--;

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
2093 | 2093 | ||
/* Last trace function pushed via ftrace_update_ftrace_func() */
static ftrace_func_t saved_ftrace_func;
/* Count of ftrace_startup() calls minus ftrace_shutdown() calls */
static int ftrace_start_up;
/* Nesting count of started ops that share the global filter hashes */
static int global_start_up;
2097 | 2097 | ||
/* Release the per-cpu 'disabled' counter of a control ops */
static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}
2102 | 2102 | ||
2103 | static void ftrace_startup_enable(int command) | 2103 | static void ftrace_startup_enable(int command) |
2104 | { | 2104 | { |
2105 | if (saved_ftrace_func != ftrace_trace_function) { | 2105 | if (saved_ftrace_func != ftrace_trace_function) { |
2106 | saved_ftrace_func = ftrace_trace_function; | 2106 | saved_ftrace_func = ftrace_trace_function; |
2107 | command |= FTRACE_UPDATE_TRACE_FUNC; | 2107 | command |= FTRACE_UPDATE_TRACE_FUNC; |
2108 | } | 2108 | } |
2109 | 2109 | ||
2110 | if (!command || !ftrace_enabled) | 2110 | if (!command || !ftrace_enabled) |
2111 | return; | 2111 | return; |
2112 | 2112 | ||
2113 | ftrace_run_update_code(command); | 2113 | ftrace_run_update_code(command); |
2114 | } | 2114 | } |
2115 | 2115 | ||
/*
 * Register @ops with the function tracer, bump the ref counts on the
 * records its hashes select, and run the code patching for @command
 * (always including FTRACE_UPDATE_CALLS).  Returns 0 on success,
 * -ENODEV if ftrace is disabled, or the error from
 * __register_ftrace_function().
 */
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		/* Bump ref counts on every record this ops traces */
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
2148 | 2148 | ||
/*
 * Unregister @ops and disable the tracing it requested.
 *
 * Mirror of ftrace_startup(): removes the ops from the list, drops the
 * start-up counters, un-accounts its hashes, runs the code update and
 * finally synchronizes with (and frees the per-cpu state of) any
 * dynamic/control ops still possibly executing.
 *
 * Returns 0 on success, -ENODEV if ftrace is permanently disabled, or
 * the error from __unregister_ftrace_function().
 */
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	/* Keep ENABLED on global_ops while it still has users */
	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	/*
	 * NOTE(review): FTRACE_UPDATE_CALLS was just OR'd in above, so
	 * command is never zero here — only !ftrace_enabled can take
	 * this branch.
	 */
	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	ftrace_run_update_code(command);

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}
2230 | 2230 | ||
2231 | static void ftrace_startup_sysctl(void) | 2231 | static void ftrace_startup_sysctl(void) |
2232 | { | 2232 | { |
2233 | if (unlikely(ftrace_disabled)) | 2233 | if (unlikely(ftrace_disabled)) |
2234 | return; | 2234 | return; |
2235 | 2235 | ||
2236 | /* Force update next time */ | 2236 | /* Force update next time */ |
2237 | saved_ftrace_func = NULL; | 2237 | saved_ftrace_func = NULL; |
2238 | /* ftrace_start_up is true if we want ftrace running */ | 2238 | /* ftrace_start_up is true if we want ftrace running */ |
2239 | if (ftrace_start_up) | 2239 | if (ftrace_start_up) |
2240 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 2240 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); |
2241 | } | 2241 | } |
2242 | 2242 | ||
2243 | static void ftrace_shutdown_sysctl(void) | 2243 | static void ftrace_shutdown_sysctl(void) |
2244 | { | 2244 | { |
2245 | if (unlikely(ftrace_disabled)) | 2245 | if (unlikely(ftrace_disabled)) |
2246 | return; | 2246 | return; |
2247 | 2247 | ||
2248 | /* ftrace_start_up is true if ftrace is running */ | 2248 | /* ftrace_start_up is true if ftrace is running */ |
2249 | if (ftrace_start_up) | 2249 | if (ftrace_start_up) |
2250 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | 2250 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); |
2251 | } | 2251 | } |
2252 | 2252 | ||
2253 | static cycle_t ftrace_update_time; | 2253 | static cycle_t ftrace_update_time; |
2254 | unsigned long ftrace_update_tot_cnt; | 2254 | unsigned long ftrace_update_tot_cnt; |
2255 | 2255 | ||
2256 | static inline int ops_traces_mod(struct ftrace_ops *ops) | 2256 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
2257 | { | 2257 | { |
2258 | /* | 2258 | /* |
2259 | * Filter_hash being empty will default to trace module. | 2259 | * Filter_hash being empty will default to trace module. |
2260 | * But notrace hash requires a test of individual module functions. | 2260 | * But notrace hash requires a test of individual module functions. |
2261 | */ | 2261 | */ |
2262 | return ftrace_hash_empty(ops->filter_hash) && | 2262 | return ftrace_hash_empty(ops->filter_hash) && |
2263 | ftrace_hash_empty(ops->notrace_hash); | 2263 | ftrace_hash_empty(ops->notrace_hash); |
2264 | } | 2264 | } |
2265 | 2265 | ||
2266 | /* | 2266 | /* |
2267 | * Check if the current ops references the record. | 2267 | * Check if the current ops references the record. |
2268 | * | 2268 | * |
2269 | * If the ops traces all functions, then it was already accounted for. | 2269 | * If the ops traces all functions, then it was already accounted for. |
2270 | * If the ops does not trace the current record function, skip it. | 2270 | * If the ops does not trace the current record function, skip it. |
2271 | * If the ops ignores the function via notrace filter, skip it. | 2271 | * If the ops ignores the function via notrace filter, skip it. |
2272 | */ | 2272 | */ |
2273 | static inline bool | 2273 | static inline bool |
2274 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | 2274 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) |
2275 | { | 2275 | { |
2276 | /* If ops isn't enabled, ignore it */ | 2276 | /* If ops isn't enabled, ignore it */ |
2277 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | 2277 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
2278 | return 0; | 2278 | return 0; |
2279 | 2279 | ||
2280 | /* If ops traces all mods, we already accounted for it */ | 2280 | /* If ops traces all mods, we already accounted for it */ |
2281 | if (ops_traces_mod(ops)) | 2281 | if (ops_traces_mod(ops)) |
2282 | return 0; | 2282 | return 0; |
2283 | 2283 | ||
2284 | /* The function must be in the filter */ | 2284 | /* The function must be in the filter */ |
2285 | if (!ftrace_hash_empty(ops->filter_hash) && | 2285 | if (!ftrace_hash_empty(ops->filter_hash) && |
2286 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) | 2286 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) |
2287 | return 0; | 2287 | return 0; |
2288 | 2288 | ||
2289 | /* If in notrace hash, we ignore it too */ | 2289 | /* If in notrace hash, we ignore it too */ |
2290 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) | 2290 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) |
2291 | return 0; | 2291 | return 0; |
2292 | 2292 | ||
2293 | return 1; | 2293 | return 1; |
2294 | } | 2294 | } |
2295 | 2295 | ||
2296 | static int referenced_filters(struct dyn_ftrace *rec) | 2296 | static int referenced_filters(struct dyn_ftrace *rec) |
2297 | { | 2297 | { |
2298 | struct ftrace_ops *ops; | 2298 | struct ftrace_ops *ops; |
2299 | int cnt = 0; | 2299 | int cnt = 0; |
2300 | 2300 | ||
2301 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | 2301 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
2302 | if (ops_references_rec(ops, rec)) | 2302 | if (ops_references_rec(ops, rec)) |
2303 | cnt++; | 2303 | cnt++; |
2304 | } | 2304 | } |
2305 | 2305 | ||
2306 | return cnt; | 2306 | return cnt; |
2307 | } | 2307 | } |
2308 | 2308 | ||
/*
 * Convert the mcount call sites of the new record pages @new_pgs into
 * nops and, if tracing is already running, enable the records that the
 * currently registered ops want traced.
 *
 * @mod is the module that owns the records (it appears NULL for the
 * core kernel pass — confirm against the callers).
 *
 * Returns 0 on success, -1 if ftrace became disabled mid-way (the
 * remaining records are then left untouched).
 */
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			/* Base count: ops that trace all module functions */
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			/* Some ops filter; count the ones matching this record */
			if (test)
				cnt += referenced_filters(p);
			p->flags = cnt;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediatelly is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}
2387 | 2387 | ||
2388 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) | 2388 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
2389 | { | 2389 | { |
2390 | int order; | 2390 | int order; |
2391 | int cnt; | 2391 | int cnt; |
2392 | 2392 | ||
2393 | if (WARN_ON(!count)) | 2393 | if (WARN_ON(!count)) |
2394 | return -EINVAL; | 2394 | return -EINVAL; |
2395 | 2395 | ||
2396 | order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); | 2396 | order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
2397 | 2397 | ||
2398 | /* | 2398 | /* |
2399 | * We want to fill as much as possible. No more than a page | 2399 | * We want to fill as much as possible. No more than a page |
2400 | * may be empty. | 2400 | * may be empty. |
2401 | */ | 2401 | */ |
2402 | while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) | 2402 | while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) |
2403 | order--; | 2403 | order--; |
2404 | 2404 | ||
2405 | again: | 2405 | again: |
2406 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 2406 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
2407 | 2407 | ||
2408 | if (!pg->records) { | 2408 | if (!pg->records) { |
2409 | /* if we can't allocate this size, try something smaller */ | 2409 | /* if we can't allocate this size, try something smaller */ |
2410 | if (!order) | 2410 | if (!order) |
2411 | return -ENOMEM; | 2411 | return -ENOMEM; |
2412 | order >>= 1; | 2412 | order >>= 1; |
2413 | goto again; | 2413 | goto again; |
2414 | } | 2414 | } |
2415 | 2415 | ||
2416 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; | 2416 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
2417 | pg->size = cnt; | 2417 | pg->size = cnt; |
2418 | 2418 | ||
2419 | if (cnt > count) | 2419 | if (cnt > count) |
2420 | cnt = count; | 2420 | cnt = count; |
2421 | 2421 | ||
2422 | return cnt; | 2422 | return cnt; |
2423 | } | 2423 | } |
2424 | 2424 | ||
2425 | static struct ftrace_page * | 2425 | static struct ftrace_page * |
2426 | ftrace_allocate_pages(unsigned long num_to_init) | 2426 | ftrace_allocate_pages(unsigned long num_to_init) |
2427 | { | 2427 | { |
2428 | struct ftrace_page *start_pg; | 2428 | struct ftrace_page *start_pg; |
2429 | struct ftrace_page *pg; | 2429 | struct ftrace_page *pg; |
2430 | int order; | 2430 | int order; |
2431 | int cnt; | 2431 | int cnt; |
2432 | 2432 | ||
2433 | if (!num_to_init) | 2433 | if (!num_to_init) |
2434 | return 0; | 2434 | return 0; |
2435 | 2435 | ||
2436 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); | 2436 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
2437 | if (!pg) | 2437 | if (!pg) |
2438 | return NULL; | 2438 | return NULL; |
2439 | 2439 | ||
2440 | /* | 2440 | /* |
2441 | * Try to allocate as much as possible in one continues | 2441 | * Try to allocate as much as possible in one continues |
2442 | * location that fills in all of the space. We want to | 2442 | * location that fills in all of the space. We want to |
2443 | * waste as little space as possible. | 2443 | * waste as little space as possible. |
2444 | */ | 2444 | */ |
2445 | for (;;) { | 2445 | for (;;) { |
2446 | cnt = ftrace_allocate_records(pg, num_to_init); | 2446 | cnt = ftrace_allocate_records(pg, num_to_init); |
2447 | if (cnt < 0) | 2447 | if (cnt < 0) |
2448 | goto free_pages; | 2448 | goto free_pages; |
2449 | 2449 | ||
2450 | num_to_init -= cnt; | 2450 | num_to_init -= cnt; |
2451 | if (!num_to_init) | 2451 | if (!num_to_init) |
2452 | break; | 2452 | break; |
2453 | 2453 | ||
2454 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); | 2454 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); |
2455 | if (!pg->next) | 2455 | if (!pg->next) |
2456 | goto free_pages; | 2456 | goto free_pages; |
2457 | 2457 | ||
2458 | pg = pg->next; | 2458 | pg = pg->next; |
2459 | } | 2459 | } |
2460 | 2460 | ||
2461 | return start_pg; | 2461 | return start_pg; |
2462 | 2462 | ||
2463 | free_pages: | 2463 | free_pages: |
2464 | while (start_pg) { | 2464 | while (start_pg) { |
2465 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | 2465 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
2466 | free_pages((unsigned long)pg->records, order); | 2466 | free_pages((unsigned long)pg->records, order); |
2467 | start_pg = pg->next; | 2467 | start_pg = pg->next; |
2468 | kfree(pg); | 2468 | kfree(pg); |
2469 | pg = start_pg; | 2469 | pg = start_pg; |
2470 | } | 2470 | } |
2471 | pr_info("ftrace: FAILED to allocate memory for functions\n"); | 2471 | pr_info("ftrace: FAILED to allocate memory for functions\n"); |
2472 | return NULL; | 2472 | return NULL; |
2473 | } | 2473 | } |
2474 | 2474 | ||
2475 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 2475 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
2476 | 2476 | ||
/*
 * Iterator state for the ftrace seq_file readers (see t_start/t_next/
 * t_hash_next below).  The walk covers the dyn_ftrace record pages
 * first and then, when FTRACE_ITER_DO_HASH is set, the function probe
 * hash buckets.
 */
struct ftrace_iterator {
	loff_t pos;				/* last position handed out */
	loff_t func_pos;			/* position where function records end */
	struct ftrace_page *pg;			/* current record page */
	struct dyn_ftrace *func;		/* current function record */
	struct ftrace_func_probe *probe;	/* current probe during the hash walk */
	struct trace_parser parser;
	struct ftrace_hash *hash;
	struct ftrace_ops *ops;			/* ops whose hashes filter the walk */
	int hidx;				/* current bucket in ftrace_func_hash */
	int idx;				/* next index into pg->records */
	unsigned flags;				/* FTRACE_ITER_* state bits */
};
2490 | 2490 | ||
2491 | static void * | 2491 | static void * |
2492 | t_hash_next(struct seq_file *m, loff_t *pos) | 2492 | t_hash_next(struct seq_file *m, loff_t *pos) |
2493 | { | 2493 | { |
2494 | struct ftrace_iterator *iter = m->private; | 2494 | struct ftrace_iterator *iter = m->private; |
2495 | struct hlist_node *hnd = NULL; | 2495 | struct hlist_node *hnd = NULL; |
2496 | struct hlist_head *hhd; | 2496 | struct hlist_head *hhd; |
2497 | 2497 | ||
2498 | (*pos)++; | 2498 | (*pos)++; |
2499 | iter->pos = *pos; | 2499 | iter->pos = *pos; |
2500 | 2500 | ||
2501 | if (iter->probe) | 2501 | if (iter->probe) |
2502 | hnd = &iter->probe->node; | 2502 | hnd = &iter->probe->node; |
2503 | retry: | 2503 | retry: |
2504 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | 2504 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) |
2505 | return NULL; | 2505 | return NULL; |
2506 | 2506 | ||
2507 | hhd = &ftrace_func_hash[iter->hidx]; | 2507 | hhd = &ftrace_func_hash[iter->hidx]; |
2508 | 2508 | ||
2509 | if (hlist_empty(hhd)) { | 2509 | if (hlist_empty(hhd)) { |
2510 | iter->hidx++; | 2510 | iter->hidx++; |
2511 | hnd = NULL; | 2511 | hnd = NULL; |
2512 | goto retry; | 2512 | goto retry; |
2513 | } | 2513 | } |
2514 | 2514 | ||
2515 | if (!hnd) | 2515 | if (!hnd) |
2516 | hnd = hhd->first; | 2516 | hnd = hhd->first; |
2517 | else { | 2517 | else { |
2518 | hnd = hnd->next; | 2518 | hnd = hnd->next; |
2519 | if (!hnd) { | 2519 | if (!hnd) { |
2520 | iter->hidx++; | 2520 | iter->hidx++; |
2521 | goto retry; | 2521 | goto retry; |
2522 | } | 2522 | } |
2523 | } | 2523 | } |
2524 | 2524 | ||
2525 | if (WARN_ON_ONCE(!hnd)) | 2525 | if (WARN_ON_ONCE(!hnd)) |
2526 | return NULL; | 2526 | return NULL; |
2527 | 2527 | ||
2528 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); | 2528 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); |
2529 | 2529 | ||
2530 | return iter; | 2530 | return iter; |
2531 | } | 2531 | } |
2532 | 2532 | ||
2533 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | 2533 | static void *t_hash_start(struct seq_file *m, loff_t *pos) |
2534 | { | 2534 | { |
2535 | struct ftrace_iterator *iter = m->private; | 2535 | struct ftrace_iterator *iter = m->private; |
2536 | void *p = NULL; | 2536 | void *p = NULL; |
2537 | loff_t l; | 2537 | loff_t l; |
2538 | 2538 | ||
2539 | if (!(iter->flags & FTRACE_ITER_DO_HASH)) | 2539 | if (!(iter->flags & FTRACE_ITER_DO_HASH)) |
2540 | return NULL; | 2540 | return NULL; |
2541 | 2541 | ||
2542 | if (iter->func_pos > *pos) | 2542 | if (iter->func_pos > *pos) |
2543 | return NULL; | 2543 | return NULL; |
2544 | 2544 | ||
2545 | iter->hidx = 0; | 2545 | iter->hidx = 0; |
2546 | for (l = 0; l <= (*pos - iter->func_pos); ) { | 2546 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
2547 | p = t_hash_next(m, &l); | 2547 | p = t_hash_next(m, &l); |
2548 | if (!p) | 2548 | if (!p) |
2549 | break; | 2549 | break; |
2550 | } | 2550 | } |
2551 | if (!p) | 2551 | if (!p) |
2552 | return NULL; | 2552 | return NULL; |
2553 | 2553 | ||
2554 | /* Only set this if we have an item */ | 2554 | /* Only set this if we have an item */ |
2555 | iter->flags |= FTRACE_ITER_HASH; | 2555 | iter->flags |= FTRACE_ITER_HASH; |
2556 | 2556 | ||
2557 | return iter; | 2557 | return iter; |
2558 | } | 2558 | } |
2559 | 2559 | ||
2560 | static int | 2560 | static int |
2561 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) | 2561 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) |
2562 | { | 2562 | { |
2563 | struct ftrace_func_probe *rec; | 2563 | struct ftrace_func_probe *rec; |
2564 | 2564 | ||
2565 | rec = iter->probe; | 2565 | rec = iter->probe; |
2566 | if (WARN_ON_ONCE(!rec)) | 2566 | if (WARN_ON_ONCE(!rec)) |
2567 | return -EIO; | 2567 | return -EIO; |
2568 | 2568 | ||
2569 | if (rec->ops->print) | 2569 | if (rec->ops->print) |
2570 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | 2570 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); |
2571 | 2571 | ||
2572 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); | 2572 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); |
2573 | 2573 | ||
2574 | if (rec->data) | 2574 | if (rec->data) |
2575 | seq_printf(m, ":%p", rec->data); | 2575 | seq_printf(m, ":%p", rec->data); |
2576 | seq_putc(m, '\n'); | 2576 | seq_putc(m, '\n'); |
2577 | 2577 | ||
2578 | return 0; | 2578 | return 0; |
2579 | } | 2579 | } |
2580 | 2580 | ||
2581 | static void * | 2581 | static void * |
2582 | t_next(struct seq_file *m, void *v, loff_t *pos) | 2582 | t_next(struct seq_file *m, void *v, loff_t *pos) |
2583 | { | 2583 | { |
2584 | struct ftrace_iterator *iter = m->private; | 2584 | struct ftrace_iterator *iter = m->private; |
2585 | struct ftrace_ops *ops = iter->ops; | 2585 | struct ftrace_ops *ops = iter->ops; |
2586 | struct dyn_ftrace *rec = NULL; | 2586 | struct dyn_ftrace *rec = NULL; |
2587 | 2587 | ||
2588 | if (unlikely(ftrace_disabled)) | 2588 | if (unlikely(ftrace_disabled)) |
2589 | return NULL; | 2589 | return NULL; |
2590 | 2590 | ||
2591 | if (iter->flags & FTRACE_ITER_HASH) | 2591 | if (iter->flags & FTRACE_ITER_HASH) |
2592 | return t_hash_next(m, pos); | 2592 | return t_hash_next(m, pos); |
2593 | 2593 | ||
2594 | (*pos)++; | 2594 | (*pos)++; |
2595 | iter->pos = iter->func_pos = *pos; | 2595 | iter->pos = iter->func_pos = *pos; |
2596 | 2596 | ||
2597 | if (iter->flags & FTRACE_ITER_PRINTALL) | 2597 | if (iter->flags & FTRACE_ITER_PRINTALL) |
2598 | return t_hash_start(m, pos); | 2598 | return t_hash_start(m, pos); |
2599 | 2599 | ||
2600 | retry: | 2600 | retry: |
2601 | if (iter->idx >= iter->pg->index) { | 2601 | if (iter->idx >= iter->pg->index) { |
2602 | if (iter->pg->next) { | 2602 | if (iter->pg->next) { |
2603 | iter->pg = iter->pg->next; | 2603 | iter->pg = iter->pg->next; |
2604 | iter->idx = 0; | 2604 | iter->idx = 0; |
2605 | goto retry; | 2605 | goto retry; |
2606 | } | 2606 | } |
2607 | } else { | 2607 | } else { |
2608 | rec = &iter->pg->records[iter->idx++]; | 2608 | rec = &iter->pg->records[iter->idx++]; |
2609 | if (((iter->flags & FTRACE_ITER_FILTER) && | 2609 | if (((iter->flags & FTRACE_ITER_FILTER) && |
2610 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || | 2610 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || |
2611 | 2611 | ||
2612 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 2612 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
2613 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || | 2613 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || |
2614 | 2614 | ||
2615 | ((iter->flags & FTRACE_ITER_ENABLED) && | 2615 | ((iter->flags & FTRACE_ITER_ENABLED) && |
2616 | !(rec->flags & FTRACE_FL_ENABLED))) { | 2616 | !(rec->flags & FTRACE_FL_ENABLED))) { |
2617 | 2617 | ||
2618 | rec = NULL; | 2618 | rec = NULL; |
2619 | goto retry; | 2619 | goto retry; |
2620 | } | 2620 | } |
2621 | } | 2621 | } |
2622 | 2622 | ||
2623 | if (!rec) | 2623 | if (!rec) |
2624 | return t_hash_start(m, pos); | 2624 | return t_hash_start(m, pos); |
2625 | 2625 | ||
2626 | iter->func = rec; | 2626 | iter->func = rec; |
2627 | 2627 | ||
2628 | return iter; | 2628 | return iter; |
2629 | } | 2629 | } |
2630 | 2630 | ||
/*
 * Forget cached iterator progress so the next t_start() replays the
 * walk from the beginning (used after an lseek moved *pos backwards).
 */
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	/* Drop the "print all" / "in hash walk" state along with the position */
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
2637 | 2637 | ||
/*
 * seq_file ->start: take ftrace_lock and position the iterator at
 * *pos, replaying t_next() from the first page since the record pages
 * may have changed while the lock was dropped.
 *
 * NOTE(review): the early NULL returns leave ftrace_lock held; this
 * relies on seq_file always calling t_stop() (which unlocks) after
 * ->start — confirm against the seq_file contract.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER &&
	    ftrace_hash_empty(ops->filter_hash)) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	/* Records exhausted before reaching *pos; continue into the hash */
	if (!p)
		return t_hash_start(m, pos);

	return iter;
}
2692 | 2692 | ||
/* seq_file ->stop: drops the ftrace_lock taken by t_start() */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2697 | 2697 | ||
/*
 * seq_file ->show: print one entry for the available/enabled/filter
 * files.  Dispatches to t_hash_show() while iterating the probe hash,
 * prints a single "all functions enabled" banner when the filter hash
 * is empty, and otherwise prints the record's function symbol.  For
 * enabled_functions it also shows the ref count and an "R" marker when
 * the record saves registers (FTRACE_FL_REGS).
 */
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	/* t_next() may have run past the last record */
	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED)
		seq_printf(m, " (%ld)%s",
			   rec->flags & ~FTRACE_FL_MASK,
			   rec->flags & FTRACE_FL_REGS ? " R" : "");
	seq_printf(m, "\n");

	return 0;
}
2725 | 2725 | ||
/* seq_file operations shared by the available/enabled/filter/notrace files */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
2732 | 2732 | ||
2733 | static int | 2733 | static int |
2734 | ftrace_avail_open(struct inode *inode, struct file *file) | 2734 | ftrace_avail_open(struct inode *inode, struct file *file) |
2735 | { | 2735 | { |
2736 | struct ftrace_iterator *iter; | 2736 | struct ftrace_iterator *iter; |
2737 | 2737 | ||
2738 | if (unlikely(ftrace_disabled)) | 2738 | if (unlikely(ftrace_disabled)) |
2739 | return -ENODEV; | 2739 | return -ENODEV; |
2740 | 2740 | ||
2741 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 2741 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
2742 | if (iter) { | 2742 | if (iter) { |
2743 | iter->pg = ftrace_pages_start; | 2743 | iter->pg = ftrace_pages_start; |
2744 | iter->ops = &global_ops; | 2744 | iter->ops = &global_ops; |
2745 | } | 2745 | } |
2746 | 2746 | ||
2747 | return iter ? 0 : -ENOMEM; | 2747 | return iter ? 0 : -ENOMEM; |
2748 | } | 2748 | } |
2749 | 2749 | ||
2750 | static int | 2750 | static int |
2751 | ftrace_enabled_open(struct inode *inode, struct file *file) | 2751 | ftrace_enabled_open(struct inode *inode, struct file *file) |
2752 | { | 2752 | { |
2753 | struct ftrace_iterator *iter; | 2753 | struct ftrace_iterator *iter; |
2754 | 2754 | ||
2755 | if (unlikely(ftrace_disabled)) | 2755 | if (unlikely(ftrace_disabled)) |
2756 | return -ENODEV; | 2756 | return -ENODEV; |
2757 | 2757 | ||
2758 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 2758 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
2759 | if (iter) { | 2759 | if (iter) { |
2760 | iter->pg = ftrace_pages_start; | 2760 | iter->pg = ftrace_pages_start; |
2761 | iter->flags = FTRACE_ITER_ENABLED; | 2761 | iter->flags = FTRACE_ITER_ENABLED; |
2762 | iter->ops = &global_ops; | 2762 | iter->ops = &global_ops; |
2763 | } | 2763 | } |
2764 | 2764 | ||
2765 | return iter ? 0 : -ENOMEM; | 2765 | return iter ? 0 : -ENOMEM; |
2766 | } | 2766 | } |
2767 | 2767 | ||
/* Empty @hash under ftrace_lock (used for O_TRUNC opens of filter files) */
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
2774 | 2774 | ||
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	ftrace_ops_init(ops);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	iter->ops = ops;
	iter->flags = flag;

	/* regex_lock serializes readers/writers of the iter hash copy */
	mutex_lock(&ops->regex_lock);

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	if (file->f_mode & FMODE_WRITE) {
		/*
		 * Writers work on a private copy of the hash; it gets
		 * committed back into @ops on release.
		 */
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			ret = -ENOMEM;
			goto out_unlock;
		}
	}

	/* O_TRUNC means the user wants to start over with an empty filter */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;

 out_unlock:
	mutex_unlock(&ops->regex_lock);

	return ret;
}
2858 | 2858 | ||
2859 | static int | 2859 | static int |
2860 | ftrace_filter_open(struct inode *inode, struct file *file) | 2860 | ftrace_filter_open(struct inode *inode, struct file *file) |
2861 | { | 2861 | { |
2862 | struct ftrace_ops *ops = inode->i_private; | 2862 | struct ftrace_ops *ops = inode->i_private; |
2863 | 2863 | ||
2864 | return ftrace_regex_open(ops, | 2864 | return ftrace_regex_open(ops, |
2865 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, | 2865 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, |
2866 | inode, file); | 2866 | inode, file); |
2867 | } | 2867 | } |
2868 | 2868 | ||
2869 | static int | 2869 | static int |
2870 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2870 | ftrace_notrace_open(struct inode *inode, struct file *file) |
2871 | { | 2871 | { |
2872 | struct ftrace_ops *ops = inode->i_private; | 2872 | struct ftrace_ops *ops = inode->i_private; |
2873 | 2873 | ||
2874 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, | 2874 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, |
2875 | inode, file); | 2875 | inode, file); |
2876 | } | 2876 | } |
2877 | 2877 | ||
2878 | static int ftrace_match(char *str, char *regex, int len, int type) | 2878 | static int ftrace_match(char *str, char *regex, int len, int type) |
2879 | { | 2879 | { |
2880 | int matched = 0; | 2880 | int matched = 0; |
2881 | int slen; | 2881 | int slen; |
2882 | 2882 | ||
2883 | switch (type) { | 2883 | switch (type) { |
2884 | case MATCH_FULL: | 2884 | case MATCH_FULL: |
2885 | if (strcmp(str, regex) == 0) | 2885 | if (strcmp(str, regex) == 0) |
2886 | matched = 1; | 2886 | matched = 1; |
2887 | break; | 2887 | break; |
2888 | case MATCH_FRONT_ONLY: | 2888 | case MATCH_FRONT_ONLY: |
2889 | if (strncmp(str, regex, len) == 0) | 2889 | if (strncmp(str, regex, len) == 0) |
2890 | matched = 1; | 2890 | matched = 1; |
2891 | break; | 2891 | break; |
2892 | case MATCH_MIDDLE_ONLY: | 2892 | case MATCH_MIDDLE_ONLY: |
2893 | if (strstr(str, regex)) | 2893 | if (strstr(str, regex)) |
2894 | matched = 1; | 2894 | matched = 1; |
2895 | break; | 2895 | break; |
2896 | case MATCH_END_ONLY: | 2896 | case MATCH_END_ONLY: |
2897 | slen = strlen(str); | 2897 | slen = strlen(str); |
2898 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) | 2898 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) |
2899 | matched = 1; | 2899 | matched = 1; |
2900 | break; | 2900 | break; |
2901 | } | 2901 | } |
2902 | 2902 | ||
2903 | return matched; | 2903 | return matched; |
2904 | } | 2904 | } |
2905 | 2905 | ||
2906 | static int | 2906 | static int |
2907 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) | 2907 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) |
2908 | { | 2908 | { |
2909 | struct ftrace_func_entry *entry; | 2909 | struct ftrace_func_entry *entry; |
2910 | int ret = 0; | 2910 | int ret = 0; |
2911 | 2911 | ||
2912 | entry = ftrace_lookup_ip(hash, rec->ip); | 2912 | entry = ftrace_lookup_ip(hash, rec->ip); |
2913 | if (not) { | 2913 | if (not) { |
2914 | /* Do nothing if it doesn't exist */ | 2914 | /* Do nothing if it doesn't exist */ |
2915 | if (!entry) | 2915 | if (!entry) |
2916 | return 0; | 2916 | return 0; |
2917 | 2917 | ||
2918 | free_hash_entry(hash, entry); | 2918 | free_hash_entry(hash, entry); |
2919 | } else { | 2919 | } else { |
2920 | /* Do nothing if it exists */ | 2920 | /* Do nothing if it exists */ |
2921 | if (entry) | 2921 | if (entry) |
2922 | return 0; | 2922 | return 0; |
2923 | 2923 | ||
2924 | ret = add_hash_entry(hash, rec->ip); | 2924 | ret = add_hash_entry(hash, rec->ip); |
2925 | } | 2925 | } |
2926 | return ret; | 2926 | return ret; |
2927 | } | 2927 | } |
2928 | 2928 | ||
2929 | static int | 2929 | static int |
2930 | ftrace_match_record(struct dyn_ftrace *rec, char *mod, | 2930 | ftrace_match_record(struct dyn_ftrace *rec, char *mod, |
2931 | char *regex, int len, int type) | 2931 | char *regex, int len, int type) |
2932 | { | 2932 | { |
2933 | char str[KSYM_SYMBOL_LEN]; | 2933 | char str[KSYM_SYMBOL_LEN]; |
2934 | char *modname; | 2934 | char *modname; |
2935 | 2935 | ||
2936 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | 2936 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); |
2937 | 2937 | ||
2938 | if (mod) { | 2938 | if (mod) { |
2939 | /* module lookup requires matching the module */ | 2939 | /* module lookup requires matching the module */ |
2940 | if (!modname || strcmp(modname, mod)) | 2940 | if (!modname || strcmp(modname, mod)) |
2941 | return 0; | 2941 | return 0; |
2942 | 2942 | ||
2943 | /* blank search means to match all funcs in the mod */ | 2943 | /* blank search means to match all funcs in the mod */ |
2944 | if (!len) | 2944 | if (!len) |
2945 | return 1; | 2945 | return 1; |
2946 | } | 2946 | } |
2947 | 2947 | ||
2948 | return ftrace_match(str, regex, len, type); | 2948 | return ftrace_match(str, regex, len, type); |
2949 | } | 2949 | } |
2950 | 2950 | ||
/*
 * Walk every dyn_ftrace record and enter the ones matching @buff (and
 * optionally module @mod) into @hash; with @not set the matches are
 * removed instead.  Returns 1 if anything matched, 0 if nothing did,
 * or a negative errno on allocation failure.
 */
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	/* an empty buffer keeps MATCH_FULL against the empty string */
	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				/* propagate the error from enter_record() */
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}
2988 | 2988 | ||
/* Enter all functions matching @buff into @hash (no module restriction) */
static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
2994 | 2994 | ||
/*
 * Handle the module part of the "mod" command: select (or, with a
 * leading '!', deselect) the functions of module @mod that match
 * @buff.  "*" and an empty pattern both mean every function of the
 * module.
 */
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	/* '!' or '!*' means: drop this module's functions */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = '\0';
		not = 1;
	} else if (strcmp(buff, "*") == 0) {
		/* a lone '*' is the same as a blank pattern */
		buff[0] = '\0';
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}
3012 | 3012 | ||
3013 | /* | 3013 | /* |
3014 | * We register the module command as a template to show others how | 3014 | * We register the module command as a template to show others how |
3015 | * to register the a command as well. | 3015 | * to register the a command as well. |
3016 | */ | 3016 | */ |
3017 | 3017 | ||
3018 | static int | 3018 | static int |
3019 | ftrace_mod_callback(struct ftrace_hash *hash, | 3019 | ftrace_mod_callback(struct ftrace_hash *hash, |
3020 | char *func, char *cmd, char *param, int enable) | 3020 | char *func, char *cmd, char *param, int enable) |
3021 | { | 3021 | { |
3022 | char *mod; | 3022 | char *mod; |
3023 | int ret = -EINVAL; | 3023 | int ret = -EINVAL; |
3024 | 3024 | ||
3025 | /* | 3025 | /* |
3026 | * cmd == 'mod' because we only registered this func | 3026 | * cmd == 'mod' because we only registered this func |
3027 | * for the 'mod' ftrace_func_command. | 3027 | * for the 'mod' ftrace_func_command. |
3028 | * But if you register one func with multiple commands, | 3028 | * But if you register one func with multiple commands, |
3029 | * you can tell which command was used by the cmd | 3029 | * you can tell which command was used by the cmd |
3030 | * parameter. | 3030 | * parameter. |
3031 | */ | 3031 | */ |
3032 | 3032 | ||
3033 | /* we must have a module name */ | 3033 | /* we must have a module name */ |
3034 | if (!param) | 3034 | if (!param) |
3035 | return ret; | 3035 | return ret; |
3036 | 3036 | ||
3037 | mod = strsep(¶m, ":"); | 3037 | mod = strsep(¶m, ":"); |
3038 | if (!strlen(mod)) | 3038 | if (!strlen(mod)) |
3039 | return ret; | 3039 | return ret; |
3040 | 3040 | ||
3041 | ret = ftrace_match_module_records(hash, func, mod); | 3041 | ret = ftrace_match_module_records(hash, func, mod); |
3042 | if (!ret) | 3042 | if (!ret) |
3043 | ret = -EINVAL; | 3043 | ret = -EINVAL; |
3044 | if (ret < 0) | 3044 | if (ret < 0) |
3045 | return ret; | 3045 | return ret; |
3046 | 3046 | ||
3047 | return 0; | 3047 | return 0; |
3048 | } | 3048 | } |
3049 | 3049 | ||
/* The "mod" command object, registered at boot by ftrace_mod_cmd_init() */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name = "mod",
	.func = ftrace_mod_callback,
};
3054 | 3054 | ||
/* Register the "mod" filter command early during boot */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
3060 | 3060 | ||
/*
 * ftrace handler for all registered function probes.  Looks up @ip in
 * ftrace_func_hash and invokes the handler of every probe armed on
 * exactly this address.
 */
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	/* common case: no probe hashed to this bucket */
	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		/* several probes may share a bucket; match the exact ip */
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}
3087 | 3087 | ||
/* The single ftrace_ops shared by every registered function probe */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
	.flags = FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(trace_probe_ops)
};

/* non-zero once trace_probe_ops has been registered with ftrace */
static int ftrace_probe_registered;
3096 | 3096 | ||
/*
 * Make sure trace_probe_ops is live whenever at least one probe sits
 * in ftrace_func_hash, and force a call-site update so newly added
 * filter entries take effect.  Called under ftrace_lock.
 */
static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
		return;
	}

	/* bail out unless some bucket actually holds a probe */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	/*
	 * NOTE(review): the return value of ftrace_startup() is ignored
	 * here and the ops is marked registered regardless — confirm a
	 * startup failure really may be treated as success.
	 */
	ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}
3122 | 3122 | ||
/*
 * Counterpart of __enable_ftrace_function_probe(): shut trace_probe_ops
 * down once the last probe has been removed from ftrace_func_hash.
 */
static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	/* keep the ops registered while any hash bucket is non-empty */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
3141 | 3141 | ||
3142 | 3142 | ||
3143 | static void ftrace_free_entry(struct ftrace_func_probe *entry) | 3143 | static void ftrace_free_entry(struct ftrace_func_probe *entry) |
3144 | { | 3144 | { |
3145 | if (entry->ops->free) | 3145 | if (entry->ops->free) |
3146 | entry->ops->free(entry->ops, entry->ip, &entry->data); | 3146 | entry->ops->free(entry->ops, entry->ip, &entry->data); |
3147 | kfree(entry); | 3147 | kfree(entry); |
3148 | } | 3148 | } |
3149 | 3149 | ||
/*
 * register_ftrace_function_probe - arm @ops on every function matching @glob
 * @glob: glob pattern selecting the functions to probe
 * @ops: the probe ops (handler plus optional init/free callbacks)
 * @data: opaque cookie handed to the handler on each hit
 *
 * Returns the number of functions the probe was attached to, or a
 * negative errno.  Negated ('!') globs are rejected.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
	struct ftrace_hash *hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;
	int ret;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&trace_probe_ops.regex_lock);

	/* work on a copy; committed via ftrace_hash_move() below */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash) {
		count = -ENOMEM;
		goto out;
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
		goto out;
	}

	mutex_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		/* publish the probe; readers walk the hash under RCU */
		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	if (ret < 0)
		count = ret;

	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);
 out:
	mutex_unlock(&trace_probe_ops.regex_lock);
	free_ftrace_hash(hash);

	return count;
}
3246 | 3246 | ||
/*
 * Flags for __unregister_ftrace_function_probe() selecting which fields
 * of a probe entry must match before the entry is removed.
 */
enum {
	PROBE_TEST_FUNC		= 1,	/* entry->ops must equal the given ops */
	PROBE_TEST_DATA		= 2	/* entry->data must equal the given data */
};
3251 | 3251 | ||
/*
 * Remove function probes whose symbol matches @glob.  @flags selects the
 * additional match criteria: PROBE_TEST_FUNC compares each probe's ops
 * against @ops, PROBE_TEST_DATA compares its data against @data.  A NULL,
 * empty, or "*" @glob matches every probe.
 *
 * Lock order: trace_probe_ops.regex_lock is taken for the whole update,
 * ftrace_lock only around the disable + hash move + free.
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.regex_lock);

	/* Work on a copy; the live filter hash is swapped in at the end. */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			/* Unhash now; actual freeing is deferred below. */
			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	/* Wait out RCU readers of the hlist before freeing the entries. */
	synchronize_sched();
	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.regex_lock);
	free_ftrace_hash(hash);
}
3337 | 3337 | ||
3338 | void | 3338 | void |
3339 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 3339 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
3340 | void *data) | 3340 | void *data) |
3341 | { | 3341 | { |
3342 | __unregister_ftrace_function_probe(glob, ops, data, | 3342 | __unregister_ftrace_function_probe(glob, ops, data, |
3343 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | 3343 | PROBE_TEST_FUNC | PROBE_TEST_DATA); |
3344 | } | 3344 | } |
3345 | 3345 | ||
3346 | void | 3346 | void |
3347 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | 3347 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) |
3348 | { | 3348 | { |
3349 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | 3349 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); |
3350 | } | 3350 | } |
3351 | 3351 | ||
3352 | void unregister_ftrace_function_probe_all(char *glob) | 3352 | void unregister_ftrace_function_probe_all(char *glob) |
3353 | { | 3353 | { |
3354 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | 3354 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); |
3355 | } | 3355 | } |
3356 | 3356 | ||
3357 | static LIST_HEAD(ftrace_commands); | 3357 | static LIST_HEAD(ftrace_commands); |
3358 | static DEFINE_MUTEX(ftrace_cmd_mutex); | 3358 | static DEFINE_MUTEX(ftrace_cmd_mutex); |
3359 | 3359 | ||
3360 | /* | 3360 | /* |
3361 | * Currently we only register ftrace commands from __init, so mark this | 3361 | * Currently we only register ftrace commands from __init, so mark this |
3362 | * __init too. | 3362 | * __init too. |
3363 | */ | 3363 | */ |
3364 | __init int register_ftrace_command(struct ftrace_func_command *cmd) | 3364 | __init int register_ftrace_command(struct ftrace_func_command *cmd) |
3365 | { | 3365 | { |
3366 | struct ftrace_func_command *p; | 3366 | struct ftrace_func_command *p; |
3367 | int ret = 0; | 3367 | int ret = 0; |
3368 | 3368 | ||
3369 | mutex_lock(&ftrace_cmd_mutex); | 3369 | mutex_lock(&ftrace_cmd_mutex); |
3370 | list_for_each_entry(p, &ftrace_commands, list) { | 3370 | list_for_each_entry(p, &ftrace_commands, list) { |
3371 | if (strcmp(cmd->name, p->name) == 0) { | 3371 | if (strcmp(cmd->name, p->name) == 0) { |
3372 | ret = -EBUSY; | 3372 | ret = -EBUSY; |
3373 | goto out_unlock; | 3373 | goto out_unlock; |
3374 | } | 3374 | } |
3375 | } | 3375 | } |
3376 | list_add(&cmd->list, &ftrace_commands); | 3376 | list_add(&cmd->list, &ftrace_commands); |
3377 | out_unlock: | 3377 | out_unlock: |
3378 | mutex_unlock(&ftrace_cmd_mutex); | 3378 | mutex_unlock(&ftrace_cmd_mutex); |
3379 | 3379 | ||
3380 | return ret; | 3380 | return ret; |
3381 | } | 3381 | } |
3382 | 3382 | ||
3383 | /* | 3383 | /* |
3384 | * Currently we only unregister ftrace commands from __init, so mark | 3384 | * Currently we only unregister ftrace commands from __init, so mark |
3385 | * this __init too. | 3385 | * this __init too. |
3386 | */ | 3386 | */ |
3387 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) | 3387 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) |
3388 | { | 3388 | { |
3389 | struct ftrace_func_command *p, *n; | 3389 | struct ftrace_func_command *p, *n; |
3390 | int ret = -ENODEV; | 3390 | int ret = -ENODEV; |
3391 | 3391 | ||
3392 | mutex_lock(&ftrace_cmd_mutex); | 3392 | mutex_lock(&ftrace_cmd_mutex); |
3393 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | 3393 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
3394 | if (strcmp(cmd->name, p->name) == 0) { | 3394 | if (strcmp(cmd->name, p->name) == 0) { |
3395 | ret = 0; | 3395 | ret = 0; |
3396 | list_del_init(&p->list); | 3396 | list_del_init(&p->list); |
3397 | goto out_unlock; | 3397 | goto out_unlock; |
3398 | } | 3398 | } |
3399 | } | 3399 | } |
3400 | out_unlock: | 3400 | out_unlock: |
3401 | mutex_unlock(&ftrace_cmd_mutex); | 3401 | mutex_unlock(&ftrace_cmd_mutex); |
3402 | 3402 | ||
3403 | return ret; | 3403 | return ret; |
3404 | } | 3404 | } |
3405 | 3405 | ||
3406 | static int ftrace_process_regex(struct ftrace_hash *hash, | 3406 | static int ftrace_process_regex(struct ftrace_hash *hash, |
3407 | char *buff, int len, int enable) | 3407 | char *buff, int len, int enable) |
3408 | { | 3408 | { |
3409 | char *func, *command, *next = buff; | 3409 | char *func, *command, *next = buff; |
3410 | struct ftrace_func_command *p; | 3410 | struct ftrace_func_command *p; |
3411 | int ret = -EINVAL; | 3411 | int ret = -EINVAL; |
3412 | 3412 | ||
3413 | func = strsep(&next, ":"); | 3413 | func = strsep(&next, ":"); |
3414 | 3414 | ||
3415 | if (!next) { | 3415 | if (!next) { |
3416 | ret = ftrace_match_records(hash, func, len); | 3416 | ret = ftrace_match_records(hash, func, len); |
3417 | if (!ret) | 3417 | if (!ret) |
3418 | ret = -EINVAL; | 3418 | ret = -EINVAL; |
3419 | if (ret < 0) | 3419 | if (ret < 0) |
3420 | return ret; | 3420 | return ret; |
3421 | return 0; | 3421 | return 0; |
3422 | } | 3422 | } |
3423 | 3423 | ||
3424 | /* command found */ | 3424 | /* command found */ |
3425 | 3425 | ||
3426 | command = strsep(&next, ":"); | 3426 | command = strsep(&next, ":"); |
3427 | 3427 | ||
3428 | mutex_lock(&ftrace_cmd_mutex); | 3428 | mutex_lock(&ftrace_cmd_mutex); |
3429 | list_for_each_entry(p, &ftrace_commands, list) { | 3429 | list_for_each_entry(p, &ftrace_commands, list) { |
3430 | if (strcmp(p->name, command) == 0) { | 3430 | if (strcmp(p->name, command) == 0) { |
3431 | ret = p->func(hash, func, command, next, enable); | 3431 | ret = p->func(hash, func, command, next, enable); |
3432 | goto out_unlock; | 3432 | goto out_unlock; |
3433 | } | 3433 | } |
3434 | } | 3434 | } |
3435 | out_unlock: | 3435 | out_unlock: |
3436 | mutex_unlock(&ftrace_cmd_mutex); | 3436 | mutex_unlock(&ftrace_cmd_mutex); |
3437 | 3437 | ||
3438 | return ret; | 3438 | return ret; |
3439 | } | 3439 | } |
3440 | 3440 | ||
/*
 * Common write handler for the filter and notrace files.  User input is
 * accumulated in the iterator's trace_parser; once a complete token has
 * been read it is applied to the iterator's private hash copy.
 * @enable: 1 when writing the filter hash, 0 for the notrace hash.
 * Returns bytes consumed or a negative errno.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	/*
	 * Files opened readable hold a seq_file; the iterator then hangs
	 * off the seq_file's private data rather than the file's.
	 */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Only act once a full token has been loaded (not a partial write). */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}
3479 | 3479 | ||
/* Write handler for the set_ftrace_filter file (enable = 1). */
ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
3486 | 3486 | ||
/* Write handler for the set_ftrace_notrace file (enable = 0). */
ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
3493 | 3493 | ||
3494 | static int | 3494 | static int |
3495 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | 3495 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
3496 | { | 3496 | { |
3497 | struct ftrace_func_entry *entry; | 3497 | struct ftrace_func_entry *entry; |
3498 | 3498 | ||
3499 | if (!ftrace_location(ip)) | 3499 | if (!ftrace_location(ip)) |
3500 | return -EINVAL; | 3500 | return -EINVAL; |
3501 | 3501 | ||
3502 | if (remove) { | 3502 | if (remove) { |
3503 | entry = ftrace_lookup_ip(hash, ip); | 3503 | entry = ftrace_lookup_ip(hash, ip); |
3504 | if (!entry) | 3504 | if (!entry) |
3505 | return -ENOENT; | 3505 | return -ENOENT; |
3506 | free_hash_entry(hash, entry); | 3506 | free_hash_entry(hash, entry); |
3507 | return 0; | 3507 | return 0; |
3508 | } | 3508 | } |
3509 | 3509 | ||
3510 | return add_hash_entry(hash, ip); | 3510 | return add_hash_entry(hash, ip); |
3511 | } | 3511 | } |
3512 | 3512 | ||
3513 | static void ftrace_ops_update_code(struct ftrace_ops *ops) | 3513 | static void ftrace_ops_update_code(struct ftrace_ops *ops) |
3514 | { | 3514 | { |
3515 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | 3515 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) |
3516 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 3516 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); |
3517 | } | 3517 | } |
3518 | 3518 | ||
/*
 * Update one of @ops's hashes from a regex and/or a raw address, then
 * re-patch the live call sites if the swap succeeds.
 *
 * @buf:    regex to match function names against (may be NULL)
 * @len:    length of @buf
 * @ip:     exact address to add/remove (0 to skip)
 * @remove: non-zero to remove @ip instead of adding it
 * @reset:  non-zero to clear the hash before applying @buf/@ip
 * @enable: 1 to act on the filter hash, 0 on the notrace hash
 *
 * Returns 0 on success or a negative errno.  Lock order: ops->regex_lock
 * around the whole update, ftrace_lock only around the hash move.
 */
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	/* All global ops uses the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->regex_lock);

	if (enable)
		orig_hash = &ops->filter_hash;
	else
		orig_hash = &ops->notrace_hash;

	/* Mutate a copy so a failure leaves the live hash untouched. */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (reset)
		ftrace_filter_reset(hash);
	if (buf && !ftrace_match_records(hash, buf, len)) {
		/* The pattern matched nothing. */
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret)
		ftrace_ops_update_code(ops);

	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
3572 | 3572 | ||
3573 | static int | 3573 | static int |
3574 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, | 3574 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, |
3575 | int reset, int enable) | 3575 | int reset, int enable) |
3576 | { | 3576 | { |
3577 | return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); | 3577 | return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); |
3578 | } | 3578 | } |
3579 | 3579 | ||
/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ip is NULL, it fails to update filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3597 | 3597 | ||
/* Regex-only variant of ftrace_set_hash(): no address, no remove. */
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}
3604 | 3604 | ||
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 *
 * Returns 0 on success or a negative errno.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
3622 | 3622 | ||
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 *
 * Returns 0 on success or a negative errno.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3656 | 3656 | ||
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3673 | 3673 | ||
3674 | /* | 3674 | /* |
3675 | * command line interface to allow users to set filters on boot up. | 3675 | * command line interface to allow users to set filters on boot up. |
3676 | */ | 3676 | */ |
3677 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE | 3677 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE |
3678 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 3678 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
3679 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | 3679 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; |
3680 | 3680 | ||
3681 | /* Used by function selftest to not test if filter is set */ | 3681 | /* Used by function selftest to not test if filter is set */ |
3682 | bool ftrace_filter_param __initdata; | 3682 | bool ftrace_filter_param __initdata; |
3683 | 3683 | ||
3684 | static int __init set_ftrace_notrace(char *str) | 3684 | static int __init set_ftrace_notrace(char *str) |
3685 | { | 3685 | { |
3686 | ftrace_filter_param = true; | 3686 | ftrace_filter_param = true; |
3687 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | 3687 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
3688 | return 1; | 3688 | return 1; |
3689 | } | 3689 | } |
3690 | __setup("ftrace_notrace=", set_ftrace_notrace); | 3690 | __setup("ftrace_notrace=", set_ftrace_notrace); |
3691 | 3691 | ||
3692 | static int __init set_ftrace_filter(char *str) | 3692 | static int __init set_ftrace_filter(char *str) |
3693 | { | 3693 | { |
3694 | ftrace_filter_param = true; | 3694 | ftrace_filter_param = true; |
3695 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | 3695 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
3696 | return 1; | 3696 | return 1; |
3697 | } | 3697 | } |
3698 | __setup("ftrace_filter=", set_ftrace_filter); | 3698 | __setup("ftrace_filter=", set_ftrace_filter); |
3699 | 3699 | ||
3700 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3700 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3701 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | 3701 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
3702 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); | 3702 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); |
3703 | 3703 | ||
/* Handle the "ftrace_graph_filter=" boot parameter. */
static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
3710 | 3710 | ||
3711 | static void __init set_ftrace_early_graph(char *buf) | 3711 | static void __init set_ftrace_early_graph(char *buf) |
3712 | { | 3712 | { |
3713 | int ret; | 3713 | int ret; |
3714 | char *func; | 3714 | char *func; |
3715 | 3715 | ||
3716 | while (buf) { | 3716 | while (buf) { |
3717 | func = strsep(&buf, ","); | 3717 | func = strsep(&buf, ","); |
3718 | /* we allow only one expression at a time */ | 3718 | /* we allow only one expression at a time */ |
3719 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, | 3719 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, |
3720 | FTRACE_GRAPH_MAX_FUNCS, func); | 3720 | FTRACE_GRAPH_MAX_FUNCS, func); |
3721 | if (ret) | 3721 | if (ret) |
3722 | printk(KERN_DEBUG "ftrace: function %s not " | 3722 | printk(KERN_DEBUG "ftrace: function %s not " |
3723 | "traceable\n", func); | 3723 | "traceable\n", func); |
3724 | } | 3724 | } |
3725 | } | 3725 | } |
3726 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 3726 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
3727 | 3727 | ||
/*
 * Apply a comma-separated list of filter expressions from the boot
 * command line to @ops.  @enable selects which hash is updated:
 * 1 for the filter hash, 0 for the notrace hash (same convention as
 * ftrace_set_regex()).
 */
void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}
3740 | 3740 | ||
/*
 * Called once during ftrace initialization to apply any
 * "ftrace_filter=", "ftrace_notrace=" and "ftrace_graph_filter="
 * boot parameters stashed by the __setup() handlers above.
 */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
3752 | 3752 | ||
/*
 * Release handler for the regex-based filter files (set_ftrace_filter,
 * set_ftrace_notrace, and users of ftrace_regex_open()).  Flushes any
 * partially-written pattern still in the parser, and, for files opened
 * for write, commits the staged hash into the ops under regex_lock and
 * ftrace_lock before freeing the iterator state.
 */
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		/* Readers keep the iterator in the seq_file's private data */
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* Apply whatever pattern was left unterminated in the buffer */
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->filter_hash;
		else
			orig_hash = &iter->ops->notrace_hash;

		/* ftrace_lock nests inside regex_lock here */
		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret)
			/* New hash is live: retouch the affected call sites */
			ftrace_ops_update_code(iter->ops);

		mutex_unlock(&ftrace_lock);
	}

	mutex_unlock(&iter->ops->regex_lock);
	free_ftrace_hash(iter->hash);
	kfree(iter);

	return 0;
}
3801 | 3801 | ||
/* available_filter_functions: read-only list of traceable functions */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* enabled_functions: read-only view (see ftrace_enabled_open) */
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* set_ftrace_filter: read/write list of functions to trace */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

/* set_ftrace_notrace: read/write list of functions to exclude */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
3831 | 3831 | ||
3832 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3832 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3833 | 3833 | ||
/* Serializes all access to the graph filter tables below */
static DEFINE_MUTEX(graph_lock);

/* Number of entries currently in use in each table */
int ftrace_graph_count;
int ftrace_graph_notrace_count;
/* Function addresses selected for graph tracing / exclusion */
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3840 | 3840 | ||
/*
 * Per-open-file state for set_graph_function / set_graph_notrace,
 * binding the file to one of the two tables above.
 */
struct ftrace_graph_data {
	unsigned long *table;	/* address table being viewed/edited */
	size_t size;		/* capacity of @table in entries */
	int *count;		/* pointer to the entries-in-use counter */
	const struct seq_operations *seq_ops;	/* used for read opens */
};
3847 | 3847 | ||
3848 | static void * | 3848 | static void * |
3849 | __g_next(struct seq_file *m, loff_t *pos) | 3849 | __g_next(struct seq_file *m, loff_t *pos) |
3850 | { | 3850 | { |
3851 | struct ftrace_graph_data *fgd = m->private; | 3851 | struct ftrace_graph_data *fgd = m->private; |
3852 | 3852 | ||
3853 | if (*pos >= *fgd->count) | 3853 | if (*pos >= *fgd->count) |
3854 | return NULL; | 3854 | return NULL; |
3855 | return &fgd->table[*pos]; | 3855 | return &fgd->table[*pos]; |
3856 | } | 3856 | } |
3857 | 3857 | ||
/* seq_file ->next: advance to the next recorded function address */
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}
3864 | 3864 | ||
/*
 * seq_file ->start: takes graph_lock, which stays held for the whole
 * traversal and is released in g_stop().  An empty table at the first
 * position yields the sentinel (void *)1 so g_show() prints the
 * "all functions enabled" banner instead of entries.
 */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!*fgd->count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}
3877 | 3877 | ||
/* seq_file ->stop: pairs with g_start(), dropping graph_lock */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
3882 | 3882 | ||
/*
 * seq_file ->show: print one filtered function, or the banner when
 * g_start() handed us the "table is empty" sentinel (void *)1.
 */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		/*
		 * The banner is a constant string: use seq_puts() rather
		 * than seq_printf(), which would needlessly scan it for
		 * format specifiers (checkpatch flags this pattern).
		 */
		seq_puts(m, "#### all functions enabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}
3899 | 3899 | ||
/* seq_file operations for reading the graph filter tables */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
3906 | 3906 | ||
/*
 * Common open path for set_graph_function / set_graph_notrace.
 * A truncating write open clears the whole table; a read open sets up
 * a seq_file with @fgd as its private data.  On success, ownership of
 * @fgd passes to the file and is reclaimed in ftrace_graph_release().
 * Returns 0 or the error from seq_open().
 */
static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		/* O_TRUNC: start over with an empty filter table */
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;

	return ret;
}
3932 | 3932 | ||
3933 | static int | 3933 | static int |
3934 | ftrace_graph_open(struct inode *inode, struct file *file) | 3934 | ftrace_graph_open(struct inode *inode, struct file *file) |
3935 | { | 3935 | { |
3936 | struct ftrace_graph_data *fgd; | 3936 | struct ftrace_graph_data *fgd; |
3937 | 3937 | ||
3938 | if (unlikely(ftrace_disabled)) | 3938 | if (unlikely(ftrace_disabled)) |
3939 | return -ENODEV; | 3939 | return -ENODEV; |
3940 | 3940 | ||
3941 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | 3941 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); |
3942 | if (fgd == NULL) | 3942 | if (fgd == NULL) |
3943 | return -ENOMEM; | 3943 | return -ENOMEM; |
3944 | 3944 | ||
3945 | fgd->table = ftrace_graph_funcs; | 3945 | fgd->table = ftrace_graph_funcs; |
3946 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; | 3946 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; |
3947 | fgd->count = &ftrace_graph_count; | 3947 | fgd->count = &ftrace_graph_count; |
3948 | fgd->seq_ops = &ftrace_graph_seq_ops; | 3948 | fgd->seq_ops = &ftrace_graph_seq_ops; |
3949 | 3949 | ||
3950 | return __ftrace_graph_open(inode, file, fgd); | 3950 | return __ftrace_graph_open(inode, file, fgd); |
3951 | } | 3951 | } |
3952 | 3952 | ||
3953 | static int | 3953 | static int |
3954 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) | 3954 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) |
3955 | { | 3955 | { |
3956 | struct ftrace_graph_data *fgd; | 3956 | struct ftrace_graph_data *fgd; |
3957 | 3957 | ||
3958 | if (unlikely(ftrace_disabled)) | 3958 | if (unlikely(ftrace_disabled)) |
3959 | return -ENODEV; | 3959 | return -ENODEV; |
3960 | 3960 | ||
3961 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | 3961 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); |
3962 | if (fgd == NULL) | 3962 | if (fgd == NULL) |
3963 | return -ENOMEM; | 3963 | return -ENOMEM; |
3964 | 3964 | ||
3965 | fgd->table = ftrace_graph_notrace_funcs; | 3965 | fgd->table = ftrace_graph_notrace_funcs; |
3966 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; | 3966 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; |
3967 | fgd->count = &ftrace_graph_notrace_count; | 3967 | fgd->count = &ftrace_graph_notrace_count; |
3968 | fgd->seq_ops = &ftrace_graph_seq_ops; | 3968 | fgd->seq_ops = &ftrace_graph_seq_ops; |
3969 | 3969 | ||
3970 | return __ftrace_graph_open(inode, file, fgd); | 3970 | return __ftrace_graph_open(inode, file, fgd); |
3971 | } | 3971 | } |
3972 | 3972 | ||
3973 | static int | 3973 | static int |
3974 | ftrace_graph_release(struct inode *inode, struct file *file) | 3974 | ftrace_graph_release(struct inode *inode, struct file *file) |
3975 | { | 3975 | { |
3976 | if (file->f_mode & FMODE_READ) { | 3976 | if (file->f_mode & FMODE_READ) { |
3977 | struct seq_file *m = file->private_data; | 3977 | struct seq_file *m = file->private_data; |
3978 | 3978 | ||
3979 | kfree(m->private); | 3979 | kfree(m->private); |
3980 | seq_release(inode, file); | 3980 | seq_release(inode, file); |
3981 | } else { | 3981 | } else { |
3982 | kfree(file->private_data); | 3982 | kfree(file->private_data); |
3983 | } | 3983 | } |
3984 | 3984 | ||
3985 | return 0; | 3985 | return 0; |
3986 | } | 3986 | } |
3987 | 3987 | ||
/*
 * Add every function matching @buffer to @array (capacity @size, with
 * *@idx entries in use), or remove matches when the pattern is negated
 * with "!".  This is the worker for the set_graph_function and
 * set_graph_notrace write paths.
 *
 * Returns 0 if at least one record matched, -EBUSY if the table is
 * already full, -ENODEV if ftrace is disabled, -EINVAL if nothing
 * matched.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= size)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					/* table full: stop scanning */
					if (*idx >= size)
						goto out;
				}
			} else {
				if (exists) {
					/* remove by swapping in the last entry */
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}
4050 | 4050 | ||
4051 | static ssize_t | 4051 | static ssize_t |
4052 | ftrace_graph_write(struct file *file, const char __user *ubuf, | 4052 | ftrace_graph_write(struct file *file, const char __user *ubuf, |
4053 | size_t cnt, loff_t *ppos) | 4053 | size_t cnt, loff_t *ppos) |
4054 | { | 4054 | { |
4055 | struct trace_parser parser; | 4055 | struct trace_parser parser; |
4056 | ssize_t read, ret = 0; | 4056 | ssize_t read, ret = 0; |
4057 | struct ftrace_graph_data *fgd = file->private_data; | 4057 | struct ftrace_graph_data *fgd = file->private_data; |
4058 | 4058 | ||
4059 | if (!cnt) | 4059 | if (!cnt) |
4060 | return 0; | 4060 | return 0; |
4061 | 4061 | ||
4062 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) | 4062 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) |
4063 | return -ENOMEM; | 4063 | return -ENOMEM; |
4064 | 4064 | ||
4065 | read = trace_get_user(&parser, ubuf, cnt, ppos); | 4065 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
4066 | 4066 | ||
4067 | if (read >= 0 && trace_parser_loaded((&parser))) { | 4067 | if (read >= 0 && trace_parser_loaded((&parser))) { |
4068 | parser.buffer[parser.idx] = 0; | 4068 | parser.buffer[parser.idx] = 0; |
4069 | 4069 | ||
4070 | mutex_lock(&graph_lock); | 4070 | mutex_lock(&graph_lock); |
4071 | 4071 | ||
4072 | /* we allow only one expression at a time */ | 4072 | /* we allow only one expression at a time */ |
4073 | ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, | 4073 | ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, |
4074 | parser.buffer); | 4074 | parser.buffer); |
4075 | 4075 | ||
4076 | mutex_unlock(&graph_lock); | 4076 | mutex_unlock(&graph_lock); |
4077 | } | 4077 | } |
4078 | 4078 | ||
4079 | if (!ret) | 4079 | if (!ret) |
4080 | ret = read; | 4080 | ret = read; |
4081 | 4081 | ||
4082 | trace_parser_put(&parser); | 4082 | trace_parser_put(&parser); |
4083 | 4083 | ||
4084 | return ret; | 4084 | return ret; |
4085 | } | 4085 | } |
4086 | 4086 | ||
/* set_graph_function: functions to graph-trace (see ftrace_graph_open) */
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.llseek = tracing_lseek,
	.release = ftrace_graph_release,
};

/* set_graph_notrace: functions excluded from graph tracing */
static const struct file_operations ftrace_graph_notrace_fops = {
	.open = ftrace_graph_notrace_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.llseek = tracing_lseek,
	.release = ftrace_graph_release,
};
4102 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 4102 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
4103 | 4103 | ||
/*
 * Create the "set_ftrace_filter" and "set_ftrace_notrace" control
 * files for @ops under @parent.  @ops is stored as the files' private
 * data so the open/write handlers know whose hashes to modify.
 */
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}
4114 | 4114 | ||
4115 | /* | 4115 | /* |
4116 | * The name "destroy_filter_files" is really a misnomer. Although | 4116 | * The name "destroy_filter_files" is really a misnomer. Although |
 * in the future, it may actually delete the files, but this is
4118 | * really intended to make sure the ops passed in are disabled | 4118 | * really intended to make sure the ops passed in are disabled |
4119 | * and that when this function returns, the caller is free to | 4119 | * and that when this function returns, the caller is free to |
4120 | * free the ops. | 4120 | * free the ops. |
4121 | * | 4121 | * |
4122 | * The "destroy" name is only to match the "create" name that this | 4122 | * The "destroy" name is only to match the "create" name that this |
4123 | * should be paired with. | 4123 | * should be paired with. |
4124 | */ | 4124 | */ |
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	/* Stop the ops from actively tracing before it goes away */
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	/* Mark deleted; the caller is now free to free the ops (see above) */
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}
4133 | 4133 | ||
4134 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 4134 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
4135 | { | 4135 | { |
4136 | 4136 | ||
4137 | trace_create_file("available_filter_functions", 0444, | 4137 | trace_create_file("available_filter_functions", 0444, |
4138 | d_tracer, NULL, &ftrace_avail_fops); | 4138 | d_tracer, NULL, &ftrace_avail_fops); |
4139 | 4139 | ||
4140 | trace_create_file("enabled_functions", 0444, | 4140 | trace_create_file("enabled_functions", 0444, |
4141 | d_tracer, NULL, &ftrace_enabled_fops); | 4141 | d_tracer, NULL, &ftrace_enabled_fops); |
4142 | 4142 | ||
4143 | ftrace_create_filter_files(&global_ops, d_tracer); | 4143 | ftrace_create_filter_files(&global_ops, d_tracer); |
4144 | 4144 | ||
4145 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 4145 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
4146 | trace_create_file("set_graph_function", 0444, d_tracer, | 4146 | trace_create_file("set_graph_function", 0444, d_tracer, |
4147 | NULL, | 4147 | NULL, |
4148 | &ftrace_graph_fops); | 4148 | &ftrace_graph_fops); |
4149 | trace_create_file("set_graph_notrace", 0444, d_tracer, | 4149 | trace_create_file("set_graph_notrace", 0444, d_tracer, |
4150 | NULL, | 4150 | NULL, |
4151 | &ftrace_graph_notrace_fops); | 4151 | &ftrace_graph_notrace_fops); |
4152 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 4152 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
4153 | 4153 | ||
4154 | return 0; | 4154 | return 0; |
4155 | } | 4155 | } |
4156 | 4156 | ||
/*
 * Comparison callback for sort(): orders mcount call-site addresses
 * ascending.  Returns 1, -1 or 0 for greater/less/equal.
 */
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long ipa = *(const unsigned long *)a;
	const unsigned long ipb = *(const unsigned long *)b;

	if (ipa > ipb)
		return 1;
	if (ipa < ipb)
		return -1;
	return 0;
}
4168 | 4168 | ||
/*
 * Swap callback for sort(): exchanges two call-site addresses.
 * @size is part of the sort() callback signature but each element is
 * known to be a single unsigned long, so it is unused here.
 */
static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *x = a;
	unsigned long *y = b;
	unsigned long tmp = *x;

	*x = *y;
	*y = tmp;
}
4179 | 4179 | ||
/*
 * Record the mcount call sites listed in [@start, @end) as dyn_ftrace
 * records and hand them to ftrace_update_code() for patching.  @mod is
 * NULL for the core kernel at boot and non-NULL when processing a
 * module's __mcount_loc section.  Returns 0 on success or -ENOMEM.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	/* Records are kept sorted by ip for binary-search lookups */
	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	/* Fill the newly allocated pages with one record per call site */
	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}
4278 | 4278 | ||
4279 | #ifdef CONFIG_MODULES | 4279 | #ifdef CONFIG_MODULES |
4280 | 4280 | ||
4281 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) | 4281 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
4282 | 4282 | ||
4283 | void ftrace_release_mod(struct module *mod) | 4283 | void ftrace_release_mod(struct module *mod) |
4284 | { | 4284 | { |
4285 | struct dyn_ftrace *rec; | 4285 | struct dyn_ftrace *rec; |
4286 | struct ftrace_page **last_pg; | 4286 | struct ftrace_page **last_pg; |
4287 | struct ftrace_page *pg; | 4287 | struct ftrace_page *pg; |
4288 | int order; | 4288 | int order; |
4289 | 4289 | ||
4290 | mutex_lock(&ftrace_lock); | 4290 | mutex_lock(&ftrace_lock); |
4291 | 4291 | ||
4292 | if (ftrace_disabled) | 4292 | if (ftrace_disabled) |
4293 | goto out_unlock; | 4293 | goto out_unlock; |
4294 | 4294 | ||
4295 | /* | 4295 | /* |
4296 | * Each module has its own ftrace_pages, remove | 4296 | * Each module has its own ftrace_pages, remove |
4297 | * them from the list. | 4297 | * them from the list. |
4298 | */ | 4298 | */ |
4299 | last_pg = &ftrace_pages_start; | 4299 | last_pg = &ftrace_pages_start; |
4300 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { | 4300 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { |
4301 | rec = &pg->records[0]; | 4301 | rec = &pg->records[0]; |
4302 | if (within_module_core(rec->ip, mod)) { | 4302 | if (within_module_core(rec->ip, mod)) { |
4303 | /* | 4303 | /* |
4304 | * As core pages are first, the first | 4304 | * As core pages are first, the first |
4305 | * page should never be a module page. | 4305 | * page should never be a module page. |
4306 | */ | 4306 | */ |
4307 | if (WARN_ON(pg == ftrace_pages_start)) | 4307 | if (WARN_ON(pg == ftrace_pages_start)) |
4308 | goto out_unlock; | 4308 | goto out_unlock; |
4309 | 4309 | ||
4310 | /* Check if we are deleting the last page */ | 4310 | /* Check if we are deleting the last page */ |
4311 | if (pg == ftrace_pages) | 4311 | if (pg == ftrace_pages) |
4312 | ftrace_pages = next_to_ftrace_page(last_pg); | 4312 | ftrace_pages = next_to_ftrace_page(last_pg); |
4313 | 4313 | ||
4314 | *last_pg = pg->next; | 4314 | *last_pg = pg->next; |
4315 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | 4315 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
4316 | free_pages((unsigned long)pg->records, order); | 4316 | free_pages((unsigned long)pg->records, order); |
4317 | kfree(pg); | 4317 | kfree(pg); |
4318 | } else | 4318 | } else |
4319 | last_pg = &pg->next; | 4319 | last_pg = &pg->next; |
4320 | } | 4320 | } |
4321 | out_unlock: | 4321 | out_unlock: |
4322 | mutex_unlock(&ftrace_lock); | 4322 | mutex_unlock(&ftrace_lock); |
4323 | } | 4323 | } |
4324 | 4324 | ||
4325 | static void ftrace_init_module(struct module *mod, | 4325 | static void ftrace_init_module(struct module *mod, |
4326 | unsigned long *start, unsigned long *end) | 4326 | unsigned long *start, unsigned long *end) |
4327 | { | 4327 | { |
4328 | if (ftrace_disabled || start == end) | 4328 | if (ftrace_disabled || start == end) |
4329 | return; | 4329 | return; |
4330 | ftrace_process_locs(mod, start, end); | 4330 | ftrace_process_locs(mod, start, end); |
4331 | } | 4331 | } |
4332 | 4332 | ||
4333 | static int ftrace_module_notify_enter(struct notifier_block *self, | 4333 | void ftrace_module_init(struct module *mod) |
4334 | unsigned long val, void *data) | ||
4335 | { | 4334 | { |
4336 | struct module *mod = data; | 4335 | ftrace_init_module(mod, mod->ftrace_callsites, |
4337 | 4336 | mod->ftrace_callsites + | |
4338 | if (val == MODULE_STATE_COMING) | 4337 | mod->num_ftrace_callsites); |
4339 | ftrace_init_module(mod, mod->ftrace_callsites, | ||
4340 | mod->ftrace_callsites + | ||
4341 | mod->num_ftrace_callsites); | ||
4342 | return 0; | ||
4343 | } | 4338 | } |
4344 | 4339 | ||
4345 | static int ftrace_module_notify_exit(struct notifier_block *self, | 4340 | static int ftrace_module_notify_exit(struct notifier_block *self, |
4346 | unsigned long val, void *data) | 4341 | unsigned long val, void *data) |
4347 | { | 4342 | { |
4348 | struct module *mod = data; | 4343 | struct module *mod = data; |
4349 | 4344 | ||
4350 | if (val == MODULE_STATE_GOING) | 4345 | if (val == MODULE_STATE_GOING) |
4351 | ftrace_release_mod(mod); | 4346 | ftrace_release_mod(mod); |
4352 | 4347 | ||
4353 | return 0; | 4348 | return 0; |
4354 | } | 4349 | } |
4355 | #else | 4350 | #else |
4356 | static int ftrace_module_notify_enter(struct notifier_block *self, | ||
4357 | unsigned long val, void *data) | ||
4358 | { | ||
4359 | return 0; | ||
4360 | } | ||
4361 | static int ftrace_module_notify_exit(struct notifier_block *self, | 4351 | static int ftrace_module_notify_exit(struct notifier_block *self, |
4362 | unsigned long val, void *data) | 4352 | unsigned long val, void *data) |
4363 | { | 4353 | { |
4364 | return 0; | 4354 | return 0; |
4365 | } | 4355 | } |
4366 | #endif /* CONFIG_MODULES */ | 4356 | #endif /* CONFIG_MODULES */ |
4367 | 4357 | ||
4368 | struct notifier_block ftrace_module_enter_nb = { | ||
4369 | .notifier_call = ftrace_module_notify_enter, | ||
4370 | .priority = INT_MAX, /* Run before anything that can use kprobes */ | ||
4371 | }; | ||
4372 | |||
4373 | struct notifier_block ftrace_module_exit_nb = { | 4358 | struct notifier_block ftrace_module_exit_nb = { |
4374 | .notifier_call = ftrace_module_notify_exit, | 4359 | .notifier_call = ftrace_module_notify_exit, |
4375 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | 4360 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ |
4376 | }; | 4361 | }; |
4377 | 4362 | ||
4378 | void __init ftrace_init(void) | 4363 | void __init ftrace_init(void) |
4379 | { | 4364 | { |
4380 | extern unsigned long __start_mcount_loc[]; | 4365 | extern unsigned long __start_mcount_loc[]; |
4381 | extern unsigned long __stop_mcount_loc[]; | 4366 | extern unsigned long __stop_mcount_loc[]; |
4382 | unsigned long count, flags; | 4367 | unsigned long count, flags; |
4383 | int ret; | 4368 | int ret; |
4384 | 4369 | ||
4385 | local_irq_save(flags); | 4370 | local_irq_save(flags); |
4386 | ret = ftrace_dyn_arch_init(); | 4371 | ret = ftrace_dyn_arch_init(); |
4387 | local_irq_restore(flags); | 4372 | local_irq_restore(flags); |
4388 | if (ret) | 4373 | if (ret) |
4389 | goto failed; | 4374 | goto failed; |
4390 | 4375 | ||
4391 | count = __stop_mcount_loc - __start_mcount_loc; | 4376 | count = __stop_mcount_loc - __start_mcount_loc; |
4392 | if (!count) { | 4377 | if (!count) { |
4393 | pr_info("ftrace: No functions to be traced?\n"); | 4378 | pr_info("ftrace: No functions to be traced?\n"); |
4394 | goto failed; | 4379 | goto failed; |
4395 | } | 4380 | } |
4396 | 4381 | ||
4397 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | 4382 | pr_info("ftrace: allocating %ld entries in %ld pages\n", |
4398 | count, count / ENTRIES_PER_PAGE + 1); | 4383 | count, count / ENTRIES_PER_PAGE + 1); |
4399 | 4384 | ||
4400 | last_ftrace_enabled = ftrace_enabled = 1; | 4385 | last_ftrace_enabled = ftrace_enabled = 1; |
4401 | 4386 | ||
4402 | ret = ftrace_process_locs(NULL, | 4387 | ret = ftrace_process_locs(NULL, |
4403 | __start_mcount_loc, | 4388 | __start_mcount_loc, |
4404 | __stop_mcount_loc); | 4389 | __stop_mcount_loc); |
4405 | |||
4406 | ret = register_module_notifier(&ftrace_module_enter_nb); | ||
4407 | if (ret) | ||
4408 | pr_warning("Failed to register trace ftrace module enter notifier\n"); | ||
4409 | 4390 | ||
4410 | ret = register_module_notifier(&ftrace_module_exit_nb); | 4391 | ret = register_module_notifier(&ftrace_module_exit_nb); |
4411 | if (ret) | 4392 | if (ret) |
4412 | pr_warning("Failed to register trace ftrace module exit notifier\n"); | 4393 | pr_warning("Failed to register trace ftrace module exit notifier\n"); |
4413 | 4394 | ||
4414 | set_ftrace_early_filters(); | 4395 | set_ftrace_early_filters(); |
4415 | 4396 | ||
4416 | return; | 4397 | return; |
4417 | failed: | 4398 | failed: |
4418 | ftrace_disabled = 1; | 4399 | ftrace_disabled = 1; |
4419 | } | 4400 | } |
4420 | 4401 | ||
4421 | #else | 4402 | #else |
4422 | 4403 | ||
4423 | static struct ftrace_ops global_ops = { | 4404 | static struct ftrace_ops global_ops = { |
4424 | .func = ftrace_stub, | 4405 | .func = ftrace_stub, |
4425 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4406 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
4426 | INIT_REGEX_LOCK(global_ops) | 4407 | INIT_REGEX_LOCK(global_ops) |
4427 | }; | 4408 | }; |
4428 | 4409 | ||
4429 | static int __init ftrace_nodyn_init(void) | 4410 | static int __init ftrace_nodyn_init(void) |
4430 | { | 4411 | { |
4431 | ftrace_enabled = 1; | 4412 | ftrace_enabled = 1; |
4432 | return 0; | 4413 | return 0; |
4433 | } | 4414 | } |
4434 | core_initcall(ftrace_nodyn_init); | 4415 | core_initcall(ftrace_nodyn_init); |
4435 | 4416 | ||
4436 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 4417 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
4437 | static inline void ftrace_startup_enable(int command) { } | 4418 | static inline void ftrace_startup_enable(int command) { } |
4438 | /* Keep as macros so we do not need to define the commands */ | 4419 | /* Keep as macros so we do not need to define the commands */ |
4439 | # define ftrace_startup(ops, command) \ | 4420 | # define ftrace_startup(ops, command) \ |
4440 | ({ \ | 4421 | ({ \ |
4441 | int ___ret = __register_ftrace_function(ops); \ | 4422 | int ___ret = __register_ftrace_function(ops); \ |
4442 | if (!___ret) \ | 4423 | if (!___ret) \ |
4443 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ | 4424 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ |
4444 | ___ret; \ | 4425 | ___ret; \ |
4445 | }) | 4426 | }) |
4446 | # define ftrace_shutdown(ops, command) \ | 4427 | # define ftrace_shutdown(ops, command) \ |
4447 | ({ \ | 4428 | ({ \ |
4448 | int ___ret = __unregister_ftrace_function(ops); \ | 4429 | int ___ret = __unregister_ftrace_function(ops); \ |
4449 | if (!___ret) \ | 4430 | if (!___ret) \ |
4450 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ | 4431 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ |
4451 | ___ret; \ | 4432 | ___ret; \ |
4452 | }) | 4433 | }) |
4453 | 4434 | ||
4454 | # define ftrace_startup_sysctl() do { } while (0) | 4435 | # define ftrace_startup_sysctl() do { } while (0) |
4455 | # define ftrace_shutdown_sysctl() do { } while (0) | 4436 | # define ftrace_shutdown_sysctl() do { } while (0) |
4456 | 4437 | ||
4457 | static inline int | 4438 | static inline int |
4458 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | 4439 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
4459 | { | 4440 | { |
4460 | return 1; | 4441 | return 1; |
4461 | } | 4442 | } |
4462 | 4443 | ||
4463 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 4444 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
4464 | 4445 | ||
4465 | static void | 4446 | static void |
4466 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | 4447 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, |
4467 | struct ftrace_ops *op, struct pt_regs *regs) | 4448 | struct ftrace_ops *op, struct pt_regs *regs) |
4468 | { | 4449 | { |
4469 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) | 4450 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) |
4470 | return; | 4451 | return; |
4471 | 4452 | ||
4472 | /* | 4453 | /* |
4473 | * Some of the ops may be dynamically allocated, | 4454 | * Some of the ops may be dynamically allocated, |
4474 | * they must be freed after a synchronize_sched(). | 4455 | * they must be freed after a synchronize_sched(). |
4475 | */ | 4456 | */ |
4476 | preempt_disable_notrace(); | 4457 | preempt_disable_notrace(); |
4477 | trace_recursion_set(TRACE_CONTROL_BIT); | 4458 | trace_recursion_set(TRACE_CONTROL_BIT); |
4478 | 4459 | ||
4479 | /* | 4460 | /* |
4480 | * Control funcs (perf) uses RCU. Only trace if | 4461 | * Control funcs (perf) uses RCU. Only trace if |
4481 | * RCU is currently active. | 4462 | * RCU is currently active. |
4482 | */ | 4463 | */ |
4483 | if (!rcu_is_watching()) | 4464 | if (!rcu_is_watching()) |
4484 | goto out; | 4465 | goto out; |
4485 | 4466 | ||
4486 | do_for_each_ftrace_op(op, ftrace_control_list) { | 4467 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4487 | if (!(op->flags & FTRACE_OPS_FL_STUB) && | 4468 | if (!(op->flags & FTRACE_OPS_FL_STUB) && |
4488 | !ftrace_function_local_disabled(op) && | 4469 | !ftrace_function_local_disabled(op) && |
4489 | ftrace_ops_test(op, ip, regs)) | 4470 | ftrace_ops_test(op, ip, regs)) |
4490 | op->func(ip, parent_ip, op, regs); | 4471 | op->func(ip, parent_ip, op, regs); |
4491 | } while_for_each_ftrace_op(op); | 4472 | } while_for_each_ftrace_op(op); |
4492 | out: | 4473 | out: |
4493 | trace_recursion_clear(TRACE_CONTROL_BIT); | 4474 | trace_recursion_clear(TRACE_CONTROL_BIT); |
4494 | preempt_enable_notrace(); | 4475 | preempt_enable_notrace(); |
4495 | } | 4476 | } |
4496 | 4477 | ||
4497 | static struct ftrace_ops control_ops = { | 4478 | static struct ftrace_ops control_ops = { |
4498 | .func = ftrace_ops_control_func, | 4479 | .func = ftrace_ops_control_func, |
4499 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4480 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
4500 | INIT_REGEX_LOCK(control_ops) | 4481 | INIT_REGEX_LOCK(control_ops) |
4501 | }; | 4482 | }; |
4502 | 4483 | ||
4503 | static inline void | 4484 | static inline void |
4504 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 4485 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
4505 | struct ftrace_ops *ignored, struct pt_regs *regs) | 4486 | struct ftrace_ops *ignored, struct pt_regs *regs) |
4506 | { | 4487 | { |
4507 | struct ftrace_ops *op; | 4488 | struct ftrace_ops *op; |
4508 | int bit; | 4489 | int bit; |
4509 | 4490 | ||
4510 | if (function_trace_stop) | 4491 | if (function_trace_stop) |
4511 | return; | 4492 | return; |
4512 | 4493 | ||
4513 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | 4494 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
4514 | if (bit < 0) | 4495 | if (bit < 0) |
4515 | return; | 4496 | return; |
4516 | 4497 | ||
4517 | /* | 4498 | /* |
4518 | * Some of the ops may be dynamically allocated, | 4499 | * Some of the ops may be dynamically allocated, |
4519 | * they must be freed after a synchronize_sched(). | 4500 | * they must be freed after a synchronize_sched(). |
4520 | */ | 4501 | */ |
4521 | preempt_disable_notrace(); | 4502 | preempt_disable_notrace(); |
4522 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 4503 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4523 | if (ftrace_ops_test(op, ip, regs)) | 4504 | if (ftrace_ops_test(op, ip, regs)) |
4524 | op->func(ip, parent_ip, op, regs); | 4505 | op->func(ip, parent_ip, op, regs); |
4525 | } while_for_each_ftrace_op(op); | 4506 | } while_for_each_ftrace_op(op); |
4526 | preempt_enable_notrace(); | 4507 | preempt_enable_notrace(); |
4527 | trace_clear_recursion(bit); | 4508 | trace_clear_recursion(bit); |
4528 | } | 4509 | } |
4529 | 4510 | ||
4530 | /* | 4511 | /* |
4531 | * Some archs only support passing ip and parent_ip. Even though | 4512 | * Some archs only support passing ip and parent_ip. Even though |
4532 | * the list function ignores the op parameter, we do not want any | 4513 | * the list function ignores the op parameter, we do not want any |
4533 | * C side effects, where a function is called without the caller | 4514 | * C side effects, where a function is called without the caller |
4534 | * sending a third parameter. | 4515 | * sending a third parameter. |
4535 | * Archs are to support both the regs and ftrace_ops at the same time. | 4516 | * Archs are to support both the regs and ftrace_ops at the same time. |
4536 | * If they support ftrace_ops, it is assumed they support regs. | 4517 | * If they support ftrace_ops, it is assumed they support regs. |
4537 | * If call backs want to use regs, they must either check for regs | 4518 | * If call backs want to use regs, they must either check for regs |
4538 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. | 4519 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
4539 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. | 4520 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. |
4540 | * An architecture can pass partial regs with ftrace_ops and still | 4521 | * An architecture can pass partial regs with ftrace_ops and still |
4541 | * set the ARCH_SUPPORT_FTARCE_OPS. | 4522 | * set the ARCH_SUPPORT_FTARCE_OPS. |
4542 | */ | 4523 | */ |
4543 | #if ARCH_SUPPORTS_FTRACE_OPS | 4524 | #if ARCH_SUPPORTS_FTRACE_OPS |
4544 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 4525 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
4545 | struct ftrace_ops *op, struct pt_regs *regs) | 4526 | struct ftrace_ops *op, struct pt_regs *regs) |
4546 | { | 4527 | { |
4547 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); | 4528 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); |
4548 | } | 4529 | } |
4549 | #else | 4530 | #else |
4550 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | 4531 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) |
4551 | { | 4532 | { |
4552 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); | 4533 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
4553 | } | 4534 | } |
4554 | #endif | 4535 | #endif |
4555 | 4536 | ||
4556 | static void clear_ftrace_swapper(void) | 4537 | static void clear_ftrace_swapper(void) |
4557 | { | 4538 | { |
4558 | struct task_struct *p; | 4539 | struct task_struct *p; |
4559 | int cpu; | 4540 | int cpu; |
4560 | 4541 | ||
4561 | get_online_cpus(); | 4542 | get_online_cpus(); |
4562 | for_each_online_cpu(cpu) { | 4543 | for_each_online_cpu(cpu) { |
4563 | p = idle_task(cpu); | 4544 | p = idle_task(cpu); |
4564 | clear_tsk_trace_trace(p); | 4545 | clear_tsk_trace_trace(p); |
4565 | } | 4546 | } |
4566 | put_online_cpus(); | 4547 | put_online_cpus(); |
4567 | } | 4548 | } |
4568 | 4549 | ||
4569 | static void set_ftrace_swapper(void) | 4550 | static void set_ftrace_swapper(void) |
4570 | { | 4551 | { |
4571 | struct task_struct *p; | 4552 | struct task_struct *p; |
4572 | int cpu; | 4553 | int cpu; |
4573 | 4554 | ||
4574 | get_online_cpus(); | 4555 | get_online_cpus(); |
4575 | for_each_online_cpu(cpu) { | 4556 | for_each_online_cpu(cpu) { |
4576 | p = idle_task(cpu); | 4557 | p = idle_task(cpu); |
4577 | set_tsk_trace_trace(p); | 4558 | set_tsk_trace_trace(p); |
4578 | } | 4559 | } |
4579 | put_online_cpus(); | 4560 | put_online_cpus(); |
4580 | } | 4561 | } |
4581 | 4562 | ||
4582 | static void clear_ftrace_pid(struct pid *pid) | 4563 | static void clear_ftrace_pid(struct pid *pid) |
4583 | { | 4564 | { |
4584 | struct task_struct *p; | 4565 | struct task_struct *p; |
4585 | 4566 | ||
4586 | rcu_read_lock(); | 4567 | rcu_read_lock(); |
4587 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 4568 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
4588 | clear_tsk_trace_trace(p); | 4569 | clear_tsk_trace_trace(p); |
4589 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 4570 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
4590 | rcu_read_unlock(); | 4571 | rcu_read_unlock(); |
4591 | 4572 | ||
4592 | put_pid(pid); | 4573 | put_pid(pid); |
4593 | } | 4574 | } |
4594 | 4575 | ||
4595 | static void set_ftrace_pid(struct pid *pid) | 4576 | static void set_ftrace_pid(struct pid *pid) |
4596 | { | 4577 | { |
4597 | struct task_struct *p; | 4578 | struct task_struct *p; |
4598 | 4579 | ||
4599 | rcu_read_lock(); | 4580 | rcu_read_lock(); |
4600 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 4581 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
4601 | set_tsk_trace_trace(p); | 4582 | set_tsk_trace_trace(p); |
4602 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 4583 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
4603 | rcu_read_unlock(); | 4584 | rcu_read_unlock(); |
4604 | } | 4585 | } |
4605 | 4586 | ||
4606 | static void clear_ftrace_pid_task(struct pid *pid) | 4587 | static void clear_ftrace_pid_task(struct pid *pid) |
4607 | { | 4588 | { |
4608 | if (pid == ftrace_swapper_pid) | 4589 | if (pid == ftrace_swapper_pid) |
4609 | clear_ftrace_swapper(); | 4590 | clear_ftrace_swapper(); |
4610 | else | 4591 | else |
4611 | clear_ftrace_pid(pid); | 4592 | clear_ftrace_pid(pid); |
4612 | } | 4593 | } |
4613 | 4594 | ||
4614 | static void set_ftrace_pid_task(struct pid *pid) | 4595 | static void set_ftrace_pid_task(struct pid *pid) |
4615 | { | 4596 | { |
4616 | if (pid == ftrace_swapper_pid) | 4597 | if (pid == ftrace_swapper_pid) |
4617 | set_ftrace_swapper(); | 4598 | set_ftrace_swapper(); |
4618 | else | 4599 | else |
4619 | set_ftrace_pid(pid); | 4600 | set_ftrace_pid(pid); |
4620 | } | 4601 | } |
4621 | 4602 | ||
4622 | static int ftrace_pid_add(int p) | 4603 | static int ftrace_pid_add(int p) |
4623 | { | 4604 | { |
4624 | struct pid *pid; | 4605 | struct pid *pid; |
4625 | struct ftrace_pid *fpid; | 4606 | struct ftrace_pid *fpid; |
4626 | int ret = -EINVAL; | 4607 | int ret = -EINVAL; |
4627 | 4608 | ||
4628 | mutex_lock(&ftrace_lock); | 4609 | mutex_lock(&ftrace_lock); |
4629 | 4610 | ||
4630 | if (!p) | 4611 | if (!p) |
4631 | pid = ftrace_swapper_pid; | 4612 | pid = ftrace_swapper_pid; |
4632 | else | 4613 | else |
4633 | pid = find_get_pid(p); | 4614 | pid = find_get_pid(p); |
4634 | 4615 | ||
4635 | if (!pid) | 4616 | if (!pid) |
4636 | goto out; | 4617 | goto out; |
4637 | 4618 | ||
4638 | ret = 0; | 4619 | ret = 0; |
4639 | 4620 | ||
4640 | list_for_each_entry(fpid, &ftrace_pids, list) | 4621 | list_for_each_entry(fpid, &ftrace_pids, list) |
4641 | if (fpid->pid == pid) | 4622 | if (fpid->pid == pid) |
4642 | goto out_put; | 4623 | goto out_put; |
4643 | 4624 | ||
4644 | ret = -ENOMEM; | 4625 | ret = -ENOMEM; |
4645 | 4626 | ||
4646 | fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); | 4627 | fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); |
4647 | if (!fpid) | 4628 | if (!fpid) |
4648 | goto out_put; | 4629 | goto out_put; |
4649 | 4630 | ||
4650 | list_add(&fpid->list, &ftrace_pids); | 4631 | list_add(&fpid->list, &ftrace_pids); |
4651 | fpid->pid = pid; | 4632 | fpid->pid = pid; |
4652 | 4633 | ||
4653 | set_ftrace_pid_task(pid); | 4634 | set_ftrace_pid_task(pid); |
4654 | 4635 | ||
4655 | ftrace_update_pid_func(); | 4636 | ftrace_update_pid_func(); |
4656 | ftrace_startup_enable(0); | 4637 | ftrace_startup_enable(0); |
4657 | 4638 | ||
4658 | mutex_unlock(&ftrace_lock); | 4639 | mutex_unlock(&ftrace_lock); |
4659 | return 0; | 4640 | return 0; |
4660 | 4641 | ||
4661 | out_put: | 4642 | out_put: |
4662 | if (pid != ftrace_swapper_pid) | 4643 | if (pid != ftrace_swapper_pid) |
4663 | put_pid(pid); | 4644 | put_pid(pid); |
4664 | 4645 | ||
4665 | out: | 4646 | out: |
4666 | mutex_unlock(&ftrace_lock); | 4647 | mutex_unlock(&ftrace_lock); |
4667 | return ret; | 4648 | return ret; |
4668 | } | 4649 | } |
4669 | 4650 | ||
4670 | static void ftrace_pid_reset(void) | 4651 | static void ftrace_pid_reset(void) |
4671 | { | 4652 | { |
4672 | struct ftrace_pid *fpid, *safe; | 4653 | struct ftrace_pid *fpid, *safe; |
4673 | 4654 | ||
4674 | mutex_lock(&ftrace_lock); | 4655 | mutex_lock(&ftrace_lock); |
4675 | list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { | 4656 | list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { |
4676 | struct pid *pid = fpid->pid; | 4657 | struct pid *pid = fpid->pid; |
4677 | 4658 | ||
4678 | clear_ftrace_pid_task(pid); | 4659 | clear_ftrace_pid_task(pid); |
4679 | 4660 | ||
4680 | list_del(&fpid->list); | 4661 | list_del(&fpid->list); |
4681 | kfree(fpid); | 4662 | kfree(fpid); |
4682 | } | 4663 | } |
4683 | 4664 | ||
4684 | ftrace_update_pid_func(); | 4665 | ftrace_update_pid_func(); |
4685 | ftrace_startup_enable(0); | 4666 | ftrace_startup_enable(0); |
4686 | 4667 | ||
4687 | mutex_unlock(&ftrace_lock); | 4668 | mutex_unlock(&ftrace_lock); |
4688 | } | 4669 | } |
4689 | 4670 | ||
4690 | static void *fpid_start(struct seq_file *m, loff_t *pos) | 4671 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
4691 | { | 4672 | { |
4692 | mutex_lock(&ftrace_lock); | 4673 | mutex_lock(&ftrace_lock); |
4693 | 4674 | ||
4694 | if (list_empty(&ftrace_pids) && (!*pos)) | 4675 | if (list_empty(&ftrace_pids) && (!*pos)) |
4695 | return (void *) 1; | 4676 | return (void *) 1; |
4696 | 4677 | ||
4697 | return seq_list_start(&ftrace_pids, *pos); | 4678 | return seq_list_start(&ftrace_pids, *pos); |
4698 | } | 4679 | } |
4699 | 4680 | ||
4700 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | 4681 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) |
4701 | { | 4682 | { |
4702 | if (v == (void *)1) | 4683 | if (v == (void *)1) |
4703 | return NULL; | 4684 | return NULL; |
4704 | 4685 | ||
4705 | return seq_list_next(v, &ftrace_pids, pos); | 4686 | return seq_list_next(v, &ftrace_pids, pos); |
4706 | } | 4687 | } |
4707 | 4688 | ||
4708 | static void fpid_stop(struct seq_file *m, void *p) | 4689 | static void fpid_stop(struct seq_file *m, void *p) |
4709 | { | 4690 | { |
4710 | mutex_unlock(&ftrace_lock); | 4691 | mutex_unlock(&ftrace_lock); |
4711 | } | 4692 | } |
4712 | 4693 | ||
4713 | static int fpid_show(struct seq_file *m, void *v) | 4694 | static int fpid_show(struct seq_file *m, void *v) |
4714 | { | 4695 | { |
4715 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | 4696 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); |
4716 | 4697 | ||
4717 | if (v == (void *)1) { | 4698 | if (v == (void *)1) { |
4718 | seq_printf(m, "no pid\n"); | 4699 | seq_printf(m, "no pid\n"); |
4719 | return 0; | 4700 | return 0; |
4720 | } | 4701 | } |
4721 | 4702 | ||
4722 | if (fpid->pid == ftrace_swapper_pid) | 4703 | if (fpid->pid == ftrace_swapper_pid) |
4723 | seq_printf(m, "swapper tasks\n"); | 4704 | seq_printf(m, "swapper tasks\n"); |
4724 | else | 4705 | else |
4725 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | 4706 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); |
4726 | 4707 | ||
4727 | return 0; | 4708 | return 0; |
4728 | } | 4709 | } |
4729 | 4710 | ||
4730 | static const struct seq_operations ftrace_pid_sops = { | 4711 | static const struct seq_operations ftrace_pid_sops = { |
4731 | .start = fpid_start, | 4712 | .start = fpid_start, |
4732 | .next = fpid_next, | 4713 | .next = fpid_next, |
4733 | .stop = fpid_stop, | 4714 | .stop = fpid_stop, |
4734 | .show = fpid_show, | 4715 | .show = fpid_show, |
4735 | }; | 4716 | }; |
4736 | 4717 | ||
4737 | static int | 4718 | static int |
4738 | ftrace_pid_open(struct inode *inode, struct file *file) | 4719 | ftrace_pid_open(struct inode *inode, struct file *file) |
4739 | { | 4720 | { |
4740 | int ret = 0; | 4721 | int ret = 0; |
4741 | 4722 | ||
4742 | if ((file->f_mode & FMODE_WRITE) && | 4723 | if ((file->f_mode & FMODE_WRITE) && |
4743 | (file->f_flags & O_TRUNC)) | 4724 | (file->f_flags & O_TRUNC)) |
4744 | ftrace_pid_reset(); | 4725 | ftrace_pid_reset(); |
4745 | 4726 | ||
4746 | if (file->f_mode & FMODE_READ) | 4727 | if (file->f_mode & FMODE_READ) |
4747 | ret = seq_open(file, &ftrace_pid_sops); | 4728 | ret = seq_open(file, &ftrace_pid_sops); |
4748 | 4729 | ||
4749 | return ret; | 4730 | return ret; |
4750 | } | 4731 | } |
4751 | 4732 | ||
4752 | static ssize_t | 4733 | static ssize_t |
4753 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | 4734 | ftrace_pid_write(struct file *filp, const char __user *ubuf, |
4754 | size_t cnt, loff_t *ppos) | 4735 | size_t cnt, loff_t *ppos) |
4755 | { | 4736 | { |
4756 | char buf[64], *tmp; | 4737 | char buf[64], *tmp; |
4757 | long val; | 4738 | long val; |
4758 | int ret; | 4739 | int ret; |
4759 | 4740 | ||
4760 | if (cnt >= sizeof(buf)) | 4741 | if (cnt >= sizeof(buf)) |
4761 | return -EINVAL; | 4742 | return -EINVAL; |
4762 | 4743 | ||
4763 | if (copy_from_user(&buf, ubuf, cnt)) | 4744 | if (copy_from_user(&buf, ubuf, cnt)) |
4764 | return -EFAULT; | 4745 | return -EFAULT; |
4765 | 4746 | ||
4766 | buf[cnt] = 0; | 4747 | buf[cnt] = 0; |
4767 | 4748 | ||
4768 | /* | 4749 | /* |
4769 | * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" | 4750 | * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" |
4770 | * to clean the filter quietly. | 4751 | * to clean the filter quietly. |
4771 | */ | 4752 | */ |
4772 | tmp = strstrip(buf); | 4753 | tmp = strstrip(buf); |
4773 | if (strlen(tmp) == 0) | 4754 | if (strlen(tmp) == 0) |
4774 | return 1; | 4755 | return 1; |
4775 | 4756 | ||
4776 | ret = kstrtol(tmp, 10, &val); | 4757 | ret = kstrtol(tmp, 10, &val); |
4777 | if (ret < 0) | 4758 | if (ret < 0) |
4778 | return ret; | 4759 | return ret; |
4779 | 4760 | ||
4780 | ret = ftrace_pid_add(val); | 4761 | ret = ftrace_pid_add(val); |
4781 | 4762 | ||
4782 | return ret ? ret : cnt; | 4763 | return ret ? ret : cnt; |
4783 | } | 4764 | } |
4784 | 4765 | ||
4785 | static int | 4766 | static int |
4786 | ftrace_pid_release(struct inode *inode, struct file *file) | 4767 | ftrace_pid_release(struct inode *inode, struct file *file) |
4787 | { | 4768 | { |
4788 | if (file->f_mode & FMODE_READ) | 4769 | if (file->f_mode & FMODE_READ) |
4789 | seq_release(inode, file); | 4770 | seq_release(inode, file); |
4790 | 4771 | ||
4791 | return 0; | 4772 | return 0; |
4792 | } | 4773 | } |
4793 | 4774 | ||
4794 | static const struct file_operations ftrace_pid_fops = { | 4775 | static const struct file_operations ftrace_pid_fops = { |
4795 | .open = ftrace_pid_open, | 4776 | .open = ftrace_pid_open, |
4796 | .write = ftrace_pid_write, | 4777 | .write = ftrace_pid_write, |
4797 | .read = seq_read, | 4778 | .read = seq_read, |
4798 | .llseek = tracing_lseek, | 4779 | .llseek = tracing_lseek, |
4799 | .release = ftrace_pid_release, | 4780 | .release = ftrace_pid_release, |
4800 | }; | 4781 | }; |
4801 | 4782 | ||
4802 | static __init int ftrace_init_debugfs(void) | 4783 | static __init int ftrace_init_debugfs(void) |
4803 | { | 4784 | { |
4804 | struct dentry *d_tracer; | 4785 | struct dentry *d_tracer; |
4805 | 4786 | ||
4806 | d_tracer = tracing_init_dentry(); | 4787 | d_tracer = tracing_init_dentry(); |
4807 | if (!d_tracer) | 4788 | if (!d_tracer) |
4808 | return 0; | 4789 | return 0; |
4809 | 4790 | ||
4810 | ftrace_init_dyn_debugfs(d_tracer); | 4791 | ftrace_init_dyn_debugfs(d_tracer); |
4811 | 4792 | ||
4812 | trace_create_file("set_ftrace_pid", 0644, d_tracer, | 4793 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
4813 | NULL, &ftrace_pid_fops); | 4794 | NULL, &ftrace_pid_fops); |
4814 | 4795 | ||
4815 | ftrace_profile_debugfs(d_tracer); | 4796 | ftrace_profile_debugfs(d_tracer); |
4816 | 4797 | ||
4817 | return 0; | 4798 | return 0; |
4818 | } | 4799 | } |
4819 | fs_initcall(ftrace_init_debugfs); | 4800 | fs_initcall(ftrace_init_debugfs); |
4820 | 4801 | ||
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 *
 * NOTE(review): the cross-reference above points at this function
 * itself; it presumably meant the graceful unregister path — confirm.
 */
void ftrace_kill(void)
{
	/* Mark ftrace permanently dead, then stop calling tracers. */
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
4834 | 4815 | ||
/**
 * Test if ftrace is dead or not.
 *
 * Returns the value of ftrace_disabled, which is nonzero once
 * ftrace_kill() has run.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
4842 | 4823 | ||
4843 | /** | 4824 | /** |
4844 | * register_ftrace_function - register a function for profiling | 4825 | * register_ftrace_function - register a function for profiling |
4845 | * @ops - ops structure that holds the function for profiling. | 4826 | * @ops - ops structure that holds the function for profiling. |
4846 | * | 4827 | * |
4847 | * Register a function to be called by all functions in the | 4828 | * Register a function to be called by all functions in the |
4848 | * kernel. | 4829 | * kernel. |
4849 | * | 4830 | * |
4850 | * Note: @ops->func and all the functions it calls must be labeled | 4831 | * Note: @ops->func and all the functions it calls must be labeled |
4851 | * with "notrace", otherwise it will go into a | 4832 | * with "notrace", otherwise it will go into a |
4852 | * recursive loop. | 4833 | * recursive loop. |
4853 | */ | 4834 | */ |
4854 | int register_ftrace_function(struct ftrace_ops *ops) | 4835 | int register_ftrace_function(struct ftrace_ops *ops) |
4855 | { | 4836 | { |
4856 | int ret = -1; | 4837 | int ret = -1; |
4857 | 4838 | ||
4858 | ftrace_ops_init(ops); | 4839 | ftrace_ops_init(ops); |
4859 | 4840 | ||
4860 | mutex_lock(&ftrace_lock); | 4841 | mutex_lock(&ftrace_lock); |
4861 | 4842 | ||
4862 | ret = ftrace_startup(ops, 0); | 4843 | ret = ftrace_startup(ops, 0); |
4863 | 4844 | ||
4864 | mutex_unlock(&ftrace_lock); | 4845 | mutex_unlock(&ftrace_lock); |
4865 | 4846 | ||
4866 | return ret; | 4847 | return ret; |
4867 | } | 4848 | } |
4868 | EXPORT_SYMBOL_GPL(register_ftrace_function); | 4849 | EXPORT_SYMBOL_GPL(register_ftrace_function); |
4869 | 4850 | ||
4870 | /** | 4851 | /** |
4871 | * unregister_ftrace_function - unregister a function for profiling. | 4852 | * unregister_ftrace_function - unregister a function for profiling. |
4872 | * @ops - ops structure that holds the function to unregister | 4853 | * @ops - ops structure that holds the function to unregister |
4873 | * | 4854 | * |
4874 | * Unregister a function that was added to be called by ftrace profiling. | 4855 | * Unregister a function that was added to be called by ftrace profiling. |
4875 | */ | 4856 | */ |
4876 | int unregister_ftrace_function(struct ftrace_ops *ops) | 4857 | int unregister_ftrace_function(struct ftrace_ops *ops) |
4877 | { | 4858 | { |
4878 | int ret; | 4859 | int ret; |
4879 | 4860 | ||
4880 | mutex_lock(&ftrace_lock); | 4861 | mutex_lock(&ftrace_lock); |
4881 | ret = ftrace_shutdown(ops, 0); | 4862 | ret = ftrace_shutdown(ops, 0); |
4882 | mutex_unlock(&ftrace_lock); | 4863 | mutex_unlock(&ftrace_lock); |
4883 | 4864 | ||
4884 | return ret; | 4865 | return ret; |
4885 | } | 4866 | } |
4886 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); | 4867 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
4887 | 4868 | ||
/*
 * Handler for the ftrace_enabled sysctl.  On a write that actually
 * toggles the value, start or stop function tracing accordingly.
 * All state changes happen under ftrace_lock.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	/* Once ftrace has been killed it cannot be re-enabled. */
	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on a read, on error, or when the value is unchanged. */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
4926 | 4907 | ||
4927 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 4908 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
4928 | 4909 | ||
/* Number of registered graph tracers (at most one is allowed). */
static int ftrace_graph_active;
/* PM notifier used to pause graph tracing across hibernation. */
static struct notifier_block ftrace_suspend_notifier;
4931 | 4912 | ||
/* Default entry hook: trace nothing. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
4936 | 4917 | ||
/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Saved entry callback; ftrace_graph_entry may point at a test wrapper. */
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
4942 | 4923 | ||
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Allocate one candidate stack per slot up front. */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* Free only the slots that were allocated so far. */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Out of pre-allocated stacks; caller retries on -EAGAIN. */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* Release the stacks that were not handed out to a task. */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
4988 | 4969 | ||
4989 | static void | 4970 | static void |
4990 | ftrace_graph_probe_sched_switch(void *ignore, | 4971 | ftrace_graph_probe_sched_switch(void *ignore, |
4991 | struct task_struct *prev, struct task_struct *next) | 4972 | struct task_struct *prev, struct task_struct *next) |
4992 | { | 4973 | { |
4993 | unsigned long long timestamp; | 4974 | unsigned long long timestamp; |
4994 | int index; | 4975 | int index; |
4995 | 4976 | ||
4996 | /* | 4977 | /* |
4997 | * Does the user want to count the time a function was asleep. | 4978 | * Does the user want to count the time a function was asleep. |
4998 | * If so, do not update the time stamps. | 4979 | * If so, do not update the time stamps. |
4999 | */ | 4980 | */ |
5000 | if (trace_flags & TRACE_ITER_SLEEP_TIME) | 4981 | if (trace_flags & TRACE_ITER_SLEEP_TIME) |
5001 | return; | 4982 | return; |
5002 | 4983 | ||
5003 | timestamp = trace_clock_local(); | 4984 | timestamp = trace_clock_local(); |
5004 | 4985 | ||
5005 | prev->ftrace_timestamp = timestamp; | 4986 | prev->ftrace_timestamp = timestamp; |
5006 | 4987 | ||
5007 | /* only process tasks that we timestamped */ | 4988 | /* only process tasks that we timestamped */ |
5008 | if (!next->ftrace_timestamp) | 4989 | if (!next->ftrace_timestamp) |
5009 | return; | 4990 | return; |
5010 | 4991 | ||
5011 | /* | 4992 | /* |
5012 | * Update all the counters in next to make up for the | 4993 | * Update all the counters in next to make up for the |
5013 | * time next was sleeping. | 4994 | * time next was sleeping. |
5014 | */ | 4995 | */ |
5015 | timestamp -= next->ftrace_timestamp; | 4996 | timestamp -= next->ftrace_timestamp; |
5016 | 4997 | ||
5017 | for (index = next->curr_ret_stack; index >= 0; index--) | 4998 | for (index = next->curr_ret_stack; index >= 0; index--) |
5018 | next->ret_stack[index].calltime += timestamp; | 4999 | next->ret_stack[index].calltime += timestamp; |
5019 | } | 5000 | } |
5020 | 5001 | ||
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	/* Scratch array of candidate stacks, handed out batch by batch. */
	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	/* -EAGAIN means the batch ran out mid-scan; retry with a new batch. */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
5054 | 5035 | ||
5055 | /* | 5036 | /* |
5056 | * Hibernation protection. | 5037 | * Hibernation protection. |
5057 | * The state of the current task is too much unstable during | 5038 | * The state of the current task is too much unstable during |
5058 | * suspend/restore to disk. We want to protect against that. | 5039 | * suspend/restore to disk. We want to protect against that. |
5059 | */ | 5040 | */ |
5060 | static int | 5041 | static int |
5061 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | 5042 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, |
5062 | void *unused) | 5043 | void *unused) |
5063 | { | 5044 | { |
5064 | switch (state) { | 5045 | switch (state) { |
5065 | case PM_HIBERNATION_PREPARE: | 5046 | case PM_HIBERNATION_PREPARE: |
5066 | pause_graph_tracing(); | 5047 | pause_graph_tracing(); |
5067 | break; | 5048 | break; |
5068 | 5049 | ||
5069 | case PM_POST_HIBERNATION: | 5050 | case PM_POST_HIBERNATION: |
5070 | unpause_graph_tracing(); | 5051 | unpause_graph_tracing(); |
5071 | break; | 5052 | break; |
5072 | } | 5053 | } |
5073 | return NOTIFY_DONE; | 5054 | return NOTIFY_DONE; |
5074 | } | 5055 | } |
5075 | 5056 | ||
/* Just a place holder for function graph */
static struct ftrace_ops fgraph_ops __read_mostly = {
	.func = ftrace_stub,
	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
			FTRACE_OPS_FL_RECURSION_SAFE,
};
5082 | 5063 | ||
5083 | static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) | 5064 | static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) |
5084 | { | 5065 | { |
5085 | if (!ftrace_ops_test(&global_ops, trace->func, NULL)) | 5066 | if (!ftrace_ops_test(&global_ops, trace->func, NULL)) |
5086 | return 0; | 5067 | return 0; |
5087 | return __ftrace_graph_entry(trace); | 5068 | return __ftrace_graph_entry(trace); |
5088 | } | 5069 | } |
5089 | 5070 | ||
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
static void update_function_graph_func(void)
{
	/* Call the entry callback directly only when global_ops is the
	 * sole registered ops (or none are); otherwise filter first. */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list == &global_ops &&
	     global_ops.next == &ftrace_list_end))
		ftrace_graph_entry = __ftrace_graph_entry;
	else
		ftrace_graph_entry = ftrace_graph_entry_test;
}
5106 | 5087 | ||
/*
 * Register the function graph tracer's return and entry callbacks and
 * start graph tracing.  Only one graph tracer may be active at a time.
 */
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	/* Mark active before allocating the per-task return stacks. */
	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		/*
		 * NOTE(review): the PM notifier registered above stays
		 * registered on this failure path — confirm intentional.
		 */
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
5148 | 5129 | ||
/* Tear down function graph tracing; no-op if it is not active. */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	/* Point all hooks back at their stubs before shutting down. */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
	/* Drop the hibernation and sched_switch hooks. */
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

out:
	mutex_unlock(&ftrace_lock);
}
5167 | 5148 | ||
/* Per-CPU return stack kept for the idle task across hotplug cycles. */
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5169 | 5150 | ||
/*
 * Install @ret_stack on @t and reset its graph-tracing bookkeeping.
 * The caller sets t->curr_ret_stack before calling this.
 */
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
5180 | 5161 | ||
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		/* Reuse the per-cpu stack from a previous online, if any. */
		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
5210 | 5191 | ||
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* On allocation failure the task is simply not graph-traced. */
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
5229 | 5210 | ||
/* Detach and free a task's return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
5240 | 5221 | ||
/* Emergency stop of function tracing, for use by the graph tracer. */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
5245 | #endif | 5226 | #endif |
5246 | 5227 |
-
mentioned in commit 21f585