Commit 049fb9bd416077b3622d317a45796be4f2431df3

Authored by Steven Rostedt (Red Hat)
Committed by Steven Rostedt
1 parent f36d1be293

ftrace/module: Call clean up function when module init fails early

If the module init code fails after calling ftrace_module_init() and before
calling do_init_module(), we can suffer from a memory leak. This is because
ftrace_module_init() allocates pages to store the locations that ftrace
hooks are placed in the module text. If do_init_module() fails, it still
calls the MODULE_GOING notifiers which will tell ftrace to do a clean up of
the pages it allocated for the module. But if load_module() fails before
then, the pages allocated by ftrace_module_init() will never be freed.

Call ftrace_release_mod() on the module if load_module() fails before
getting to do_init_module().

Link: http://lkml.kernel.org/r/567CEA31.1070507@intel.com

Reported-by: "Qiu, PeiyangX" <peiyangx.qiu@intel.com>
Fixes: a949ae560a511 "ftrace/module: Hardcode ftrace_module_init() call into load_module()"
Cc: stable@vger.kernel.org # v2.6.38+
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Showing 2 changed files with 7 additions and 0 deletions Inline Diff

include/linux/ftrace.h
1 /* 1 /*
2 * Ftrace header. For implementation details beyond the random comments 2 * Ftrace header. For implementation details beyond the random comments
3 * scattered below, see: Documentation/trace/ftrace-design.txt 3 * scattered below, see: Documentation/trace/ftrace-design.txt
4 */ 4 */
5 5
6 #ifndef _LINUX_FTRACE_H 6 #ifndef _LINUX_FTRACE_H
7 #define _LINUX_FTRACE_H 7 #define _LINUX_FTRACE_H
8 8
9 #include <linux/trace_clock.h> 9 #include <linux/trace_clock.h>
10 #include <linux/kallsyms.h> 10 #include <linux/kallsyms.h>
11 #include <linux/linkage.h> 11 #include <linux/linkage.h>
12 #include <linux/bitops.h> 12 #include <linux/bitops.h>
13 #include <linux/ptrace.h> 13 #include <linux/ptrace.h>
14 #include <linux/ktime.h> 14 #include <linux/ktime.h>
15 #include <linux/sched.h> 15 #include <linux/sched.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/fs.h> 18 #include <linux/fs.h>
19 19
20 #include <asm/ftrace.h> 20 #include <asm/ftrace.h>
21 21
22 /* 22 /*
23 * If the arch supports passing the variable contents of 23 * If the arch supports passing the variable contents of
24 * function_trace_op as the third parameter back from the 24 * function_trace_op as the third parameter back from the
25 * mcount call, then the arch should define this as 1. 25 * mcount call, then the arch should define this as 1.
26 */ 26 */
27 #ifndef ARCH_SUPPORTS_FTRACE_OPS 27 #ifndef ARCH_SUPPORTS_FTRACE_OPS
28 #define ARCH_SUPPORTS_FTRACE_OPS 0 28 #define ARCH_SUPPORTS_FTRACE_OPS 0
29 #endif 29 #endif
30 30
31 /* 31 /*
32 * If the arch's mcount caller does not support all of ftrace's 32 * If the arch's mcount caller does not support all of ftrace's
33 * features, then it must call an indirect function that 33 * features, then it must call an indirect function that
 34 * does. Or at least does enough to prevent any unwelcome side effects. 34 * does. Or at least does enough to prevent any unwelcome side effects.
35 */ 35 */
36 #if !ARCH_SUPPORTS_FTRACE_OPS 36 #if !ARCH_SUPPORTS_FTRACE_OPS
37 # define FTRACE_FORCE_LIST_FUNC 1 37 # define FTRACE_FORCE_LIST_FUNC 1
38 #else 38 #else
39 # define FTRACE_FORCE_LIST_FUNC 0 39 # define FTRACE_FORCE_LIST_FUNC 0
40 #endif 40 #endif
41 41
42 /* Main tracing buffer and events set up */ 42 /* Main tracing buffer and events set up */
43 #ifdef CONFIG_TRACING 43 #ifdef CONFIG_TRACING
44 void trace_init(void); 44 void trace_init(void);
45 #else 45 #else
46 static inline void trace_init(void) { } 46 static inline void trace_init(void) { }
47 #endif 47 #endif
48 48
49 struct module; 49 struct module;
50 struct ftrace_hash; 50 struct ftrace_hash;
51 51
52 #ifdef CONFIG_FUNCTION_TRACER 52 #ifdef CONFIG_FUNCTION_TRACER
53 53
54 extern int ftrace_enabled; 54 extern int ftrace_enabled;
55 extern int 55 extern int
56 ftrace_enable_sysctl(struct ctl_table *table, int write, 56 ftrace_enable_sysctl(struct ctl_table *table, int write,
57 void __user *buffer, size_t *lenp, 57 void __user *buffer, size_t *lenp,
58 loff_t *ppos); 58 loff_t *ppos);
59 59
60 struct ftrace_ops; 60 struct ftrace_ops;
61 61
62 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, 62 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
63 struct ftrace_ops *op, struct pt_regs *regs); 63 struct ftrace_ops *op, struct pt_regs *regs);
64 64
65 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); 65 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
66 66
67 /* 67 /*
68 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are 68 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
69 * set in the flags member. 69 * set in the flags member.
70 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and 70 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
71 * IPMODIFY are a kind of attribute flags which can be set only before 71 * IPMODIFY are a kind of attribute flags which can be set only before
72 * registering the ftrace_ops, and can not be modified while registered. 72 * registering the ftrace_ops, and can not be modified while registered.
 73 * Changing those attribute flags after registering ftrace_ops will 73 * Changing those attribute flags after registering ftrace_ops will
74 * cause unexpected results. 74 * cause unexpected results.
75 * 75 *
76 * ENABLED - set/unset when ftrace_ops is registered/unregistered 76 * ENABLED - set/unset when ftrace_ops is registered/unregistered
77 * DYNAMIC - set when ftrace_ops is registered to denote dynamically 77 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
78 * allocated ftrace_ops which need special care 78 * allocated ftrace_ops which need special care
 79 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops 79 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 80 * could be controlled by following calls: 80 * could be controlled by following calls:
81 * ftrace_function_local_enable 81 * ftrace_function_local_enable
82 * ftrace_function_local_disable 82 * ftrace_function_local_disable
83 * SAVE_REGS - The ftrace_ops wants regs saved at each function called 83 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
84 * and passed to the callback. If this flag is set, but the 84 * and passed to the callback. If this flag is set, but the
85 * architecture does not support passing regs 85 * architecture does not support passing regs
86 * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the 86 * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
87 * ftrace_ops will fail to register, unless the next flag 87 * ftrace_ops will fail to register, unless the next flag
88 * is set. 88 * is set.
89 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the 89 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
90 * handler can handle an arch that does not save regs 90 * handler can handle an arch that does not save regs
91 * (the handler tests if regs == NULL), then it can set 91 * (the handler tests if regs == NULL), then it can set
92 * this flag instead. It will not fail registering the ftrace_ops 92 * this flag instead. It will not fail registering the ftrace_ops
93 * but, the regs field will be NULL if the arch does not support 93 * but, the regs field will be NULL if the arch does not support
94 * passing regs to the handler. 94 * passing regs to the handler.
95 * Note, if this flag is set, the SAVE_REGS flag will automatically 95 * Note, if this flag is set, the SAVE_REGS flag will automatically
96 * get set upon registering the ftrace_ops, if the arch supports it. 96 * get set upon registering the ftrace_ops, if the arch supports it.
97 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure 97 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
98 * that the call back has its own recursion protection. If it does 98 * that the call back has its own recursion protection. If it does
99 * not set this, then the ftrace infrastructure will add recursion 99 * not set this, then the ftrace infrastructure will add recursion
100 * protection for the caller. 100 * protection for the caller.
101 * STUB - The ftrace_ops is just a place holder. 101 * STUB - The ftrace_ops is just a place holder.
102 * INITIALIZED - The ftrace_ops has already been initialized (first use time 102 * INITIALIZED - The ftrace_ops has already been initialized (first use time
103 * register_ftrace_function() is called, it will initialized the ops) 103 * register_ftrace_function() is called, it will initialized the ops)
104 * DELETED - The ops are being deleted, do not let them be registered again. 104 * DELETED - The ops are being deleted, do not let them be registered again.
105 * ADDING - The ops is in the process of being added. 105 * ADDING - The ops is in the process of being added.
106 * REMOVING - The ops is in the process of being removed. 106 * REMOVING - The ops is in the process of being removed.
107 * MODIFYING - The ops is in the process of changing its filter functions. 107 * MODIFYING - The ops is in the process of changing its filter functions.
108 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. 108 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
109 * The arch specific code sets this flag when it allocated a 109 * The arch specific code sets this flag when it allocated a
110 * trampoline. This lets the arch know that it can update the 110 * trampoline. This lets the arch know that it can update the
111 * trampoline in case the callback function changes. 111 * trampoline in case the callback function changes.
112 * The ftrace_ops trampoline can be set by the ftrace users, and 112 * The ftrace_ops trampoline can be set by the ftrace users, and
113 * in such cases the arch must not modify it. Only the arch ftrace 113 * in such cases the arch must not modify it. Only the arch ftrace
114 * core code should set this flag. 114 * core code should set this flag.
115 * IPMODIFY - The ops can modify the IP register. This can only be set with 115 * IPMODIFY - The ops can modify the IP register. This can only be set with
116 * SAVE_REGS. If another ops with this flag set is already registered 116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then 117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip. 118 * this ops will fail to register or set_filter_ip.
119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids) 119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
120 */ 120 */
121 enum { 121 enum {
122 FTRACE_OPS_FL_ENABLED = 1 << 0, 122 FTRACE_OPS_FL_ENABLED = 1 << 0,
123 FTRACE_OPS_FL_DYNAMIC = 1 << 1, 123 FTRACE_OPS_FL_DYNAMIC = 1 << 1,
124 FTRACE_OPS_FL_CONTROL = 1 << 2, 124 FTRACE_OPS_FL_CONTROL = 1 << 2,
125 FTRACE_OPS_FL_SAVE_REGS = 1 << 3, 125 FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
126 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, 126 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
127 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, 127 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
128 FTRACE_OPS_FL_STUB = 1 << 6, 128 FTRACE_OPS_FL_STUB = 1 << 6,
129 FTRACE_OPS_FL_INITIALIZED = 1 << 7, 129 FTRACE_OPS_FL_INITIALIZED = 1 << 7,
130 FTRACE_OPS_FL_DELETED = 1 << 8, 130 FTRACE_OPS_FL_DELETED = 1 << 8,
131 FTRACE_OPS_FL_ADDING = 1 << 9, 131 FTRACE_OPS_FL_ADDING = 1 << 9,
132 FTRACE_OPS_FL_REMOVING = 1 << 10, 132 FTRACE_OPS_FL_REMOVING = 1 << 10,
133 FTRACE_OPS_FL_MODIFYING = 1 << 11, 133 FTRACE_OPS_FL_MODIFYING = 1 << 11,
134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
135 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
136 FTRACE_OPS_FL_PID = 1 << 14, 136 FTRACE_OPS_FL_PID = 1 << 14,
137 }; 137 };
138 138
139 #ifdef CONFIG_DYNAMIC_FTRACE 139 #ifdef CONFIG_DYNAMIC_FTRACE
140 /* The hash used to know what functions callbacks trace */ 140 /* The hash used to know what functions callbacks trace */
141 struct ftrace_ops_hash { 141 struct ftrace_ops_hash {
142 struct ftrace_hash *notrace_hash; 142 struct ftrace_hash *notrace_hash;
143 struct ftrace_hash *filter_hash; 143 struct ftrace_hash *filter_hash;
144 struct mutex regex_lock; 144 struct mutex regex_lock;
145 }; 145 };
146 #endif 146 #endif
147 147
148 /* 148 /*
149 * Note, ftrace_ops can be referenced outside of RCU protection. 149 * Note, ftrace_ops can be referenced outside of RCU protection.
150 * (Although, for perf, the control ops prevent that). If ftrace_ops is 150 * (Although, for perf, the control ops prevent that). If ftrace_ops is
151 * allocated and not part of kernel core data, the unregistering of it will 151 * allocated and not part of kernel core data, the unregistering of it will
152 * perform a scheduling on all CPUs to make sure that there are no more users. 152 * perform a scheduling on all CPUs to make sure that there are no more users.
153 * Depending on the load of the system that may take a bit of time. 153 * Depending on the load of the system that may take a bit of time.
154 * 154 *
155 * Any private data added must also take care not to be freed and if private 155 * Any private data added must also take care not to be freed and if private
156 * data is added to a ftrace_ops that is in core code, the user of the 156 * data is added to a ftrace_ops that is in core code, the user of the
157 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it. 157 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
158 */ 158 */
159 struct ftrace_ops { 159 struct ftrace_ops {
160 ftrace_func_t func; 160 ftrace_func_t func;
161 struct ftrace_ops *next; 161 struct ftrace_ops *next;
162 unsigned long flags; 162 unsigned long flags;
163 void *private; 163 void *private;
164 ftrace_func_t saved_func; 164 ftrace_func_t saved_func;
165 int __percpu *disabled; 165 int __percpu *disabled;
166 #ifdef CONFIG_DYNAMIC_FTRACE 166 #ifdef CONFIG_DYNAMIC_FTRACE
167 int nr_trampolines; 167 int nr_trampolines;
168 struct ftrace_ops_hash local_hash; 168 struct ftrace_ops_hash local_hash;
169 struct ftrace_ops_hash *func_hash; 169 struct ftrace_ops_hash *func_hash;
170 struct ftrace_ops_hash old_hash; 170 struct ftrace_ops_hash old_hash;
171 unsigned long trampoline; 171 unsigned long trampoline;
172 unsigned long trampoline_size; 172 unsigned long trampoline_size;
173 #endif 173 #endif
174 }; 174 };
175 175
176 /* 176 /*
177 * Type of the current tracing. 177 * Type of the current tracing.
178 */ 178 */
179 enum ftrace_tracing_type_t { 179 enum ftrace_tracing_type_t {
180 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ 180 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
181 FTRACE_TYPE_RETURN, /* Hook the return of the function */ 181 FTRACE_TYPE_RETURN, /* Hook the return of the function */
182 }; 182 };
183 183
184 /* Current tracing type, default is FTRACE_TYPE_ENTER */ 184 /* Current tracing type, default is FTRACE_TYPE_ENTER */
185 extern enum ftrace_tracing_type_t ftrace_tracing_type; 185 extern enum ftrace_tracing_type_t ftrace_tracing_type;
186 186
187 /* 187 /*
188 * The ftrace_ops must be a static and should also 188 * The ftrace_ops must be a static and should also
189 * be read_mostly. These functions do modify read_mostly variables 189 * be read_mostly. These functions do modify read_mostly variables
 190 * so use them sparingly. Never free an ftrace_op or modify the 190 * so use them sparingly. Never free an ftrace_op or modify the
191 * next pointer after it has been registered. Even after unregistering 191 * next pointer after it has been registered. Even after unregistering
192 * it, the next pointer may still be used internally. 192 * it, the next pointer may still be used internally.
193 */ 193 */
194 int register_ftrace_function(struct ftrace_ops *ops); 194 int register_ftrace_function(struct ftrace_ops *ops);
195 int unregister_ftrace_function(struct ftrace_ops *ops); 195 int unregister_ftrace_function(struct ftrace_ops *ops);
196 void clear_ftrace_function(void); 196 void clear_ftrace_function(void);
197 197
198 /** 198 /**
199 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu 199 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
200 * 200 *
201 * This function enables tracing on current cpu by decreasing 201 * This function enables tracing on current cpu by decreasing
202 * the per cpu control variable. 202 * the per cpu control variable.
203 * It must be called with preemption disabled and only on ftrace_ops 203 * It must be called with preemption disabled and only on ftrace_ops
204 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption 204 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
205 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 205 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
206 */ 206 */
207 static inline void ftrace_function_local_enable(struct ftrace_ops *ops) 207 static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
208 { 208 {
209 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) 209 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
210 return; 210 return;
211 211
212 (*this_cpu_ptr(ops->disabled))--; 212 (*this_cpu_ptr(ops->disabled))--;
213 } 213 }
214 214
215 /** 215 /**
 216 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu 216 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
217 * 217 *
 218 * This function disables tracing on current cpu by increasing 218 * This function disables tracing on current cpu by increasing
219 * the per cpu control variable. 219 * the per cpu control variable.
220 * It must be called with preemption disabled and only on ftrace_ops 220 * It must be called with preemption disabled and only on ftrace_ops
221 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption 221 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
222 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 222 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
223 */ 223 */
224 static inline void ftrace_function_local_disable(struct ftrace_ops *ops) 224 static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
225 { 225 {
226 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) 226 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
227 return; 227 return;
228 228
229 (*this_cpu_ptr(ops->disabled))++; 229 (*this_cpu_ptr(ops->disabled))++;
230 } 230 }
231 231
232 /** 232 /**
233 * ftrace_function_local_disabled - returns ftrace_ops disabled value 233 * ftrace_function_local_disabled - returns ftrace_ops disabled value
234 * on current cpu 234 * on current cpu
235 * 235 *
236 * This function returns value of ftrace_ops::disabled on current cpu. 236 * This function returns value of ftrace_ops::disabled on current cpu.
237 * It must be called with preemption disabled and only on ftrace_ops 237 * It must be called with preemption disabled and only on ftrace_ops
238 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption 238 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
239 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 239 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
240 */ 240 */
241 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) 241 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
242 { 242 {
243 WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); 243 WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
244 return *this_cpu_ptr(ops->disabled); 244 return *this_cpu_ptr(ops->disabled);
245 } 245 }
246 246
247 extern void ftrace_stub(unsigned long a0, unsigned long a1, 247 extern void ftrace_stub(unsigned long a0, unsigned long a1,
248 struct ftrace_ops *op, struct pt_regs *regs); 248 struct ftrace_ops *op, struct pt_regs *regs);
249 249
250 #else /* !CONFIG_FUNCTION_TRACER */ 250 #else /* !CONFIG_FUNCTION_TRACER */
251 /* 251 /*
252 * (un)register_ftrace_function must be a macro since the ops parameter 252 * (un)register_ftrace_function must be a macro since the ops parameter
253 * must not be evaluated. 253 * must not be evaluated.
254 */ 254 */
255 #define register_ftrace_function(ops) ({ 0; }) 255 #define register_ftrace_function(ops) ({ 0; })
256 #define unregister_ftrace_function(ops) ({ 0; }) 256 #define unregister_ftrace_function(ops) ({ 0; })
257 static inline int ftrace_nr_registered_ops(void) 257 static inline int ftrace_nr_registered_ops(void)
258 { 258 {
259 return 0; 259 return 0;
260 } 260 }
261 static inline void clear_ftrace_function(void) { } 261 static inline void clear_ftrace_function(void) { }
262 static inline void ftrace_kill(void) { } 262 static inline void ftrace_kill(void) { }
263 #endif /* CONFIG_FUNCTION_TRACER */ 263 #endif /* CONFIG_FUNCTION_TRACER */
264 264
265 #ifdef CONFIG_STACK_TRACER 265 #ifdef CONFIG_STACK_TRACER
266 266
267 #define STACK_TRACE_ENTRIES 500 267 #define STACK_TRACE_ENTRIES 500
268 268
269 struct stack_trace; 269 struct stack_trace;
270 270
271 extern unsigned stack_trace_index[]; 271 extern unsigned stack_trace_index[];
272 extern struct stack_trace stack_trace_max; 272 extern struct stack_trace stack_trace_max;
273 extern unsigned long stack_trace_max_size; 273 extern unsigned long stack_trace_max_size;
274 extern arch_spinlock_t stack_trace_max_lock; 274 extern arch_spinlock_t stack_trace_max_lock;
275 275
276 extern int stack_tracer_enabled; 276 extern int stack_tracer_enabled;
277 void stack_trace_print(void); 277 void stack_trace_print(void);
278 int 278 int
279 stack_trace_sysctl(struct ctl_table *table, int write, 279 stack_trace_sysctl(struct ctl_table *table, int write,
280 void __user *buffer, size_t *lenp, 280 void __user *buffer, size_t *lenp,
281 loff_t *ppos); 281 loff_t *ppos);
282 #endif 282 #endif
283 283
284 struct ftrace_func_command { 284 struct ftrace_func_command {
285 struct list_head list; 285 struct list_head list;
286 char *name; 286 char *name;
287 int (*func)(struct ftrace_hash *hash, 287 int (*func)(struct ftrace_hash *hash,
288 char *func, char *cmd, 288 char *func, char *cmd,
289 char *params, int enable); 289 char *params, int enable);
290 }; 290 };
291 291
292 #ifdef CONFIG_DYNAMIC_FTRACE 292 #ifdef CONFIG_DYNAMIC_FTRACE
293 293
294 int ftrace_arch_code_modify_prepare(void); 294 int ftrace_arch_code_modify_prepare(void);
295 int ftrace_arch_code_modify_post_process(void); 295 int ftrace_arch_code_modify_post_process(void);
296 296
297 struct dyn_ftrace; 297 struct dyn_ftrace;
298 298
299 void ftrace_bug(int err, struct dyn_ftrace *rec); 299 void ftrace_bug(int err, struct dyn_ftrace *rec);
300 300
301 struct seq_file; 301 struct seq_file;
302 302
303 struct ftrace_probe_ops { 303 struct ftrace_probe_ops {
304 void (*func)(unsigned long ip, 304 void (*func)(unsigned long ip,
305 unsigned long parent_ip, 305 unsigned long parent_ip,
306 void **data); 306 void **data);
307 int (*init)(struct ftrace_probe_ops *ops, 307 int (*init)(struct ftrace_probe_ops *ops,
308 unsigned long ip, void **data); 308 unsigned long ip, void **data);
309 void (*free)(struct ftrace_probe_ops *ops, 309 void (*free)(struct ftrace_probe_ops *ops,
310 unsigned long ip, void **data); 310 unsigned long ip, void **data);
311 int (*print)(struct seq_file *m, 311 int (*print)(struct seq_file *m,
312 unsigned long ip, 312 unsigned long ip,
313 struct ftrace_probe_ops *ops, 313 struct ftrace_probe_ops *ops,
314 void *data); 314 void *data);
315 }; 315 };
316 316
317 extern int 317 extern int
318 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 318 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
319 void *data); 319 void *data);
320 extern void 320 extern void
321 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 321 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
322 void *data); 322 void *data);
323 extern void 323 extern void
324 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); 324 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
325 extern void unregister_ftrace_function_probe_all(char *glob); 325 extern void unregister_ftrace_function_probe_all(char *glob);
326 326
327 extern int ftrace_text_reserved(const void *start, const void *end); 327 extern int ftrace_text_reserved(const void *start, const void *end);
328 328
329 extern int ftrace_nr_registered_ops(void); 329 extern int ftrace_nr_registered_ops(void);
330 330
331 bool is_ftrace_trampoline(unsigned long addr); 331 bool is_ftrace_trampoline(unsigned long addr);
332 332
333 /* 333 /*
334 * The dyn_ftrace record's flags field is split into two parts. 334 * The dyn_ftrace record's flags field is split into two parts.
335 * the first part which is '0-FTRACE_REF_MAX' is a counter of 335 * the first part which is '0-FTRACE_REF_MAX' is a counter of
336 * the number of callbacks that have registered the function that 336 * the number of callbacks that have registered the function that
337 * the dyn_ftrace descriptor represents. 337 * the dyn_ftrace descriptor represents.
338 * 338 *
339 * The second part is a mask: 339 * The second part is a mask:
340 * ENABLED - the function is being traced 340 * ENABLED - the function is being traced
341 * REGS - the record wants the function to save regs 341 * REGS - the record wants the function to save regs
342 * REGS_EN - the function is set up to save regs. 342 * REGS_EN - the function is set up to save regs.
343 * IPMODIFY - the record allows for the IP address to be changed. 343 * IPMODIFY - the record allows for the IP address to be changed.
344 * 344 *
345 * When a new ftrace_ops is registered and wants a function to save 345 * When a new ftrace_ops is registered and wants a function to save
346 * pt_regs, the rec->flag REGS is set. When the function has been 346 * pt_regs, the rec->flag REGS is set. When the function has been
347 * set up to save regs, the REG_EN flag is set. Once a function 347 * set up to save regs, the REG_EN flag is set. Once a function
348 * starts saving regs it will do so until all ftrace_ops are removed 348 * starts saving regs it will do so until all ftrace_ops are removed
349 * from tracing that function. 349 * from tracing that function.
350 */ 350 */
351 enum { 351 enum {
352 FTRACE_FL_ENABLED = (1UL << 31), 352 FTRACE_FL_ENABLED = (1UL << 31),
353 FTRACE_FL_REGS = (1UL << 30), 353 FTRACE_FL_REGS = (1UL << 30),
354 FTRACE_FL_REGS_EN = (1UL << 29), 354 FTRACE_FL_REGS_EN = (1UL << 29),
355 FTRACE_FL_TRAMP = (1UL << 28), 355 FTRACE_FL_TRAMP = (1UL << 28),
356 FTRACE_FL_TRAMP_EN = (1UL << 27), 356 FTRACE_FL_TRAMP_EN = (1UL << 27),
357 FTRACE_FL_IPMODIFY = (1UL << 26), 357 FTRACE_FL_IPMODIFY = (1UL << 26),
358 }; 358 };
359 359
360 #define FTRACE_REF_MAX_SHIFT 26 360 #define FTRACE_REF_MAX_SHIFT 26
361 #define FTRACE_FL_BITS 6 361 #define FTRACE_FL_BITS 6
362 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) 362 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
363 #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) 363 #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
364 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) 364 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
365 365
366 #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) 366 #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
367 367
368 struct dyn_ftrace { 368 struct dyn_ftrace {
369 unsigned long ip; /* address of mcount call-site */ 369 unsigned long ip; /* address of mcount call-site */
370 unsigned long flags; 370 unsigned long flags;
371 struct dyn_arch_ftrace arch; 371 struct dyn_arch_ftrace arch;
372 }; 372 };
373 373
374 int ftrace_force_update(void); 374 int ftrace_force_update(void);
375 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 375 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
376 int remove, int reset); 376 int remove, int reset);
377 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 377 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
378 int len, int reset); 378 int len, int reset);
379 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 379 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
380 int len, int reset); 380 int len, int reset);
381 void ftrace_set_global_filter(unsigned char *buf, int len, int reset); 381 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
382 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); 382 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
383 void ftrace_free_filter(struct ftrace_ops *ops); 383 void ftrace_free_filter(struct ftrace_ops *ops);
384 384
385 int register_ftrace_command(struct ftrace_func_command *cmd); 385 int register_ftrace_command(struct ftrace_func_command *cmd);
386 int unregister_ftrace_command(struct ftrace_func_command *cmd); 386 int unregister_ftrace_command(struct ftrace_func_command *cmd);
387 387
388 enum { 388 enum {
389 FTRACE_UPDATE_CALLS = (1 << 0), 389 FTRACE_UPDATE_CALLS = (1 << 0),
390 FTRACE_DISABLE_CALLS = (1 << 1), 390 FTRACE_DISABLE_CALLS = (1 << 1),
391 FTRACE_UPDATE_TRACE_FUNC = (1 << 2), 391 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
392 FTRACE_START_FUNC_RET = (1 << 3), 392 FTRACE_START_FUNC_RET = (1 << 3),
393 FTRACE_STOP_FUNC_RET = (1 << 4), 393 FTRACE_STOP_FUNC_RET = (1 << 4),
394 }; 394 };
395 395
396 /* 396 /*
397 * The FTRACE_UPDATE_* enum is used to pass information back 397 * The FTRACE_UPDATE_* enum is used to pass information back
398 * from the ftrace_update_record() and ftrace_test_record() 398 * from the ftrace_update_record() and ftrace_test_record()
399 * functions. These are called by the code update routines 399 * functions. These are called by the code update routines
400 * to find out what is to be done for a given function. 400 * to find out what is to be done for a given function.
401 * 401 *
402 * IGNORE - The function is already what we want it to be 402 * IGNORE - The function is already what we want it to be
403 * MAKE_CALL - Start tracing the function 403 * MAKE_CALL - Start tracing the function
404 * MODIFY_CALL - Stop saving regs for the function 404 * MODIFY_CALL - Stop saving regs for the function
405 * MAKE_NOP - Stop tracing the function 405 * MAKE_NOP - Stop tracing the function
406 */ 406 */
407 enum { 407 enum {
408 FTRACE_UPDATE_IGNORE, 408 FTRACE_UPDATE_IGNORE,
409 FTRACE_UPDATE_MAKE_CALL, 409 FTRACE_UPDATE_MAKE_CALL,
410 FTRACE_UPDATE_MODIFY_CALL, 410 FTRACE_UPDATE_MODIFY_CALL,
411 FTRACE_UPDATE_MAKE_NOP, 411 FTRACE_UPDATE_MAKE_NOP,
412 }; 412 };
413 413
414 enum { 414 enum {
415 FTRACE_ITER_FILTER = (1 << 0), 415 FTRACE_ITER_FILTER = (1 << 0),
416 FTRACE_ITER_NOTRACE = (1 << 1), 416 FTRACE_ITER_NOTRACE = (1 << 1),
417 FTRACE_ITER_PRINTALL = (1 << 2), 417 FTRACE_ITER_PRINTALL = (1 << 2),
418 FTRACE_ITER_DO_HASH = (1 << 3), 418 FTRACE_ITER_DO_HASH = (1 << 3),
419 FTRACE_ITER_HASH = (1 << 4), 419 FTRACE_ITER_HASH = (1 << 4),
420 FTRACE_ITER_ENABLED = (1 << 5), 420 FTRACE_ITER_ENABLED = (1 << 5),
421 }; 421 };
422 422
423 void arch_ftrace_update_code(int command); 423 void arch_ftrace_update_code(int command);
424 424
425 struct ftrace_rec_iter; 425 struct ftrace_rec_iter;
426 426
427 struct ftrace_rec_iter *ftrace_rec_iter_start(void); 427 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
428 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); 428 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
429 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); 429 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
430 430
/*
 * Iterate over every dyn_ftrace record: @iter walks from
 * ftrace_rec_iter_start() until ftrace_rec_iter_next() returns NULL.
 */
#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
435 435
436 436
437 int ftrace_update_record(struct dyn_ftrace *rec, int enable); 437 int ftrace_update_record(struct dyn_ftrace *rec, int enable);
438 int ftrace_test_record(struct dyn_ftrace *rec, int enable); 438 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
439 void ftrace_run_stop_machine(int command); 439 void ftrace_run_stop_machine(int command);
440 unsigned long ftrace_location(unsigned long ip); 440 unsigned long ftrace_location(unsigned long ip);
441 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); 441 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
442 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); 442 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
443 443
444 extern ftrace_func_t ftrace_trace_function; 444 extern ftrace_func_t ftrace_trace_function;
445 445
446 int ftrace_regex_open(struct ftrace_ops *ops, int flag, 446 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
447 struct inode *inode, struct file *file); 447 struct inode *inode, struct file *file);
448 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, 448 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
449 size_t cnt, loff_t *ppos); 449 size_t cnt, loff_t *ppos);
450 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, 450 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
451 size_t cnt, loff_t *ppos); 451 size_t cnt, loff_t *ppos);
452 int ftrace_regex_release(struct inode *inode, struct file *file); 452 int ftrace_regex_release(struct inode *inode, struct file *file);
453 453
454 void __init 454 void __init
455 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); 455 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
456 456
457 /* defined in arch */ 457 /* defined in arch */
458 extern int ftrace_ip_converted(unsigned long ip); 458 extern int ftrace_ip_converted(unsigned long ip);
459 extern int ftrace_dyn_arch_init(void); 459 extern int ftrace_dyn_arch_init(void);
460 extern void ftrace_replace_code(int enable); 460 extern void ftrace_replace_code(int enable);
461 extern int ftrace_update_ftrace_func(ftrace_func_t func); 461 extern int ftrace_update_ftrace_func(ftrace_func_t func);
462 extern void ftrace_caller(void); 462 extern void ftrace_caller(void);
463 extern void ftrace_regs_caller(void); 463 extern void ftrace_regs_caller(void);
464 extern void ftrace_call(void); 464 extern void ftrace_call(void);
465 extern void ftrace_regs_call(void); 465 extern void ftrace_regs_call(void);
466 extern void mcount_call(void); 466 extern void mcount_call(void);
467 467
468 void ftrace_modify_all_code(int command); 468 void ftrace_modify_all_code(int command);
469 469
470 #ifndef FTRACE_ADDR 470 #ifndef FTRACE_ADDR
471 #define FTRACE_ADDR ((unsigned long)ftrace_caller) 471 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
472 #endif 472 #endif
473 473
474 #ifndef FTRACE_GRAPH_ADDR 474 #ifndef FTRACE_GRAPH_ADDR
475 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) 475 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
476 #endif 476 #endif
477 477
478 #ifndef FTRACE_REGS_ADDR 478 #ifndef FTRACE_REGS_ADDR
479 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 479 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
480 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) 480 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
481 #else 481 #else
482 # define FTRACE_REGS_ADDR FTRACE_ADDR 482 # define FTRACE_REGS_ADDR FTRACE_ADDR
483 #endif 483 #endif
484 #endif 484 #endif
485 485
486 /* 486 /*
487 * If an arch would like functions that are only traced 487 * If an arch would like functions that are only traced
488 * by the function graph tracer to jump directly to its own 488 * by the function graph tracer to jump directly to its own
489 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR 489 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
490 * to be that address to jump to. 490 * to be that address to jump to.
491 */ 491 */
492 #ifndef FTRACE_GRAPH_TRAMP_ADDR 492 #ifndef FTRACE_GRAPH_TRAMP_ADDR
493 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) 493 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
494 #endif 494 #endif
495 495
496 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 496 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
497 extern void ftrace_graph_caller(void); 497 extern void ftrace_graph_caller(void);
498 extern int ftrace_enable_ftrace_graph_caller(void); 498 extern int ftrace_enable_ftrace_graph_caller(void);
499 extern int ftrace_disable_ftrace_graph_caller(void); 499 extern int ftrace_disable_ftrace_graph_caller(void);
500 #else 500 #else
501 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } 501 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
502 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } 502 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
503 #endif 503 #endif
504 504
505 /** 505 /**
506 * ftrace_make_nop - convert code into nop 506 * ftrace_make_nop - convert code into nop
507 * @mod: module structure if called by module load initialization 507 * @mod: module structure if called by module load initialization
508 * @rec: the mcount call site record 508 * @rec: the mcount call site record
509 * @addr: the address that the call site should be calling 509 * @addr: the address that the call site should be calling
510 * 510 *
511 * This is a very sensitive operation and great care needs 511 * This is a very sensitive operation and great care needs
512 * to be taken by the arch. The operation should carefully 512 * to be taken by the arch. The operation should carefully
513 * read the location, check to see if what is read is indeed 513 * read the location, check to see if what is read is indeed
514 * what we expect it to be, and then on success of the compare, 514 * what we expect it to be, and then on success of the compare,
515 * it should write to the location. 515 * it should write to the location.
516 * 516 *
517 * The code segment at @rec->ip should be a caller to @addr 517 * The code segment at @rec->ip should be a caller to @addr
518 * 518 *
519 * Return must be: 519 * Return must be:
520 * 0 on success 520 * 0 on success
521 * -EFAULT on error reading the location 521 * -EFAULT on error reading the location
522 * -EINVAL on a failed compare of the contents 522 * -EINVAL on a failed compare of the contents
523 * -EPERM on error writing to the location 523 * -EPERM on error writing to the location
524 * Any other value will be considered a failure. 524 * Any other value will be considered a failure.
525 */ 525 */
526 extern int ftrace_make_nop(struct module *mod, 526 extern int ftrace_make_nop(struct module *mod,
527 struct dyn_ftrace *rec, unsigned long addr); 527 struct dyn_ftrace *rec, unsigned long addr);
528 528
529 /** 529 /**
530 * ftrace_make_call - convert a nop call site into a call to addr 530 * ftrace_make_call - convert a nop call site into a call to addr
531 * @rec: the mcount call site record 531 * @rec: the mcount call site record
532 * @addr: the address that the call site should call 532 * @addr: the address that the call site should call
533 * 533 *
534 * This is a very sensitive operation and great care needs 534 * This is a very sensitive operation and great care needs
535 * to be taken by the arch. The operation should carefully 535 * to be taken by the arch. The operation should carefully
536 * read the location, check to see if what is read is indeed 536 * read the location, check to see if what is read is indeed
537 * what we expect it to be, and then on success of the compare, 537 * what we expect it to be, and then on success of the compare,
538 * it should write to the location. 538 * it should write to the location.
539 * 539 *
540 * The code segment at @rec->ip should be a nop 540 * The code segment at @rec->ip should be a nop
541 * 541 *
542 * Return must be: 542 * Return must be:
543 * 0 on success 543 * 0 on success
544 * -EFAULT on error reading the location 544 * -EFAULT on error reading the location
545 * -EINVAL on a failed compare of the contents 545 * -EINVAL on a failed compare of the contents
546 * -EPERM on error writing to the location 546 * -EPERM on error writing to the location
547 * Any other value will be considered a failure. 547 * Any other value will be considered a failure.
548 */ 548 */
549 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 549 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
550 550
551 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 551 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
552 /** 552 /**
553 * ftrace_modify_call - convert from one addr to another (no nop) 553 * ftrace_modify_call - convert from one addr to another (no nop)
554 * @rec: the mcount call site record 554 * @rec: the mcount call site record
555 * @old_addr: the address expected to be currently called to 555 * @old_addr: the address expected to be currently called to
556 * @addr: the address to change to 556 * @addr: the address to change to
557 * 557 *
558 * This is a very sensitive operation and great care needs 558 * This is a very sensitive operation and great care needs
559 * to be taken by the arch. The operation should carefully 559 * to be taken by the arch. The operation should carefully
560 * read the location, check to see if what is read is indeed 560 * read the location, check to see if what is read is indeed
561 * what we expect it to be, and then on success of the compare, 561 * what we expect it to be, and then on success of the compare,
562 * it should write to the location. 562 * it should write to the location.
563 * 563 *
564 * The code segment at @rec->ip should be a caller to @old_addr 564 * The code segment at @rec->ip should be a caller to @old_addr
565 * 565 *
566 * Return must be: 566 * Return must be:
567 * 0 on success 567 * 0 on success
568 * -EFAULT on error reading the location 568 * -EFAULT on error reading the location
569 * -EINVAL on a failed compare of the contents 569 * -EINVAL on a failed compare of the contents
570 * -EPERM on error writing to the location 570 * -EPERM on error writing to the location
571 * Any other value will be considered a failure. 571 * Any other value will be considered a failure.
572 */ 572 */
573 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 573 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
574 unsigned long addr); 574 unsigned long addr);
575 #else 575 #else
/*
 * Without CONFIG_DYNAMIC_FTRACE_WITH_REGS there is no regs-saving
 * trampoline to switch between, so retargeting a call site is not
 * supported.  Should never be called; always reports failure.
 */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;	/* nothing to modify on this configuration */
}
582 #endif 582 #endif
583 583
584 /* May be defined in arch */ 584 /* May be defined in arch */
585 extern int ftrace_arch_read_dyn_info(char *buf, int size); 585 extern int ftrace_arch_read_dyn_info(char *buf, int size);
586 586
587 extern int skip_trace(unsigned long ip); 587 extern int skip_trace(unsigned long ip);
588 extern void ftrace_module_init(struct module *mod); 588 extern void ftrace_module_init(struct module *mod);
589 extern void ftrace_release_mod(struct module *mod);
589 590
590 extern void ftrace_disable_daemon(void); 591 extern void ftrace_disable_daemon(void);
591 extern void ftrace_enable_daemon(void); 592 extern void ftrace_enable_daemon(void);
592 #else /* CONFIG_DYNAMIC_FTRACE */ 593 #else /* CONFIG_DYNAMIC_FTRACE */
/* !CONFIG_DYNAMIC_FTRACE: all code-patching hooks collapse to no-ops. */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
/* No per-module mcount records were allocated, so nothing to free. */
static inline void ftrace_release_mod(struct module *mod) {}
static inline void ftrace_module_init(struct module *mod) {}
/* Filter commands only exist with dynamic ftrace; always refuse. */
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
/*
 * !CONFIG_DYNAMIC_FTRACE: no kernel text is patched by ftrace, so no
 * range can overlap an ftrace-reserved location.
 */
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;	/* never reserved */
}
/* No call site can match @ip when dynamic ftrace is disabled. */
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;	/* "not an ftrace location" */
}
615 616
/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline
 * (a static inline would need struct ftrace_ops to be complete).
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
627 628
/* Filter-file operations: the files do not exist without dynamic ftrace. */
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
634 635
/* No dynamically allocated trampolines exist without dynamic ftrace. */
static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;	/* no address can be a trampoline */
}
639 #endif /* CONFIG_DYNAMIC_FTRACE */ 640 #endif /* CONFIG_DYNAMIC_FTRACE */
640 641
641 /* totally disable ftrace - can not re-enable after this */ 642 /* totally disable ftrace - can not re-enable after this */
642 void ftrace_kill(void); 643 void ftrace_kill(void);
643 644
/*
 * Clear ftrace_enabled, turning function tracing off (see the comment
 * above: this cannot be re-enabled afterwards through normal means).
 * Compiles to a no-op when the function tracer is not configured.
 */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
650 651
651 /* 652 /*
652 * Ftrace disable/restore without lock. Some synchronization mechanism 653 * Ftrace disable/restore without lock. Some synchronization mechanism
653 * must be used to prevent ftrace_enabled to be changed between 654 * must be used to prevent ftrace_enabled to be changed between
654 * disable/restore. 655 * disable/restore.
655 */ 656 */
/*
 * Save the current ftrace_enabled state and force tracing off.
 * Lockless: the caller must synchronize against concurrent changes
 * (see the comment above).  Returns the previous state, to be handed
 * back to __ftrace_enabled_restore().
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int prev_state = ftrace_enabled;

	ftrace_enabled = 0;
	return prev_state;
#else
	return 0;	/* tracer not built in: state is always "off" */
#endif
}
666 667
/* Restore the ftrace_enabled state saved by __ftrace_enabled_save(). */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#else
	(void)enabled;	/* tracer not built in: nothing to restore */
#endif
}
673 674
674 /* All archs should have this, but we define it for consistency */ 675 /* All archs should have this, but we define it for consistency */
675 #ifndef ftrace_return_address0 676 #ifndef ftrace_return_address0
676 # define ftrace_return_address0 __builtin_return_address(0) 677 # define ftrace_return_address0 __builtin_return_address(0)
677 #endif 678 #endif
678 679
679 /* Archs may use other ways for ADDR1 and beyond */ 680 /* Archs may use other ways for ADDR1 and beyond */
680 #ifndef ftrace_return_address 681 #ifndef ftrace_return_address
681 # ifdef CONFIG_FRAME_POINTER 682 # ifdef CONFIG_FRAME_POINTER
682 # define ftrace_return_address(n) __builtin_return_address(n) 683 # define ftrace_return_address(n) __builtin_return_address(n)
683 # else 684 # else
684 # define ftrace_return_address(n) 0UL 685 # define ftrace_return_address(n) 0UL
685 # endif 686 # endif
686 #endif 687 #endif
687 688
688 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0) 689 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
689 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1)) 690 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
690 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2)) 691 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
691 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3)) 692 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
692 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4)) 693 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
693 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) 694 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
694 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) 695 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
695 696
696 #ifdef CONFIG_IRQSOFF_TRACER 697 #ifdef CONFIG_IRQSOFF_TRACER
697 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 698 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
698 extern void time_hardirqs_off(unsigned long a0, unsigned long a1); 699 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
699 #else 700 #else
700 static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { } 701 static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
701 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } 702 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
702 #endif 703 #endif
703 704
704 #ifdef CONFIG_PREEMPT_TRACER 705 #ifdef CONFIG_PREEMPT_TRACER
705 extern void trace_preempt_on(unsigned long a0, unsigned long a1); 706 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
706 extern void trace_preempt_off(unsigned long a0, unsigned long a1); 707 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
707 #else 708 #else
708 /* 709 /*
709 * Use defines instead of static inlines because some arches will make code out 710 * Use defines instead of static inlines because some arches will make code out
710 * of the CALLER_ADDR, when we really want these to be a real nop. 711 * of the CALLER_ADDR, when we really want these to be a real nop.
711 */ 712 */
712 # define trace_preempt_on(a0, a1) do { } while (0) 713 # define trace_preempt_on(a0, a1) do { } while (0)
713 # define trace_preempt_off(a0, a1) do { } while (0) 714 # define trace_preempt_off(a0, a1) do { } while (0)
714 #endif 715 #endif
715 716
716 #ifdef CONFIG_FTRACE_MCOUNT_RECORD 717 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
717 extern void ftrace_init(void); 718 extern void ftrace_init(void);
718 #else 719 #else
719 static inline void ftrace_init(void) { } 720 static inline void ftrace_init(void) { }
720 #endif 721 #endif
721 722
722 /* 723 /*
723 * Structure that defines an entry function trace. 724 * Structure that defines an entry function trace.
724 */ 725 */
/*
 * Structure that defines an entry function trace, emitted when the
 * function graph tracer records a function being entered.
 */
struct ftrace_graph_ent {
	unsigned long func;	/* Current function */
	int depth;		/* call-nesting depth of this entry */
};
729 730
730 /* 731 /*
731 * Structure that defines a return function trace. 732 * Structure that defines a return function trace.
732 */ 733 */
/*
 * Structure that defines a return function trace, emitted when the
 * function graph tracer records a function returning.
 */
struct ftrace_graph_ret {
	unsigned long func;		/* Current function */
	unsigned long long calltime;	/* time of entry */
	unsigned long long rettime;	/* time of return */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;			/* call-nesting depth of this return */
};
741 742
742 /* Type of the callback handlers for tracing function graph*/ 743 /* Type of the callback handlers for tracing function graph*/
743 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ 744 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
744 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ 745 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
745 746
746 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 747 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
747 748
748 /* for init task */ 749 /* for init task */
749 #define INIT_FTRACE_GRAPH .ret_stack = NULL, 750 #define INIT_FTRACE_GRAPH .ret_stack = NULL,
750 751
751 /* 752 /*
752 * Stack of return addresses for functions 753 * Stack of return addresses for functions
753 * of a thread. 754 * of a thread.
754 * Used in struct thread_info 755 * Used in struct thread_info
755 */ 756 */
/*
 * Stack of return addresses for functions of a thread.
 * Used in struct thread_info; one entry per pending traced call.
 */
struct ftrace_ret_stack {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* time of entry */
	unsigned long long subtime;	/* time accounted to sub-calls -- TODO confirm */
	unsigned long fp;		/* frame pointer at entry; presumably used
					 * for sanity checking -- verify in tracer */
};
763 764
764 /* 765 /*
765 * Primary handler of a function return. 766 * Primary handler of a function return.
766 * It relays on ftrace_return_to_handler. 767 * It relays on ftrace_return_to_handler.
767 * Defined in entry_32/64.S 768 * Defined in entry_32/64.S
768 */ 769 */
769 extern void return_to_handler(void); 770 extern void return_to_handler(void);
770 771
771 extern int 772 extern int
772 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 773 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
773 unsigned long frame_pointer); 774 unsigned long frame_pointer);
774 775
775 /* 776 /*
776 * Sometimes we don't want to trace a function with the function 777 * Sometimes we don't want to trace a function with the function
777 * graph tracer but we want them to keep traced by the usual function 778 * graph tracer but we want them to keep traced by the usual function
778 * tracer if the function graph tracer is not configured. 779 * tracer if the function graph tracer is not configured.
779 */ 780 */
780 #define __notrace_funcgraph notrace 781 #define __notrace_funcgraph notrace
781 782
782 /* 783 /*
783 * We want to which function is an entrypoint of a hardirq. 784 * We want to which function is an entrypoint of a hardirq.
784 * That will help us to put a signal on output. 785 * That will help us to put a signal on output.
785 */ 786 */
786 #define __irq_entry __attribute__((__section__(".irqentry.text"))) 787 #define __irq_entry __attribute__((__section__(".irqentry.text")))
787 788
788 /* Limits of hardirq entrypoints */ 789 /* Limits of hardirq entrypoints */
789 extern char __irqentry_text_start[]; 790 extern char __irqentry_text_start[];
790 extern char __irqentry_text_end[]; 791 extern char __irqentry_text_end[];
791 792
792 #define FTRACE_NOTRACE_DEPTH 65536 793 #define FTRACE_NOTRACE_DEPTH 65536
793 #define FTRACE_RETFUNC_DEPTH 50 794 #define FTRACE_RETFUNC_DEPTH 50
794 #define FTRACE_RETSTACK_ALLOC_SIZE 32 795 #define FTRACE_RETSTACK_ALLOC_SIZE 32
795 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, 796 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
796 trace_func_graph_ent_t entryfunc); 797 trace_func_graph_ent_t entryfunc);
797 798
798 extern bool ftrace_graph_is_dead(void); 799 extern bool ftrace_graph_is_dead(void);
799 extern void ftrace_graph_stop(void); 800 extern void ftrace_graph_stop(void);
800 801
801 /* The current handlers in use */ 802 /* The current handlers in use */
802 extern trace_func_graph_ret_t ftrace_graph_return; 803 extern trace_func_graph_ret_t ftrace_graph_return;
803 extern trace_func_graph_ent_t ftrace_graph_entry; 804 extern trace_func_graph_ent_t ftrace_graph_entry;
804 805
805 extern void unregister_ftrace_graph(void); 806 extern void unregister_ftrace_graph(void);
806 807
807 extern void ftrace_graph_init_task(struct task_struct *t); 808 extern void ftrace_graph_init_task(struct task_struct *t);
808 extern void ftrace_graph_exit_task(struct task_struct *t); 809 extern void ftrace_graph_exit_task(struct task_struct *t);
809 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); 810 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
810 811
/* Current index into @t's function-graph return stack. */
static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}
815 816
/* Suspend function-graph tracing for the current task (nestable). */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}
820 821
/* Undo one pause_graph_tracing() on the current task. */
static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
825 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ 826 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
826 827
827 #define __notrace_funcgraph 828 #define __notrace_funcgraph
828 #define __irq_entry 829 #define __irq_entry
829 #define INIT_FTRACE_GRAPH 830 #define INIT_FTRACE_GRAPH
830 831
/* !CONFIG_FUNCTION_GRAPH_TRACER: per-task setup/teardown is a no-op. */
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
834 835
/* Graph tracer not built in: registration always fails. */
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
/* Nothing was registered, so nothing to unregister. */
static inline void unregister_ftrace_graph(void) { }
841 842
/*
 * Graph tracer disabled: there is no per-task return stack, so report
 * -1 ("no graph frames") to callers.
 */
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
846 847
/* Pause/unpause are no-ops when the graph tracer is not built in. */
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
849 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 850 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
850 851
851 #ifdef CONFIG_TRACING 852 #ifdef CONFIG_TRACING
852 853
/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,	/* bit index: task is being traced */
	TSK_TRACE_FL_GRAPH_BIT	= 1,	/* bit index: task is graph-traced */
};
/* Mask forms of the bit indices above, for testing tsk->trace directly. */
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};
862 863
/* Set (atomic bitop) the "being traced" flag in @tsk->trace. */
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
867 868
/* Clear (atomic bitop) the "being traced" flag in @tsk->trace. */
static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
872 873
/* Nonzero iff @tsk's "being traced" flag is set. */
static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}
877 878
/* Set (atomic bitop) the graph-tracing flag in @tsk->trace. */
static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
882 883
/* Clear (atomic bitop) the graph-tracing flag in @tsk->trace. */
static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
887 888
/* Nonzero iff @tsk's graph-tracing flag is set. */
static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}
892 893
893 enum ftrace_dump_mode; 894 enum ftrace_dump_mode;
894 895
895 extern enum ftrace_dump_mode ftrace_dump_on_oops; 896 extern enum ftrace_dump_mode ftrace_dump_on_oops;
896 extern int tracepoint_printk; 897 extern int tracepoint_printk;
897 898
898 extern void disable_trace_on_warning(void); 899 extern void disable_trace_on_warning(void);
899 extern int __disable_trace_on_warning; 900 extern int __disable_trace_on_warning;
900 901
901 #ifdef CONFIG_PREEMPT 902 #ifdef CONFIG_PREEMPT
902 #define INIT_TRACE_RECURSION .trace_recursion = 0, 903 #define INIT_TRACE_RECURSION .trace_recursion = 0,
903 #endif 904 #endif
904 905
905 #else /* CONFIG_TRACING */ 906 #else /* CONFIG_TRACING */
906 static inline void disable_trace_on_warning(void) { } 907 static inline void disable_trace_on_warning(void) { }
907 #endif /* CONFIG_TRACING */ 908 #endif /* CONFIG_TRACING */
908 909
909 #ifndef INIT_TRACE_RECURSION 910 #ifndef INIT_TRACE_RECURSION
910 #define INIT_TRACE_RECURSION 911 #define INIT_TRACE_RECURSION
911 #endif 912 #endif
912 913
913 #ifdef CONFIG_FTRACE_SYSCALLS 914 #ifdef CONFIG_FTRACE_SYSCALLS
914 915
915 unsigned long arch_syscall_addr(int nr); 916 unsigned long arch_syscall_addr(int nr);
916 917
917 #endif /* CONFIG_FTRACE_SYSCALLS */ 918 #endif /* CONFIG_FTRACE_SYSCALLS */
918 919
919 #endif /* _LINUX_FTRACE_H */ 920 #endif /* _LINUX_FTRACE_H */
920 921
1 /* 1 /*
2 Copyright (C) 2002 Richard Henderson 2 Copyright (C) 2002 Richard Henderson
3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. 3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by 6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or 7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version. 8 (at your option) any later version.
9 9
10 This program is distributed in the hope that it will be useful, 10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details. 13 GNU General Public License for more details.
14 14
15 You should have received a copy of the GNU General Public License 15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software 16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 #include <linux/export.h> 19 #include <linux/export.h>
20 #include <linux/moduleloader.h> 20 #include <linux/moduleloader.h>
21 #include <linux/trace_events.h> 21 #include <linux/trace_events.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/kallsyms.h> 23 #include <linux/kallsyms.h>
24 #include <linux/file.h> 24 #include <linux/file.h>
25 #include <linux/fs.h> 25 #include <linux/fs.h>
26 #include <linux/sysfs.h> 26 #include <linux/sysfs.h>
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/slab.h> 28 #include <linux/slab.h>
29 #include <linux/vmalloc.h> 29 #include <linux/vmalloc.h>
30 #include <linux/elf.h> 30 #include <linux/elf.h>
31 #include <linux/proc_fs.h> 31 #include <linux/proc_fs.h>
32 #include <linux/security.h> 32 #include <linux/security.h>
33 #include <linux/seq_file.h> 33 #include <linux/seq_file.h>
34 #include <linux/syscalls.h> 34 #include <linux/syscalls.h>
35 #include <linux/fcntl.h> 35 #include <linux/fcntl.h>
36 #include <linux/rcupdate.h> 36 #include <linux/rcupdate.h>
37 #include <linux/capability.h> 37 #include <linux/capability.h>
38 #include <linux/cpu.h> 38 #include <linux/cpu.h>
39 #include <linux/moduleparam.h> 39 #include <linux/moduleparam.h>
40 #include <linux/errno.h> 40 #include <linux/errno.h>
41 #include <linux/err.h> 41 #include <linux/err.h>
42 #include <linux/vermagic.h> 42 #include <linux/vermagic.h>
43 #include <linux/notifier.h> 43 #include <linux/notifier.h>
44 #include <linux/sched.h> 44 #include <linux/sched.h>
45 #include <linux/device.h> 45 #include <linux/device.h>
46 #include <linux/string.h> 46 #include <linux/string.h>
47 #include <linux/mutex.h> 47 #include <linux/mutex.h>
48 #include <linux/rculist.h> 48 #include <linux/rculist.h>
49 #include <asm/uaccess.h> 49 #include <asm/uaccess.h>
50 #include <asm/cacheflush.h> 50 #include <asm/cacheflush.h>
51 #include <asm/mmu_context.h> 51 #include <asm/mmu_context.h>
52 #include <linux/license.h> 52 #include <linux/license.h>
53 #include <asm/sections.h> 53 #include <asm/sections.h>
54 #include <linux/tracepoint.h> 54 #include <linux/tracepoint.h>
55 #include <linux/ftrace.h> 55 #include <linux/ftrace.h>
56 #include <linux/async.h> 56 #include <linux/async.h>
57 #include <linux/percpu.h> 57 #include <linux/percpu.h>
58 #include <linux/kmemleak.h> 58 #include <linux/kmemleak.h>
59 #include <linux/jump_label.h> 59 #include <linux/jump_label.h>
60 #include <linux/pfn.h> 60 #include <linux/pfn.h>
61 #include <linux/bsearch.h> 61 #include <linux/bsearch.h>
62 #include <uapi/linux/module.h> 62 #include <uapi/linux/module.h>
63 #include "module-internal.h" 63 #include "module-internal.h"
64 64
65 #define CREATE_TRACE_POINTS 65 #define CREATE_TRACE_POINTS
66 #include <trace/events/module.h> 66 #include <trace/events/module.h>
67 67
68 #ifndef ARCH_SHF_SMALL 68 #ifndef ARCH_SHF_SMALL
69 #define ARCH_SHF_SMALL 0 69 #define ARCH_SHF_SMALL 0
70 #endif 70 #endif
71 71
72 /* 72 /*
73 * Modules' sections will be aligned on page boundaries 73 * Modules' sections will be aligned on page boundaries
74 * to ensure complete separation of code and data, but 74 * to ensure complete separation of code and data, but
75 * only when CONFIG_DEBUG_SET_MODULE_RONX=y 75 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
76 */ 76 */
77 #ifdef CONFIG_DEBUG_SET_MODULE_RONX 77 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
78 # define debug_align(X) ALIGN(X, PAGE_SIZE) 78 # define debug_align(X) ALIGN(X, PAGE_SIZE)
79 #else 79 #else
80 # define debug_align(X) (X) 80 # define debug_align(X) (X)
81 #endif 81 #endif
82 82
83 /* 83 /*
84 * Given BASE and SIZE this macro calculates the number of pages the 84 * Given BASE and SIZE this macro calculates the number of pages the
85 * memory regions occupies 85 * memory regions occupies
86 */ 86 */
87 #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ 87 #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \
88 (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ 88 (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \
89 PFN_DOWN((unsigned long)BASE) + 1) \ 89 PFN_DOWN((unsigned long)BASE) + 1) \
90 : (0UL)) 90 : (0UL))
91 91
92 /* If this is set, the section belongs in the init part of the module */ 92 /* If this is set, the section belongs in the init part of the module */
93 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) 93 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
94 94
95 /* 95 /*
96 * Mutex protects: 96 * Mutex protects:
97 * 1) List of modules (also safely readable with preempt_disable), 97 * 1) List of modules (also safely readable with preempt_disable),
98 * 2) module_use links, 98 * 2) module_use links,
99 * 3) module_addr_min/module_addr_max. 99 * 3) module_addr_min/module_addr_max.
100 * (delete and add uses RCU list operations). */ 100 * (delete and add uses RCU list operations). */
101 DEFINE_MUTEX(module_mutex); 101 DEFINE_MUTEX(module_mutex);
102 EXPORT_SYMBOL_GPL(module_mutex); 102 EXPORT_SYMBOL_GPL(module_mutex);
103 static LIST_HEAD(modules); 103 static LIST_HEAD(modules);
104 104
105 #ifdef CONFIG_MODULES_TREE_LOOKUP 105 #ifdef CONFIG_MODULES_TREE_LOOKUP
106 106
107 /* 107 /*
108 * Use a latched RB-tree for __module_address(); this allows us to use 108 * Use a latched RB-tree for __module_address(); this allows us to use
109 * RCU-sched lookups of the address from any context. 109 * RCU-sched lookups of the address from any context.
110 * 110 *
111 * Because modules have two address ranges: init and core, we need two 111 * Because modules have two address ranges: init and core, we need two
112 * latch_tree_nodes entries. Therefore we need the back-pointer from 112 * latch_tree_nodes entries. Therefore we need the back-pointer from
113 * mod_tree_node. 113 * mod_tree_node.
114 * 114 *
115 * Because init ranges are short lived we mark them unlikely and have placed 115 * Because init ranges are short lived we mark them unlikely and have placed
116 * them outside the critical cacheline in struct module. 116 * them outside the critical cacheline in struct module.
117 * 117 *
118 * This is conditional on PERF_EVENTS || TRACING because those can really hit 118 * This is conditional on PERF_EVENTS || TRACING because those can really hit
119 * __module_address() hard by doing a lot of stack unwinding; potentially from 119 * __module_address() hard by doing a lot of stack unwinding; potentially from
120 * NMI context. 120 * NMI context.
121 */ 121 */
122 122
123 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) 123 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
124 { 124 {
125 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 125 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
126 struct module *mod = mtn->mod; 126 struct module *mod = mtn->mod;
127 127
128 if (unlikely(mtn == &mod->mtn_init)) 128 if (unlikely(mtn == &mod->mtn_init))
129 return (unsigned long)mod->module_init; 129 return (unsigned long)mod->module_init;
130 130
131 return (unsigned long)mod->module_core; 131 return (unsigned long)mod->module_core;
132 } 132 }
133 133
134 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) 134 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
135 { 135 {
136 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 136 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
137 struct module *mod = mtn->mod; 137 struct module *mod = mtn->mod;
138 138
139 if (unlikely(mtn == &mod->mtn_init)) 139 if (unlikely(mtn == &mod->mtn_init))
140 return (unsigned long)mod->init_size; 140 return (unsigned long)mod->init_size;
141 141
142 return (unsigned long)mod->core_size; 142 return (unsigned long)mod->core_size;
143 } 143 }
144 144
145 static __always_inline bool 145 static __always_inline bool
146 mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) 146 mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
147 { 147 {
148 return __mod_tree_val(a) < __mod_tree_val(b); 148 return __mod_tree_val(a) < __mod_tree_val(b);
149 } 149 }
150 150
151 static __always_inline int 151 static __always_inline int
152 mod_tree_comp(void *key, struct latch_tree_node *n) 152 mod_tree_comp(void *key, struct latch_tree_node *n)
153 { 153 {
154 unsigned long val = (unsigned long)key; 154 unsigned long val = (unsigned long)key;
155 unsigned long start, end; 155 unsigned long start, end;
156 156
157 start = __mod_tree_val(n); 157 start = __mod_tree_val(n);
158 if (val < start) 158 if (val < start)
159 return -1; 159 return -1;
160 160
161 end = start + __mod_tree_size(n); 161 end = start + __mod_tree_size(n);
162 if (val >= end) 162 if (val >= end)
163 return 1; 163 return 1;
164 164
165 return 0; 165 return 0;
166 } 166 }
167 167
168 static const struct latch_tree_ops mod_tree_ops = { 168 static const struct latch_tree_ops mod_tree_ops = {
169 .less = mod_tree_less, 169 .less = mod_tree_less,
170 .comp = mod_tree_comp, 170 .comp = mod_tree_comp,
171 }; 171 };
172 172
173 static struct mod_tree_root { 173 static struct mod_tree_root {
174 struct latch_tree_root root; 174 struct latch_tree_root root;
175 unsigned long addr_min; 175 unsigned long addr_min;
176 unsigned long addr_max; 176 unsigned long addr_max;
177 } mod_tree __cacheline_aligned = { 177 } mod_tree __cacheline_aligned = {
178 .addr_min = -1UL, 178 .addr_min = -1UL,
179 }; 179 };
180 180
181 #define module_addr_min mod_tree.addr_min 181 #define module_addr_min mod_tree.addr_min
182 #define module_addr_max mod_tree.addr_max 182 #define module_addr_max mod_tree.addr_max
183 183
184 static noinline void __mod_tree_insert(struct mod_tree_node *node) 184 static noinline void __mod_tree_insert(struct mod_tree_node *node)
185 { 185 {
186 latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops); 186 latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
187 } 187 }
188 188
189 static void __mod_tree_remove(struct mod_tree_node *node) 189 static void __mod_tree_remove(struct mod_tree_node *node)
190 { 190 {
191 latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops); 191 latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
192 } 192 }
193 193
194 /* 194 /*
195 * These modifications: insert, remove_init and remove; are serialized by the 195 * These modifications: insert, remove_init and remove; are serialized by the
196 * module_mutex. 196 * module_mutex.
197 */ 197 */
198 static void mod_tree_insert(struct module *mod) 198 static void mod_tree_insert(struct module *mod)
199 { 199 {
200 mod->mtn_core.mod = mod; 200 mod->mtn_core.mod = mod;
201 mod->mtn_init.mod = mod; 201 mod->mtn_init.mod = mod;
202 202
203 __mod_tree_insert(&mod->mtn_core); 203 __mod_tree_insert(&mod->mtn_core);
204 if (mod->init_size) 204 if (mod->init_size)
205 __mod_tree_insert(&mod->mtn_init); 205 __mod_tree_insert(&mod->mtn_init);
206 } 206 }
207 207
208 static void mod_tree_remove_init(struct module *mod) 208 static void mod_tree_remove_init(struct module *mod)
209 { 209 {
210 if (mod->init_size) 210 if (mod->init_size)
211 __mod_tree_remove(&mod->mtn_init); 211 __mod_tree_remove(&mod->mtn_init);
212 } 212 }
213 213
214 static void mod_tree_remove(struct module *mod) 214 static void mod_tree_remove(struct module *mod)
215 { 215 {
216 __mod_tree_remove(&mod->mtn_core); 216 __mod_tree_remove(&mod->mtn_core);
217 mod_tree_remove_init(mod); 217 mod_tree_remove_init(mod);
218 } 218 }
219 219
220 static struct module *mod_find(unsigned long addr) 220 static struct module *mod_find(unsigned long addr)
221 { 221 {
222 struct latch_tree_node *ltn; 222 struct latch_tree_node *ltn;
223 223
224 ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops); 224 ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
225 if (!ltn) 225 if (!ltn)
226 return NULL; 226 return NULL;
227 227
228 return container_of(ltn, struct mod_tree_node, node)->mod; 228 return container_of(ltn, struct mod_tree_node, node)->mod;
229 } 229 }
230 230
231 #else /* MODULES_TREE_LOOKUP */ 231 #else /* MODULES_TREE_LOOKUP */
232 232
/* No tree lookup configured: plain bound variables and no-op tree hooks. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }
238 238
239 static struct module *mod_find(unsigned long addr) 239 static struct module *mod_find(unsigned long addr)
240 { 240 {
241 struct module *mod; 241 struct module *mod;
242 242
243 list_for_each_entry_rcu(mod, &modules, list) { 243 list_for_each_entry_rcu(mod, &modules, list) {
244 if (within_module(addr, mod)) 244 if (within_module(addr, mod))
245 return mod; 245 return mod;
246 } 246 }
247 247
248 return NULL; 248 return NULL;
249 } 249 }
250 250
251 #endif /* MODULES_TREE_LOOKUP */ 251 #endif /* MODULES_TREE_LOOKUP */
252 252
253 /* 253 /*
254 * Bounds of module text, for speeding up __module_address. 254 * Bounds of module text, for speeding up __module_address.
255 * Protected by module_mutex. 255 * Protected by module_mutex.
256 */ 256 */
257 static void __mod_update_bounds(void *base, unsigned int size) 257 static void __mod_update_bounds(void *base, unsigned int size)
258 { 258 {
259 unsigned long min = (unsigned long)base; 259 unsigned long min = (unsigned long)base;
260 unsigned long max = min + size; 260 unsigned long max = min + size;
261 261
262 if (min < module_addr_min) 262 if (min < module_addr_min)
263 module_addr_min = min; 263 module_addr_min = min;
264 if (max > module_addr_max) 264 if (max > module_addr_max)
265 module_addr_max = max; 265 module_addr_max = max;
266 } 266 }
267 267
268 static void mod_update_bounds(struct module *mod) 268 static void mod_update_bounds(struct module *mod)
269 { 269 {
270 __mod_update_bounds(mod->module_core, mod->core_size); 270 __mod_update_bounds(mod->module_core, mod->core_size);
271 if (mod->init_size) 271 if (mod->init_size)
272 __mod_update_bounds(mod->module_init, mod->init_size); 272 __mod_update_bounds(mod->module_init, mod->init_size);
273 } 273 }
274 274
275 #ifdef CONFIG_KGDB_KDB 275 #ifdef CONFIG_KGDB_KDB
276 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ 276 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
277 #endif /* CONFIG_KGDB_KDB */ 277 #endif /* CONFIG_KGDB_KDB */
278 278
279 static void module_assert_mutex(void) 279 static void module_assert_mutex(void)
280 { 280 {
281 lockdep_assert_held(&module_mutex); 281 lockdep_assert_held(&module_mutex);
282 } 282 }
283 283
/*
 * Assert the caller holds module_mutex *or* is in an RCU-sched
 * read-side section; checked only when lockdep is enabled and active.
 */
static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}
294 294
295 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); 295 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
296 #ifndef CONFIG_MODULE_SIG_FORCE 296 #ifndef CONFIG_MODULE_SIG_FORCE
297 module_param(sig_enforce, bool_enable_only, 0644); 297 module_param(sig_enforce, bool_enable_only, 0644);
298 #endif /* !CONFIG_MODULE_SIG_FORCE */ 298 #endif /* !CONFIG_MODULE_SIG_FORCE */
299 299
300 /* Block module loading/unloading? */ 300 /* Block module loading/unloading? */
301 int modules_disabled = 0; 301 int modules_disabled = 0;
302 core_param(nomodule, modules_disabled, bint, 0); 302 core_param(nomodule, modules_disabled, bint, 0);
303 303
304 /* Waiting for a module to finish initializing? */ 304 /* Waiting for a module to finish initializing? */
305 static DECLARE_WAIT_QUEUE_HEAD(module_wq); 305 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
306 306
307 static BLOCKING_NOTIFIER_HEAD(module_notify_list); 307 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
308 308
309 int register_module_notifier(struct notifier_block *nb) 309 int register_module_notifier(struct notifier_block *nb)
310 { 310 {
311 return blocking_notifier_chain_register(&module_notify_list, nb); 311 return blocking_notifier_chain_register(&module_notify_list, nb);
312 } 312 }
313 EXPORT_SYMBOL(register_module_notifier); 313 EXPORT_SYMBOL(register_module_notifier);
314 314
315 int unregister_module_notifier(struct notifier_block *nb) 315 int unregister_module_notifier(struct notifier_block *nb)
316 { 316 {
317 return blocking_notifier_chain_unregister(&module_notify_list, nb); 317 return blocking_notifier_chain_unregister(&module_notify_list, nb);
318 } 318 }
319 EXPORT_SYMBOL(unregister_module_notifier); 319 EXPORT_SYMBOL(unregister_module_notifier);
320 320
321 struct load_info { 321 struct load_info {
322 Elf_Ehdr *hdr; 322 Elf_Ehdr *hdr;
323 unsigned long len; 323 unsigned long len;
324 Elf_Shdr *sechdrs; 324 Elf_Shdr *sechdrs;
325 char *secstrings, *strtab; 325 char *secstrings, *strtab;
326 unsigned long symoffs, stroffs; 326 unsigned long symoffs, stroffs;
327 struct _ddebug *debug; 327 struct _ddebug *debug;
328 unsigned int num_debug; 328 unsigned int num_debug;
329 bool sig_ok; 329 bool sig_ok;
330 struct { 330 struct {
331 unsigned int sym, str, mod, vers, info, pcpu; 331 unsigned int sym, str, mod, vers, info, pcpu;
332 } index; 332 } index;
333 }; 333 };
334 334
335 /* We require a truly strong try_module_get(): 0 means failure due to 335 /* We require a truly strong try_module_get(): 0 means failure due to
336 ongoing or failed initialization etc. */ 336 ongoing or failed initialization etc. */
337 static inline int strong_try_module_get(struct module *mod) 337 static inline int strong_try_module_get(struct module *mod)
338 { 338 {
339 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); 339 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
340 if (mod && mod->state == MODULE_STATE_COMING) 340 if (mod && mod->state == MODULE_STATE_COMING)
341 return -EBUSY; 341 return -EBUSY;
342 if (try_module_get(mod)) 342 if (try_module_get(mod))
343 return 0; 343 return 0;
344 else 344 else
345 return -ENOENT; 345 return -ENOENT;
346 } 346 }
347 347
348 static inline void add_taint_module(struct module *mod, unsigned flag, 348 static inline void add_taint_module(struct module *mod, unsigned flag,
349 enum lockdep_ok lockdep_ok) 349 enum lockdep_ok lockdep_ok)
350 { 350 {
351 add_taint(flag, lockdep_ok); 351 add_taint(flag, lockdep_ok);
352 mod->taints |= (1U << flag); 352 mod->taints |= (1U << flag);
353 } 353 }
354 354
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);		/* never returns */
}
EXPORT_SYMBOL(__module_put_and_exit);
365 365
366 /* Find a module section: 0 means not found. */ 366 /* Find a module section: 0 means not found. */
367 static unsigned int find_sec(const struct load_info *info, const char *name) 367 static unsigned int find_sec(const struct load_info *info, const char *name)
368 { 368 {
369 unsigned int i; 369 unsigned int i;
370 370
371 for (i = 1; i < info->hdr->e_shnum; i++) { 371 for (i = 1; i < info->hdr->e_shnum; i++) {
372 Elf_Shdr *shdr = &info->sechdrs[i]; 372 Elf_Shdr *shdr = &info->sechdrs[i];
373 /* Alloc bit cleared means "ignore it." */ 373 /* Alloc bit cleared means "ignore it." */
374 if ((shdr->sh_flags & SHF_ALLOC) 374 if ((shdr->sh_flags & SHF_ALLOC)
375 && strcmp(info->secstrings + shdr->sh_name, name) == 0) 375 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
376 return i; 376 return i;
377 } 377 }
378 return 0; 378 return 0;
379 } 379 }
380 380
381 /* Find a module section, or NULL. */ 381 /* Find a module section, or NULL. */
382 static void *section_addr(const struct load_info *info, const char *name) 382 static void *section_addr(const struct load_info *info, const char *name)
383 { 383 {
384 /* Section 0 has sh_addr 0. */ 384 /* Section 0 has sh_addr 0. */
385 return (void *)info->sechdrs[find_sec(info, name)].sh_addr; 385 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
386 } 386 }
387 387
388 /* Find a module section, or NULL. Fill in number of "objects" in section. */ 388 /* Find a module section, or NULL. Fill in number of "objects" in section. */
389 static void *section_objs(const struct load_info *info, 389 static void *section_objs(const struct load_info *info,
390 const char *name, 390 const char *name,
391 size_t object_size, 391 size_t object_size,
392 unsigned int *num) 392 unsigned int *num)
393 { 393 {
394 unsigned int sec = find_sec(info, name); 394 unsigned int sec = find_sec(info, name);
395 395
396 /* Section 0 has sh_addr 0 and sh_size 0. */ 396 /* Section 0 has sh_addr 0 and sh_size 0. */
397 *num = info->sechdrs[sec].sh_size / object_size; 397 *num = info->sechdrs[sec].sh_size / object_size;
398 return (void *)info->sechdrs[sec].sh_addr; 398 return (void *)info->sechdrs[sec].sh_addr;
399 } 399 }
400 400
401 /* Provided by the linker */ 401 /* Provided by the linker */
402 extern const struct kernel_symbol __start___ksymtab[]; 402 extern const struct kernel_symbol __start___ksymtab[];
403 extern const struct kernel_symbol __stop___ksymtab[]; 403 extern const struct kernel_symbol __stop___ksymtab[];
404 extern const struct kernel_symbol __start___ksymtab_gpl[]; 404 extern const struct kernel_symbol __start___ksymtab_gpl[];
405 extern const struct kernel_symbol __stop___ksymtab_gpl[]; 405 extern const struct kernel_symbol __stop___ksymtab_gpl[];
406 extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 406 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
407 extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 407 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
408 extern const unsigned long __start___kcrctab[]; 408 extern const unsigned long __start___kcrctab[];
409 extern const unsigned long __start___kcrctab_gpl[]; 409 extern const unsigned long __start___kcrctab_gpl[];
410 extern const unsigned long __start___kcrctab_gpl_future[]; 410 extern const unsigned long __start___kcrctab_gpl_future[];
411 #ifdef CONFIG_UNUSED_SYMBOLS 411 #ifdef CONFIG_UNUSED_SYMBOLS
412 extern const struct kernel_symbol __start___ksymtab_unused[]; 412 extern const struct kernel_symbol __start___ksymtab_unused[];
413 extern const struct kernel_symbol __stop___ksymtab_unused[]; 413 extern const struct kernel_symbol __stop___ksymtab_unused[];
414 extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; 414 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
415 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; 415 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
416 extern const unsigned long __start___kcrctab_unused[]; 416 extern const unsigned long __start___kcrctab_unused[];
417 extern const unsigned long __start___kcrctab_unused_gpl[]; 417 extern const unsigned long __start___kcrctab_unused_gpl[];
418 #endif 418 #endif
419 419
420 #ifndef CONFIG_MODVERSIONS 420 #ifndef CONFIG_MODVERSIONS
421 #define symversion(base, idx) NULL 421 #define symversion(base, idx) NULL
422 #else 422 #else
423 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) 423 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
424 #endif 424 #endif
425 425
426 static bool each_symbol_in_section(const struct symsearch *arr, 426 static bool each_symbol_in_section(const struct symsearch *arr,
427 unsigned int arrsize, 427 unsigned int arrsize,
428 struct module *owner, 428 struct module *owner,
429 bool (*fn)(const struct symsearch *syms, 429 bool (*fn)(const struct symsearch *syms,
430 struct module *owner, 430 struct module *owner,
431 void *data), 431 void *data),
432 void *data) 432 void *data)
433 { 433 {
434 unsigned int j; 434 unsigned int j;
435 435
436 for (j = 0; j < arrsize; j++) { 436 for (j = 0; j < arrsize; j++) {
437 if (fn(&arr[j], owner, data)) 437 if (fn(&arr[j], owner, data))
438 return true; 438 return true;
439 } 439 }
440 440
441 return false; 441 return false;
442 } 442 }
443 443
444 /* Returns true as soon as fn returns true, otherwise false. */ 444 /* Returns true as soon as fn returns true, otherwise false. */
445 bool each_symbol_section(bool (*fn)(const struct symsearch *arr, 445 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
446 struct module *owner, 446 struct module *owner,
447 void *data), 447 void *data),
448 void *data) 448 void *data)
449 { 449 {
450 struct module *mod; 450 struct module *mod;
451 static const struct symsearch arr[] = { 451 static const struct symsearch arr[] = {
452 { __start___ksymtab, __stop___ksymtab, __start___kcrctab, 452 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
453 NOT_GPL_ONLY, false }, 453 NOT_GPL_ONLY, false },
454 { __start___ksymtab_gpl, __stop___ksymtab_gpl, 454 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
455 __start___kcrctab_gpl, 455 __start___kcrctab_gpl,
456 GPL_ONLY, false }, 456 GPL_ONLY, false },
457 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, 457 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
458 __start___kcrctab_gpl_future, 458 __start___kcrctab_gpl_future,
459 WILL_BE_GPL_ONLY, false }, 459 WILL_BE_GPL_ONLY, false },
460 #ifdef CONFIG_UNUSED_SYMBOLS 460 #ifdef CONFIG_UNUSED_SYMBOLS
461 { __start___ksymtab_unused, __stop___ksymtab_unused, 461 { __start___ksymtab_unused, __stop___ksymtab_unused,
462 __start___kcrctab_unused, 462 __start___kcrctab_unused,
463 NOT_GPL_ONLY, true }, 463 NOT_GPL_ONLY, true },
464 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, 464 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
465 __start___kcrctab_unused_gpl, 465 __start___kcrctab_unused_gpl,
466 GPL_ONLY, true }, 466 GPL_ONLY, true },
467 #endif 467 #endif
468 }; 468 };
469 469
470 module_assert_mutex_or_preempt(); 470 module_assert_mutex_or_preempt();
471 471
472 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) 472 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
473 return true; 473 return true;
474 474
475 list_for_each_entry_rcu(mod, &modules, list) { 475 list_for_each_entry_rcu(mod, &modules, list) {
476 struct symsearch arr[] = { 476 struct symsearch arr[] = {
477 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 477 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
478 NOT_GPL_ONLY, false }, 478 NOT_GPL_ONLY, false },
479 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, 479 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
480 mod->gpl_crcs, 480 mod->gpl_crcs,
481 GPL_ONLY, false }, 481 GPL_ONLY, false },
482 { mod->gpl_future_syms, 482 { mod->gpl_future_syms,
483 mod->gpl_future_syms + mod->num_gpl_future_syms, 483 mod->gpl_future_syms + mod->num_gpl_future_syms,
484 mod->gpl_future_crcs, 484 mod->gpl_future_crcs,
485 WILL_BE_GPL_ONLY, false }, 485 WILL_BE_GPL_ONLY, false },
486 #ifdef CONFIG_UNUSED_SYMBOLS 486 #ifdef CONFIG_UNUSED_SYMBOLS
487 { mod->unused_syms, 487 { mod->unused_syms,
488 mod->unused_syms + mod->num_unused_syms, 488 mod->unused_syms + mod->num_unused_syms,
489 mod->unused_crcs, 489 mod->unused_crcs,
490 NOT_GPL_ONLY, true }, 490 NOT_GPL_ONLY, true },
491 { mod->unused_gpl_syms, 491 { mod->unused_gpl_syms,
492 mod->unused_gpl_syms + mod->num_unused_gpl_syms, 492 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
493 mod->unused_gpl_crcs, 493 mod->unused_gpl_crcs,
494 GPL_ONLY, true }, 494 GPL_ONLY, true },
495 #endif 495 #endif
496 }; 496 };
497 497
498 if (mod->state == MODULE_STATE_UNFORMED) 498 if (mod->state == MODULE_STATE_UNFORMED)
499 continue; 499 continue;
500 500
501 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) 501 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
502 return true; 502 return true;
503 } 503 }
504 return false; 504 return false;
505 } 505 }
506 EXPORT_SYMBOL_GPL(each_symbol_section); 506 EXPORT_SYMBOL_GPL(each_symbol_section);
507 507
/*
 * Bundled in/out arguments for find_symbol(), threaded through the
 * each_symbol_section() callbacks as the opaque "data" cookie.
 */
struct find_symbol_arg {
	/* Input */
	const char *name;	/* symbol name to look up */
	bool gplok;		/* requester may bind GPL-only exports */
	bool warn;		/* emit deprecation/licence warnings on match */

	/* Output */
	struct module *owner;	/* module exporting the symbol */
	const unsigned long *crc;	/* CRC table entry for the symbol */
	const struct kernel_symbol *sym;	/* matched export table entry */
};
519 519
/*
 * Validate a located export against the requester's licence and record
 * the match in the find_symbol_arg output fields.
 *
 * Returns false (keep searching) when a GPL-only symbol was requested
 * by a non-GPL user; true once the symbol has been accepted.
 */
static bool check_symbol(const struct symsearch *syms,
				    struct module *owner,
				    unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		/* Non-GPL requesters may not bind GPL-only exports. */
		if (syms->licence == GPL_ONLY)
			return false;
		/* Soon-to-be GPL-only: allow for now, but warn if asked to. */
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	/* Export is marked UNUSED: nag the user of this module. */
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	/* Accept: report the owner, version CRC and export entry. */
	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
553 553
554 static int cmp_name(const void *va, const void *vb) 554 static int cmp_name(const void *va, const void *vb)
555 { 555 {
556 const char *a; 556 const char *a;
557 const struct kernel_symbol *b; 557 const struct kernel_symbol *b;
558 a = va; b = vb; 558 a = va; b = vb;
559 return strcmp(a, b->name); 559 return strcmp(a, b->name);
560 } 560 }
561 561
/*
 * each_symbol_section() callback: binary-search one export table
 * (which bsearch requires to be sorted by name) for fsa->name, then
 * vet any hit with check_symbol().  Returns true on an accepted match.
 */
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
577 577
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex.
 *
 * @owner and @crc may be NULL when the caller does not need them.
 * Returns NULL if no export of @name is usable by the requester
 * (including the case where it exists but is GPL-only and !@gplok).
 */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	/* Walk every export table; outputs are filled in on success. */
	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
604 604
605 /* 605 /*
606 * Search for module by name: must hold module_mutex (or preempt disabled 606 * Search for module by name: must hold module_mutex (or preempt disabled
607 * for read-only access). 607 * for read-only access).
608 */ 608 */
609 static struct module *find_module_all(const char *name, size_t len, 609 static struct module *find_module_all(const char *name, size_t len,
610 bool even_unformed) 610 bool even_unformed)
611 { 611 {
612 struct module *mod; 612 struct module *mod;
613 613
614 module_assert_mutex_or_preempt(); 614 module_assert_mutex_or_preempt();
615 615
616 list_for_each_entry(mod, &modules, list) { 616 list_for_each_entry(mod, &modules, list) {
617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
618 continue; 618 continue;
619 if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) 619 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
620 return mod; 620 return mod;
621 } 621 }
622 return NULL; 622 return NULL;
623 } 623 }
624 624
625 struct module *find_module(const char *name) 625 struct module *find_module(const char *name)
626 { 626 {
627 module_assert_mutex(); 627 module_assert_mutex();
628 return find_module_all(name, strlen(name), false); 628 return find_module_all(name, strlen(name), false);
629 } 629 }
630 EXPORT_SYMBOL_GPL(find_module); 630 EXPORT_SYMBOL_GPL(find_module);
631 631
632 #ifdef CONFIG_SMP 632 #ifdef CONFIG_SMP
633 633
/* Accessor for the module's reserved static per-cpu area (SMP). */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}
638 638
/*
 * Allocate the module's static per-cpu area from the .data..percpu
 * section described in @info.  A zero-sized section means the module
 * uses no per-cpu data and is not an error.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	/* Per-cpu allocator can't honour more than page alignment: clamp. */
	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	/* Remembered so is_module_percpu_address() can range-check. */
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}
662 662
/* Release the module's static per-cpu area (free_percpu(NULL) is a no-op). */
static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}
667 667
/* Section index of the module's per-cpu data, or 0 if absent. */
static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}
672 672
/*
 * Replicate the module's initial per-cpu image (@from, @size bytes)
 * into every possible CPU's copy of its per-cpu area.
 */
static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
681 681
682 /** 682 /**
683 * is_module_percpu_address - test whether address is from module static percpu 683 * is_module_percpu_address - test whether address is from module static percpu
684 * @addr: address to test 684 * @addr: address to test
685 * 685 *
686 * Test whether @addr belongs to module static percpu area. 686 * Test whether @addr belongs to module static percpu area.
687 * 687 *
688 * RETURNS: 688 * RETURNS:
689 * %true if @addr is from module static percpu area 689 * %true if @addr is from module static percpu area
690 */ 690 */
691 bool is_module_percpu_address(unsigned long addr) 691 bool is_module_percpu_address(unsigned long addr)
692 { 692 {
693 struct module *mod; 693 struct module *mod;
694 unsigned int cpu; 694 unsigned int cpu;
695 695
696 preempt_disable(); 696 preempt_disable();
697 697
698 list_for_each_entry_rcu(mod, &modules, list) { 698 list_for_each_entry_rcu(mod, &modules, list) {
699 if (mod->state == MODULE_STATE_UNFORMED) 699 if (mod->state == MODULE_STATE_UNFORMED)
700 continue; 700 continue;
701 if (!mod->percpu_size) 701 if (!mod->percpu_size)
702 continue; 702 continue;
703 for_each_possible_cpu(cpu) { 703 for_each_possible_cpu(cpu) {
704 void *start = per_cpu_ptr(mod->percpu, cpu); 704 void *start = per_cpu_ptr(mod->percpu, cpu);
705 705
706 if ((void *)addr >= start && 706 if ((void *)addr >= start &&
707 (void *)addr < start + mod->percpu_size) { 707 (void *)addr < start + mod->percpu_size) {
708 preempt_enable(); 708 preempt_enable();
709 return true; 709 return true;
710 } 710 }
711 } 711 }
712 } 712 }
713 713
714 preempt_enable(); 714 preempt_enable();
715 return false; 715 return false;
716 } 716 }
717 717
718 #else /* ... !CONFIG_SMP */ 718 #else /* ... !CONFIG_SMP */
719 719
/* !CONFIG_SMP: modules carry no static per-cpu area. */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
/* !CONFIG_SMP: reject modules that carry a non-empty percpu section. */
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
/* !CONFIG_SMP: nothing was allocated, so nothing to free. */
static inline void percpu_modfree(struct module *mod)
{
}
/* !CONFIG_SMP: report "no percpu section" (index 0). */
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
/* !CONFIG_SMP: only reachable with an empty image; anything else is a bug. */
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
/* !CONFIG_SMP: no module static percpu area exists, so no address matches. */
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}
748 748
749 #endif /* CONFIG_SMP */ 749 #endif /* CONFIG_SMP */
750 750
/*
 * MODINFO_ATTR(field) generates the sysfs plumbing for one string taken
 * from the module's modinfo: a setup routine (kstrdup of the parsed
 * value), a show routine, an existence test, a free routine, and the
 * module_attribute tying them together.
 */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

/* The two modinfo strings exposed for every module. */
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
780 780
/* Name of the most recently unloaded module, shown for oops diagnostics. */
static char last_unloaded_module[MODULE_NAME_LEN+1];
782 782
783 #ifdef CONFIG_MODULE_UNLOAD 783 #ifdef CONFIG_MODULE_UNLOAD
784 784
EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1
789 789
/* Init the unload section of the module.  Always succeeds (returns 0). */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	/* Modules that use us / modules we use. */
	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}
807 807
808 /* Does a already use b? */ 808 /* Does a already use b? */
809 static int already_uses(struct module *a, struct module *b) 809 static int already_uses(struct module *a, struct module *b)
810 { 810 {
811 struct module_use *use; 811 struct module_use *use;
812 812
813 list_for_each_entry(use, &b->source_list, source_list) { 813 list_for_each_entry(use, &b->source_list, source_list) {
814 if (use->source == a) { 814 if (use->source == a) {
815 pr_debug("%s uses %s!\n", a->name, b->name); 815 pr_debug("%s uses %s!\n", a->name, b->name);
816 return 1; 816 return 1;
817 } 817 }
818 } 818 }
819 pr_debug("%s does not use %s!\n", a->name, b->name); 819 pr_debug("%s does not use %s!\n", a->name, b->name);
820 return 0; 820 return 0;
821 } 821 }
822 822
823 /* 823 /*
824 * Module a uses b 824 * Module a uses b
825 * - we add 'a' as a "source", 'b' as a "target" of module use 825 * - we add 'a' as a "source", 'b' as a "target" of module use
826 * - the module_use is added to the list of 'b' sources (so 826 * - the module_use is added to the list of 'b' sources (so
827 * 'b' can walk the list to see who sourced them), and of 'a' 827 * 'b' can walk the list to see who sourced them), and of 'a'
828 * targets (so 'a' can see what modules it targets). 828 * targets (so 'a' can see what modules it targets).
829 */ 829 */
830 static int add_module_usage(struct module *a, struct module *b) 830 static int add_module_usage(struct module *a, struct module *b)
831 { 831 {
832 struct module_use *use; 832 struct module_use *use;
833 833
834 pr_debug("Allocating new usage for %s.\n", a->name); 834 pr_debug("Allocating new usage for %s.\n", a->name);
835 use = kmalloc(sizeof(*use), GFP_ATOMIC); 835 use = kmalloc(sizeof(*use), GFP_ATOMIC);
836 if (!use) { 836 if (!use) {
837 pr_warn("%s: out of memory loading\n", a->name); 837 pr_warn("%s: out of memory loading\n", a->name);
838 return -ENOMEM; 838 return -ENOMEM;
839 } 839 }
840 840
841 use->source = a; 841 use->source = a;
842 use->target = b; 842 use->target = b;
843 list_add(&use->source_list, &b->source_list); 843 list_add(&use->source_list, &b->source_list);
844 list_add(&use->target_list, &a->target_list); 844 list_add(&use->target_list, &a->target_list);
845 return 0; 845 return 0;
846 } 846 }
847 847
/* Module a uses b: caller needs module_mutex() */
/*
 * Takes a reference on @b and records the dependency so @b cannot be
 * unloaded while @a is loaded.  @b == NULL or an existing dependency
 * is a successful no-op.  Returns 0 or a negative errno.
 */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		/* Undo the reference taken above on failure. */
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
869 869
/*
 * Clear the unload stuff of the module: drop every reference @mod
 * holds on the modules it uses and free the usage records.  Takes
 * module_mutex to serialize against the dependency lists.
 */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		/* Unlink from both the target's and our own list. */
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}
886 886
#ifdef CONFIG_MODULE_FORCE_UNLOAD
/*
 * Forced unload requested via O_TRUNC in the delete_module() flags.
 * Returns non-zero (and taints the kernel) when the force is honoured.
 */
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
/* Forced unload compiled out: never honour the force flag. */
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
901 901
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	/* The base reference must still have been there. */
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}
916 916
/*
 * Attempt to transition @mod to MODULE_STATE_GOING.  Fails with
 * -EWOULDBLOCK if the module is still in use and the unload was not
 * forced; *@forced reports whether a force was applied.
 */
static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}
931 931
932 /** 932 /**
933 * module_refcount - return the refcount or -1 if unloading 933 * module_refcount - return the refcount or -1 if unloading
934 * 934 *
935 * @mod: the module we're checking 935 * @mod: the module we're checking
936 * 936 *
937 * Returns: 937 * Returns:
938 * -1 if the module is in the process of unloading 938 * -1 if the module is in the process of unloading
939 * otherwise the number of references in the kernel to the module 939 * otherwise the number of references in the kernel to the module
940 */ 940 */
941 int module_refcount(struct module *mod) 941 int module_refcount(struct module *mod)
942 { 942 {
943 return atomic_read(&mod->refcnt) - MODULE_REF_BASE; 943 return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
944 } 944 }
945 EXPORT_SYMBOL(module_refcount); 945 EXPORT_SYMBOL(module_refcount);
946 946
947 /* This exists whether we can unload or not */ 947 /* This exists whether we can unload or not */
948 static void free_module(struct module *mod); 948 static void free_module(struct module *mod);
949 949
/*
 * delete_module(2): unload the named module.
 *
 * Requires CAP_SYS_MODULE.  Fails if other modules depend on it, if it
 * is not live, or if it is still in use and no force (O_TRUNC in
 * @flags) applies.  On success the module's exit function is run, the
 * GOING notifier chain fires, and the module is freed.
 */
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	/* Copy the name in and make sure it is terminated. */
	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Teardown runs without module_mutex held. */
	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	/* Let async work started by the module finish before freeing. */
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
1019 1019
/*
 * Emit the unload-related fields for one module to @m: refcount,
 * comma-separated list of dependent modules, and [permanent] for
 * modules that cannot be unloaded (init but no exit).
 */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	/* No users and not permanent: print the placeholder. */
	if (!printed_something)
		seq_puts(m, "-");
}
1044 1044
/*
 * Drop the reference on the module owning exported @symbol.
 * BUGs if the symbol cannot be found.
 */
void __symbol_put(const char *symbol)
{
	struct module *owner;

	/* Preemption off keeps the module list stable for the lookup. */
	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
1056 1056
/* Note this assumes addr is a function, which it currently always is. */
/*
 * Drop the reference on the module containing the code at @addr.
 * Core-kernel addresses need no reference and are ignored; a non-core
 * address that maps to no module is a bug.
 */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
1077 1077
/* sysfs show routine for /sys/module/<name>/refcnt. */
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

/* Read-only "refcnt" attribute. */
static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);
1086 1086
/*
 * Unconditionally take a reference on @module (NULL is a no-op).
 * Unlike try_module_get(), this cannot fail and ignores module state.
 */
void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);
1097 1097
/*
 * Try to take a reference on @module.  Fails (returns false) when the
 * module is no longer live or its refcount has already hit zero.
 * A NULL @module trivially succeeds.
 */
bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
1116 1116
/*
 * Drop a reference on @module (NULL is a no-op).  The count never
 * goes below zero; an underflow attempt triggers a WARN instead.
 */
void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
1130 1130
#else /* !CONFIG_MODULE_UNLOAD */
/* Without unload support there is no usage tracking, so these helpers
 * collapse to trivial stubs with the same interfaces as above. */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

/* Recording a dependency reduces to pinning the target module. */
int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
1153 1153
1154 static size_t module_flags_taint(struct module *mod, char *buf) 1154 static size_t module_flags_taint(struct module *mod, char *buf)
1155 { 1155 {
1156 size_t l = 0; 1156 size_t l = 0;
1157 1157
1158 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE)) 1158 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
1159 buf[l++] = 'P'; 1159 buf[l++] = 'P';
1160 if (mod->taints & (1 << TAINT_OOT_MODULE)) 1160 if (mod->taints & (1 << TAINT_OOT_MODULE))
1161 buf[l++] = 'O'; 1161 buf[l++] = 'O';
1162 if (mod->taints & (1 << TAINT_FORCED_MODULE)) 1162 if (mod->taints & (1 << TAINT_FORCED_MODULE))
1163 buf[l++] = 'F'; 1163 buf[l++] = 'F';
1164 if (mod->taints & (1 << TAINT_CRAP)) 1164 if (mod->taints & (1 << TAINT_CRAP))
1165 buf[l++] = 'C'; 1165 buf[l++] = 'C';
1166 if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) 1166 if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
1167 buf[l++] = 'E'; 1167 buf[l++] = 'E';
1168 /* 1168 /*
1169 * TAINT_FORCED_RMMOD: could be added. 1169 * TAINT_FORCED_RMMOD: could be added.
1170 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't 1170 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1171 * apply to modules. 1171 * apply to modules.
1172 */ 1172 */
1173 return l; 1173 return l;
1174 } 1174 }
1175 1175
/* sysfs: /sys/module/<name>/initstate -- textual module load state. */
static ssize_t show_initstate(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		/* NOTE(review): presumably no other state (e.g. UNFORMED)
		 * can be observed through sysfs, hence the hard BUG(). */
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);
1199 1199
/*
 * sysfs: /sys/module/<name>/uevent (write-only) -- lets userspace
 * synthesize a uevent (e.g. echo "add") for this module's kobject.
 * Unparseable input is silently ignored; the write always "succeeds".
 */
static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);
1213 1213
/* sysfs: /sys/module/<name>/coresize -- size of the core section(s). */
static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

/* sysfs: /sys/module/<name>/initsize -- size of the init section(s),
 * which are freed once the module finishes initializing. */
static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);
1231 1231
/* sysfs: /sys/module/<name>/taint -- this module's taint flags plus a
 * trailing newline (no NUL; sysfs show returns a byte count). */
static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);
1244 1244
/* All per-module sysfs attributes instantiated by
 * module_add_modinfo_attrs(); NULL-terminated. */
static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};
1258 1258
/* This kernel's version magic, compared against each module's. */
static const char vermagic[] = VERMAGIC_STRING;

/*
 * try_to_force_load - permit a mismatched module if force-load is built in.
 * @mod:    module being loaded.
 * @reason: human-readable cause for the force (symbol name, "magic", ...).
 *
 * With CONFIG_MODULE_FORCE_LOAD: taints the module and kernel (warning
 * only on the first force) and returns 0.  Without it: always -ENOEXEC.
 */
static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
1272 1272
#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	/* crc_owner == NULL means the symbol is exported by vmlinux itself,
	 * whose kcrctab entries were relocated by reloc_start. */
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}
1284 1284
1285 static int check_version(Elf_Shdr *sechdrs, 1285 static int check_version(Elf_Shdr *sechdrs,
1286 unsigned int versindex, 1286 unsigned int versindex,
1287 const char *symname, 1287 const char *symname,
1288 struct module *mod, 1288 struct module *mod,
1289 const unsigned long *crc, 1289 const unsigned long *crc,
1290 const struct module *crc_owner) 1290 const struct module *crc_owner)
1291 { 1291 {
1292 unsigned int i, num_versions; 1292 unsigned int i, num_versions;
1293 struct modversion_info *versions; 1293 struct modversion_info *versions;
1294 1294
1295 /* Exporting module didn't supply crcs? OK, we're already tainted. */ 1295 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1296 if (!crc) 1296 if (!crc)
1297 return 1; 1297 return 1;
1298 1298
1299 /* No versions at all? modprobe --force does this. */ 1299 /* No versions at all? modprobe --force does this. */
1300 if (versindex == 0) 1300 if (versindex == 0)
1301 return try_to_force_load(mod, symname) == 0; 1301 return try_to_force_load(mod, symname) == 0;
1302 1302
1303 versions = (void *) sechdrs[versindex].sh_addr; 1303 versions = (void *) sechdrs[versindex].sh_addr;
1304 num_versions = sechdrs[versindex].sh_size 1304 num_versions = sechdrs[versindex].sh_size
1305 / sizeof(struct modversion_info); 1305 / sizeof(struct modversion_info);
1306 1306
1307 for (i = 0; i < num_versions; i++) { 1307 for (i = 0; i < num_versions; i++) {
1308 if (strcmp(versions[i].name, symname) != 0) 1308 if (strcmp(versions[i].name, symname) != 0)
1309 continue; 1309 continue;
1310 1310
1311 if (versions[i].crc == maybe_relocated(*crc, crc_owner)) 1311 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1312 return 1; 1312 return 1;
1313 pr_debug("Found checksum %lX vs module %lX\n", 1313 pr_debug("Found checksum %lX vs module %lX\n",
1314 maybe_relocated(*crc, crc_owner), versions[i].crc); 1314 maybe_relocated(*crc, crc_owner), versions[i].crc);
1315 goto bad_version; 1315 goto bad_version;
1316 } 1316 }
1317 1317
1318 pr_warn("%s: no symbol version for %s\n", mod->name, symname); 1318 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
1319 return 0; 1319 return 0;
1320 1320
1321 bad_version: 1321 bad_version:
1322 pr_warn("%s: disagrees about version of symbol %s\n", 1322 pr_warn("%s: disagrees about version of symbol %s\n",
1323 mod->name, symname); 1323 mod->name, symname);
1324 return 0; 1324 return 0;
1325 } 1325 }
1326 1326
/*
 * check_modstruct_version - verify the module ABI itself by checking the
 * CRC of the well-known "module_layout" symbol exported by vmlinux.
 */
static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false)) {
		/* module_layout must exist in vmlinux; anything else is a
		 * build/runtime corruption. */
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}
1348 1348
/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	/* With CRCs the version prefix (everything up to the first space)
	 * carries no information; skip past it on both sides. */
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return !strcmp(amagic, bmagic);
}
#else
/* !CONFIG_MODVERSIONS: no CRC information exists, so version checks
 * trivially pass and vermagic must match in full. */
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
1383 1383
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
/*
 * Returns the kernel_symbol on success, NULL if not found (caller may
 * retry: the exporter could still be initializing), or ERR_PTR on a
 * version mismatch / failed ref.  @ownername receives the exporter's
 * name (copied under module_mutex so it stays valid for messages).
 */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	/* Proprietary modules may not bind to GPL-only exports. */
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	/* NOTE(review): strncpy does not NUL-terminate a maximal-length
	 * name -- presumably module names are always < MODULE_NAME_LEN;
	 * confirm against the name-length checks at load time. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}
1426 1426
/*
 * resolve_symbol_wait - resolve_symbol() with a bounded wait for the
 * exporting module to finish initializing (-EBUSY from resolve_symbol).
 * Gives up after 30 seconds and returns whatever resolve_symbol() last
 * produced (possibly an ERR_PTR).
 */
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
1444 1444
1445 /* 1445 /*
1446 * /sys/module/foo/sections stuff 1446 * /sys/module/foo/sections stuff
1447 * J. Corbet <corbet@lwn.net> 1447 * J. Corbet <corbet@lwn.net>
1448 */ 1448 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
/* A section is "loaded" (worth exposing) only if it is SHF_ALLOC and
 * non-empty. */
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

/* One /sys/module/<name>/sections/<sect> entry. */
struct module_sect_attr {
	struct module_attribute mattr;
	char *name;		/* kstrdup()ed section name */
	unsigned long address;	/* section load address */
};

/* The whole "sections" attribute group; attrs[] is a flexible-style
 * trailing array sized at allocation time. */
struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};
1468 1468
/* show() for a section attribute: prints the section's load address
 * (%pK hides it from unprivileged readers per kptr_restrict). */
static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}
1476 1476
1477 static void free_sect_attrs(struct module_sect_attrs *sect_attrs) 1477 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1478 { 1478 {
1479 unsigned int section; 1479 unsigned int section;
1480 1480
1481 for (section = 0; section < sect_attrs->nsections; section++) 1481 for (section = 0; section < sect_attrs->nsections; section++)
1482 kfree(sect_attrs->attrs[section].name); 1482 kfree(sect_attrs->attrs[section].name);
1483 kfree(sect_attrs); 1483 kfree(sect_attrs);
1484 } 1484 }
1485 1485
/*
 * add_sect_attrs - create /sys/module/<name>/sections/ with one file per
 * loaded section.  Best effort: any failure just leaves the group absent
 * (the function returns void and frees everything on the way out).
 *
 * Layout: a single allocation holding the module_sect_attrs header plus
 * its attrs[] (size[0]), followed by the NULL-terminated attribute
 * pointer array for the group (size[1]).
 */
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	/* Pointer array lives right after the attrs[] in the same block. */
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
				      GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		/* nsections counts only entries whose name strdup succeeded,
		 * so free_sect_attrs() frees exactly the right names. */
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	free_sect_attrs(sect_attrs);
}
1539 1539
/* Tear down /sys/module/<name>/sections/ if it was created. */
static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}
1551 1551
1552 /* 1552 /*
1553 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. 1553 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1554 */ 1554 */
1555 1555
/* Bookkeeping for /sys/module/<name>/notes/: the "notes" kobject plus
 * one bin_attribute per SHT_NOTE section (trailing array). */
struct module_notes_attrs {
	struct kobject *dir;	/* the "notes" directory kobject */
	unsigned int notes;	/* number of entries in attrs[] */
	struct bin_attribute attrs[0];
};
1561 1561
/* read() for a notes binary attribute: straight copy out of the note
 * section contents stashed in bin_attr->private. */
static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}
1572 1572
/*
 * free_notes_attrs - undo add_notes_attrs() up to the first @i bin files.
 * @i is the number of files successfully created; if the "notes" kobject
 * was never created (dir == NULL) there are no files to remove.
 */
static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}
1584 1584
/*
 * add_notes_attrs - create /sys/module/<name>/notes/ exposing each
 * SHT_NOTE section's raw contents.  Best effort like add_sect_attrs():
 * returns void and cleans up after itself on any failure.
 */
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	/* "loaded" tracks the index into sect_attrs->attrs[] so each note
	 * file can reuse the already-strdup()ed section name. */
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

  out:
	/* i is the count of bin files created so far (or is ignored when
	 * dir is NULL), matching free_notes_attrs()'s contract. */
	free_notes_attrs(notes_attrs, i);
}
1643 1643
/* Tear down /sys/module/<name>/notes/ if it was created. */
static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}
1649 1649
#else

/* !CONFIG_KALLSYMS: no section names available, so the sections/ and
 * notes/ sysfs trees are simply not built. */
static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */
1670 1670
/*
 * add_usage_links - for each module this one uses, add a symlink named
 * after us under the target's holders/ directory.  Best effort: link
 * creation failures are deliberately ignored ("nowarn" soaks up the
 * __must_check return of sysfs_create_link).
 */
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}
1685 1685
/* Remove the holders/ symlinks created by add_usage_links(). */
static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}
1697 1697
1698 static int module_add_modinfo_attrs(struct module *mod) 1698 static int module_add_modinfo_attrs(struct module *mod)
1699 { 1699 {
1700 struct module_attribute *attr; 1700 struct module_attribute *attr;
1701 struct module_attribute *temp_attr; 1701 struct module_attribute *temp_attr;
1702 int error = 0; 1702 int error = 0;
1703 int i; 1703 int i;
1704 1704
1705 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * 1705 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1706 (ARRAY_SIZE(modinfo_attrs) + 1)), 1706 (ARRAY_SIZE(modinfo_attrs) + 1)),
1707 GFP_KERNEL); 1707 GFP_KERNEL);
1708 if (!mod->modinfo_attrs) 1708 if (!mod->modinfo_attrs)
1709 return -ENOMEM; 1709 return -ENOMEM;
1710 1710
1711 temp_attr = mod->modinfo_attrs; 1711 temp_attr = mod->modinfo_attrs;
1712 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { 1712 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1713 if (!attr->test || 1713 if (!attr->test ||
1714 (attr->test && attr->test(mod))) { 1714 (attr->test && attr->test(mod))) {
1715 memcpy(temp_attr, attr, sizeof(*temp_attr)); 1715 memcpy(temp_attr, attr, sizeof(*temp_attr));
1716 sysfs_attr_init(&temp_attr->attr); 1716 sysfs_attr_init(&temp_attr->attr);
1717 error = sysfs_create_file(&mod->mkobj.kobj, 1717 error = sysfs_create_file(&mod->mkobj.kobj,
1718 &temp_attr->attr); 1718 &temp_attr->attr);
1719 ++temp_attr; 1719 ++temp_attr;
1720 } 1720 }
1721 } 1721 }
1722 return error; 1722 return error;
1723 } 1723 }
1724 1724
/*
 * Remove the modinfo sysfs files created by module_add_modinfo_attrs()
 * and free the per-module attribute array.  kzalloc() zero-filled the
 * slots past the last one used, so a NULL attr.name marks end-of-list.
 */
static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		/* give the attribute a chance to release per-module state */
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
1740 1740
/*
 * Drop the final reference on the module's kobject and block until it
 * is fully released.  The completion is stashed in mkobj so the
 * kobject release path can signal it (NOTE(review): presumably via
 * the module ktype's release callback -- confirm against module_ktype);
 * waiting here guarantees the kobject is gone before the memory that
 * embeds it can be reused.
 */
static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}
1748 1748
/*
 * Create and register the bare /sys/module/<name> kobject for a module
 * being loaded.  Fails with -EINVAL if module sysfs support is not yet
 * up, or if an object of the same name already exists in module_kset.
 * The KOBJ_ADD uevent is deliberately deferred until mod_sysfs_setup()
 * has fully populated the directory.  Returns 0 or a negative errno.
 */
static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		/* drop the reference kset_find_obj() took on the hit */
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}
1781 1781
/*
 * Build the full /sys/module/<name> hierarchy: base kobject, "holders"
 * directory, parameter files, modinfo attributes, usage links and
 * section/notes attributes.  The KOBJ_ADD uevent is emitted only once
 * everything is in place.  On failure the goto chain unwinds exactly
 * the pieces created so far, in reverse order.  Returns 0 or a
 * negative errno.
 */
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	/* The remaining pieces do not report failure. */
	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}
1823 1823
/*
 * Final sysfs teardown: remove notes and section attributes, then drop
 * the module kobject itself (mod_kobject_put waits for its release).
 */
static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}
1830 1830
/* Initialise the per-module mutex guarding its parameters. */
static void init_param_lock(struct module *mod)
{
	mutex_init(&mod->param_lock);
}
1835 #else /* !CONFIG_SYSFS */ 1835 #else /* !CONFIG_SYSFS */
1836 1836
/* No-op stub when CONFIG_SYSFS is disabled: always succeeds. */
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}
1844 1844
/* No-op stub when CONFIG_SYSFS is disabled. */
static void mod_sysfs_fini(struct module *mod)
{
}
1848 1848
/* No-op stub when CONFIG_SYSFS is disabled. */
static void module_remove_modinfo_attrs(struct module *mod)
{
}
1852 1852
/* No-op stub when CONFIG_SYSFS is disabled. */
static void del_usage_links(struct module *mod)
{
}
1856 1856
/* No-op stub when CONFIG_SYSFS is disabled. */
static void init_param_lock(struct module *mod)
{
}
1860 #endif /* CONFIG_SYSFS */ 1860 #endif /* CONFIG_SYSFS */
1861 1861
/*
 * Undo mod_sysfs_setup(), removing each piece in roughly the reverse
 * order of creation before dropping the module kobject itself.
 */
static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
1871 1871
1872 #ifdef CONFIG_DEBUG_SET_MODULE_RONX 1872 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1873 /* 1873 /*
1874 * LKM RO/NX protection: protect module's text/ro-data 1874 * LKM RO/NX protection: protect module's text/ro-data
1875 * from modification and any data from execution. 1875 * from modification and any data from execution.
1876 */ 1876 */
1877 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) 1877 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1878 { 1878 {
1879 unsigned long begin_pfn = PFN_DOWN((unsigned long)start); 1879 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1880 unsigned long end_pfn = PFN_DOWN((unsigned long)end); 1880 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1881 1881
1882 if (end_pfn > begin_pfn) 1882 if (end_pfn > begin_pfn)
1883 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); 1883 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1884 } 1884 }
1885 1885
/*
 * Protect a module region laid out as [text | ro-data | rw-data]:
 * mark [base, base+ro_size) read-only and [base+text_size,
 * base+total_size) non-executable, at page granularity.
 * NOTE(review): assumes text_size <= ro_size <= total_size, as
 * produced by layout_sections() -- confirm if reusing elsewhere.
 */
static void set_section_ro_nx(void *base,
			      unsigned long text_size,
			      unsigned long ro_size,
			      unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}
1915 1915
1916 static void unset_module_core_ro_nx(struct module *mod) 1916 static void unset_module_core_ro_nx(struct module *mod)
1917 { 1917 {
1918 set_page_attributes(mod->module_core + mod->core_text_size, 1918 set_page_attributes(mod->module_core + mod->core_text_size,
1919 mod->module_core + mod->core_size, 1919 mod->module_core + mod->core_size,
1920 set_memory_x); 1920 set_memory_x);
1921 set_page_attributes(mod->module_core, 1921 set_page_attributes(mod->module_core,
1922 mod->module_core + mod->core_ro_size, 1922 mod->module_core + mod->core_ro_size,
1923 set_memory_rw); 1923 set_memory_rw);
1924 } 1924 }
1925 1925
1926 static void unset_module_init_ro_nx(struct module *mod) 1926 static void unset_module_init_ro_nx(struct module *mod)
1927 { 1927 {
1928 set_page_attributes(mod->module_init + mod->init_text_size, 1928 set_page_attributes(mod->module_init + mod->init_text_size,
1929 mod->module_init + mod->init_size, 1929 mod->module_init + mod->init_size,
1930 set_memory_x); 1930 set_memory_x);
1931 set_page_attributes(mod->module_init, 1931 set_page_attributes(mod->module_init,
1932 mod->module_init + mod->init_ro_size, 1932 mod->module_init + mod->init_ro_size,
1933 set_memory_rw); 1933 set_memory_rw);
1934 } 1934 }
1935 1935
1936 /* Iterate through all modules and set each module's text as RW */ 1936 /* Iterate through all modules and set each module's text as RW */
1937 void set_all_modules_text_rw(void) 1937 void set_all_modules_text_rw(void)
1938 { 1938 {
1939 struct module *mod; 1939 struct module *mod;
1940 1940
1941 mutex_lock(&module_mutex); 1941 mutex_lock(&module_mutex);
1942 list_for_each_entry_rcu(mod, &modules, list) { 1942 list_for_each_entry_rcu(mod, &modules, list) {
1943 if (mod->state == MODULE_STATE_UNFORMED) 1943 if (mod->state == MODULE_STATE_UNFORMED)
1944 continue; 1944 continue;
1945 if ((mod->module_core) && (mod->core_text_size)) { 1945 if ((mod->module_core) && (mod->core_text_size)) {
1946 set_page_attributes(mod->module_core, 1946 set_page_attributes(mod->module_core,
1947 mod->module_core + mod->core_text_size, 1947 mod->module_core + mod->core_text_size,
1948 set_memory_rw); 1948 set_memory_rw);
1949 } 1949 }
1950 if ((mod->module_init) && (mod->init_text_size)) { 1950 if ((mod->module_init) && (mod->init_text_size)) {
1951 set_page_attributes(mod->module_init, 1951 set_page_attributes(mod->module_init,
1952 mod->module_init + mod->init_text_size, 1952 mod->module_init + mod->init_text_size,
1953 set_memory_rw); 1953 set_memory_rw);
1954 } 1954 }
1955 } 1955 }
1956 mutex_unlock(&module_mutex); 1956 mutex_unlock(&module_mutex);
1957 } 1957 }
1958 1958
1959 /* Iterate through all modules and set each module's text as RO */ 1959 /* Iterate through all modules and set each module's text as RO */
1960 void set_all_modules_text_ro(void) 1960 void set_all_modules_text_ro(void)
1961 { 1961 {
1962 struct module *mod; 1962 struct module *mod;
1963 1963
1964 mutex_lock(&module_mutex); 1964 mutex_lock(&module_mutex);
1965 list_for_each_entry_rcu(mod, &modules, list) { 1965 list_for_each_entry_rcu(mod, &modules, list) {
1966 if (mod->state == MODULE_STATE_UNFORMED) 1966 if (mod->state == MODULE_STATE_UNFORMED)
1967 continue; 1967 continue;
1968 if ((mod->module_core) && (mod->core_text_size)) { 1968 if ((mod->module_core) && (mod->core_text_size)) {
1969 set_page_attributes(mod->module_core, 1969 set_page_attributes(mod->module_core,
1970 mod->module_core + mod->core_text_size, 1970 mod->module_core + mod->core_text_size,
1971 set_memory_ro); 1971 set_memory_ro);
1972 } 1972 }
1973 if ((mod->module_init) && (mod->init_text_size)) { 1973 if ((mod->module_init) && (mod->init_text_size)) {
1974 set_page_attributes(mod->module_init, 1974 set_page_attributes(mod->module_init,
1975 mod->module_init + mod->init_text_size, 1975 mod->module_init + mod->init_text_size,
1976 set_memory_ro); 1976 set_memory_ro);
1977 } 1977 }
1978 } 1978 }
1979 mutex_unlock(&module_mutex); 1979 mutex_unlock(&module_mutex);
1980 } 1980 }
1981 #else 1981 #else
/* No-op stubs when CONFIG_DEBUG_SET_MODULE_RONX is disabled. */
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
1985 #endif 1985 #endif
1986 1986
/*
 * Free memory obtained from module_alloc().  Weak so architectures
 * with special module address spaces can override; default is vfree().
 */
void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}
1991 1991
/* Arch hook run while freeing a module; weak default does nothing. */
void __weak module_arch_cleanup(struct module *mod)
{
}
1995 1995
/* Arch hook run before freeing the init region; weak default no-op. */
void __weak module_arch_freeing_init(struct module *mod)
{
}
1999 1999
/* Free a module, remove from lists, etc.
 *
 * Tear-down order matters here: the module is first marked UNFORMED so
 * concurrent users skip it, then unlinked with RCU-safe list removal,
 * and only after synchronize_sched() are the backing allocations
 * released.
 */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that noone uses it while it's being deconstructed. */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
	synchronize_sched();
	mutex_unlock(&module_mutex);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes; relies on the preceding sync_rcu(). */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_memfree(mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}
2054 2054
/*
 * Look up an exported symbol by name, pinning its owning module, and
 * return the symbol's address (NULL if not found or the owner could
 * not be pinned).  Preemption is disabled across the lookup so the
 * owner cannot vanish before the reference is taken.
 * NOTE(review): the test reads inverted -- it relies on
 * strong_try_module_get() returning nonzero (an error code) on
 * failure and 0 on success; confirm against its definition.
 */
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
2069 2069
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 *
 * Returns 0, or -ENOEXEC on the first duplicate found.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	/* Every export table this module carries, with its length. */
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
2106 2106
/* Change all symbols so that st_value encodes the pointer directly.
 *
 * Walks the module's symbol table (skipping entry 0) and resolves each
 * symbol in place: undefined symbols are looked up in the kernel and
 * other modules, section-relative symbols get their section (or percpu)
 * base address added, absolute symbols are left alone, and common
 * symbols are rejected.  Returns 0 on success or a negative errno; the
 * walk continues past failures so every problem is reported.
 */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
2170 2170
2171 static int apply_relocations(struct module *mod, const struct load_info *info) 2171 static int apply_relocations(struct module *mod, const struct load_info *info)
2172 { 2172 {
2173 unsigned int i; 2173 unsigned int i;
2174 int err = 0; 2174 int err = 0;
2175 2175
2176 /* Now do relocations. */ 2176 /* Now do relocations. */
2177 for (i = 1; i < info->hdr->e_shnum; i++) { 2177 for (i = 1; i < info->hdr->e_shnum; i++) {
2178 unsigned int infosec = info->sechdrs[i].sh_info; 2178 unsigned int infosec = info->sechdrs[i].sh_info;
2179 2179
2180 /* Not a valid relocation section? */ 2180 /* Not a valid relocation section? */
2181 if (infosec >= info->hdr->e_shnum) 2181 if (infosec >= info->hdr->e_shnum)
2182 continue; 2182 continue;
2183 2183
2184 /* Don't bother with non-allocated sections */ 2184 /* Don't bother with non-allocated sections */
2185 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) 2185 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2186 continue; 2186 continue;
2187 2187
2188 if (info->sechdrs[i].sh_type == SHT_REL) 2188 if (info->sechdrs[i].sh_type == SHT_REL)
2189 err = apply_relocate(info->sechdrs, info->strtab, 2189 err = apply_relocate(info->sechdrs, info->strtab,
2190 info->index.sym, i, mod); 2190 info->index.sym, i, mod);
2191 else if (info->sechdrs[i].sh_type == SHT_RELA) 2191 else if (info->sechdrs[i].sh_type == SHT_RELA)
2192 err = apply_relocate_add(info->sechdrs, info->strtab, 2192 err = apply_relocate_add(info->sechdrs, info->strtab,
2193 info->index.sym, i, mod); 2193 info->index.sym, i, mod);
2194 if (err < 0) 2194 if (err < 0)
2195 break; 2195 break;
2196 } 2196 }
2197 return err; 2197 return err;
2198 } 2198 }
2199 2199
/* Additional bytes needed by arch in front of individual sections */
/* Weak hook: architectures that need extra space before a section
 * (e.g. for trampolines) override this. */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}
2207 2207
2208 /* Update size with this section: return offset. */ 2208 /* Update size with this section: return offset. */
2209 static long get_offset(struct module *mod, unsigned int *size, 2209 static long get_offset(struct module *mod, unsigned int *size,
2210 Elf_Shdr *sechdr, unsigned int section) 2210 Elf_Shdr *sechdr, unsigned int section)
2211 { 2211 {
2212 long ret; 2212 long ret;
2213 2213
2214 *size += arch_mod_section_prepend(mod, section); 2214 *size += arch_mod_section_prepend(mod, section);
2215 ret = ALIGN(*size, sechdr->sh_addralign ?: 1); 2215 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2216 *size = ret + sechdr->sh_size; 2216 *size = ret + sechdr->sh_size;
2217 return ret; 2217 return ret;
2218 } 2218 }
2219 2219
2220 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 2220 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2221 might -- code, read-only data, read-write data, small data. Tally 2221 might -- code, read-only data, read-write data, small data. Tally
2222 sizes, and place the offsets into sh_entsize fields: high bit means it 2222 sizes, and place the offsets into sh_entsize fields: high bit means it
2223 belongs in init. */ 2223 belongs in init. */
/*
 * Lay out the SHF_ALLOC sections of the module, assigning each an offset
 * (stashed in sh_entsize) within either the core or the init region.
 *
 * Sections are placed in four passes — executable, read-only, read-write,
 * then arch-specific "small" — and after the relevant passes the running
 * size is recorded (core_text_size, core_ro_size, and the init
 * equivalents) so protections can later be applied per region.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	/* ~0UL marks a section as "not yet placed". */
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			/* Skip sections not matching this pass, already
			 * placed, or belonging to the init region. */
			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			/* Same matching as above, but .init sections only. */
			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			/* INIT_OFFSET_MASK tags the offset as init-region. */
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
2299 2299
2300 static void set_license(struct module *mod, const char *license) 2300 static void set_license(struct module *mod, const char *license)
2301 { 2301 {
2302 if (!license) 2302 if (!license)
2303 license = "unspecified"; 2303 license = "unspecified";
2304 2304
2305 if (!license_is_gpl_compatible(license)) { 2305 if (!license_is_gpl_compatible(license)) {
2306 if (!test_taint(TAINT_PROPRIETARY_MODULE)) 2306 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2307 pr_warn("%s: module license '%s' taints kernel.\n", 2307 pr_warn("%s: module license '%s' taints kernel.\n",
2308 mod->name, license); 2308 mod->name, license);
2309 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2309 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2310 LOCKDEP_NOW_UNRELIABLE); 2310 LOCKDEP_NOW_UNRELIABLE);
2311 } 2311 }
2312 } 2312 }
2313 2313
2314 /* Parse tag=value strings from .modinfo section */ 2314 /* Parse tag=value strings from .modinfo section */
/*
 * Advance to the next NUL-terminated string in the .modinfo section,
 * decrementing *secsize for each byte consumed.  Returns NULL when the
 * section is exhausted before another string begins.
 */
static char *next_string(char *string, unsigned long *secsize)
{
	char *p = string;

	/* Step off the end of the current string. */
	for (; *p; p++) {
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Then skip the NUL terminator and any zero padding. */
	for (; !*p; p++) {
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return p;
}
2332 2332
2333 static char *get_modinfo(struct load_info *info, const char *tag) 2333 static char *get_modinfo(struct load_info *info, const char *tag)
2334 { 2334 {
2335 char *p; 2335 char *p;
2336 unsigned int taglen = strlen(tag); 2336 unsigned int taglen = strlen(tag);
2337 Elf_Shdr *infosec = &info->sechdrs[info->index.info]; 2337 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2338 unsigned long size = infosec->sh_size; 2338 unsigned long size = infosec->sh_size;
2339 2339
2340 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) { 2340 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2341 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 2341 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2342 return p + taglen + 1; 2342 return p + taglen + 1;
2343 } 2343 }
2344 return NULL; 2344 return NULL;
2345 } 2345 }
2346 2346
2347 static void setup_modinfo(struct module *mod, struct load_info *info) 2347 static void setup_modinfo(struct module *mod, struct load_info *info)
2348 { 2348 {
2349 struct module_attribute *attr; 2349 struct module_attribute *attr;
2350 int i; 2350 int i;
2351 2351
2352 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2352 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2353 if (attr->setup) 2353 if (attr->setup)
2354 attr->setup(mod, get_modinfo(info, attr->attr.name)); 2354 attr->setup(mod, get_modinfo(info, attr->attr.name));
2355 } 2355 }
2356 } 2356 }
2357 2357
2358 static void free_modinfo(struct module *mod) 2358 static void free_modinfo(struct module *mod)
2359 { 2359 {
2360 struct module_attribute *attr; 2360 struct module_attribute *attr;
2361 int i; 2361 int i;
2362 2362
2363 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2363 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2364 if (attr->free) 2364 if (attr->free)
2365 attr->free(mod); 2365 attr->free(mod);
2366 } 2366 }
2367 } 2367 }
2368 2368
2369 #ifdef CONFIG_KALLSYMS 2369 #ifdef CONFIG_KALLSYMS
2370 2370
/*
 * Lookup symbol in given range of kernel_symbols.  The range must be
 * sorted by name (cmp_name ordering), as required by bsearch().
 */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}
2379 2379
2380 static int is_exported(const char *name, unsigned long value, 2380 static int is_exported(const char *name, unsigned long value,
2381 const struct module *mod) 2381 const struct module *mod)
2382 { 2382 {
2383 const struct kernel_symbol *ks; 2383 const struct kernel_symbol *ks;
2384 if (!mod) 2384 if (!mod)
2385 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); 2385 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2386 else 2386 else
2387 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); 2387 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2388 return ks != NULL && ks->value == value; 2388 return ks != NULL && ks->value == value;
2389 } 2389 }
2390 2390
/*
 * As per nm: classify @sym with the single-letter type code nm(1) uses,
 * based on its binding, section index, and section flags.
 */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';	/* weak object */
		else
			return 'w';	/* weak symbol */
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';	/* undefined */
	if (sym->st_shndx == SHN_ABS)
		return 'a';	/* absolute */
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';	/* reserved/unknown section index */
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';	/* text */
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';	/* read-only data */
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';	/* small data */
		else
			return 'd';	/* data */
	}
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';	/* small bss */
		else
			return 'b';	/* bss */
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug")) {
		return 'n';	/* debugging section */
	}
	return '?';
}
2431 2431
/*
 * Decide whether a symbol from the temporary symtab should be kept in
 * the permanent "core" symbol table after the module's init region is
 * freed.
 */
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum)
{
	const Elf_Shdr *sec;

	/* Discard undefined, out-of-range, and nameless symbols. */
	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	/* Keep only symbols in allocated (and, unless KALLSYMS_ALL,
	 * executable) sections that are not part of the init region. */
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}
2452 2452
/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
 */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		/* Symbol 0 is the mandatory null entry; always kept. */
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_size += strtab_size;
	mod->core_size = debug_align(mod->core_size);

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	mod->init_size = debug_align(mod->init_size);
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
2498 2498
/*
 * Point the module at its final symbol/string tables and build the
 * trimmed "core" copies that survive after the init region is freed.
 */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	mod->symtab = (void *)symsec->sh_addr;
	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);

	/* Copy the core-resident symbols and their strings into the space
	 * layout_symtab() reserved at the end of the core region. */
	mod->core_symtab = dst = mod->module_core + info->symoffs;
	mod->core_strtab = s = mod->module_core + info->stroffs;
	src = mod->symtab;
	for (ndst = i = 0; i < mod->num_symtab; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			dst[ndst] = src[i];
			/* st_name becomes an offset into the core strtab. */
			dst[ndst++].st_name = s - mod->core_strtab;
			s += strlcpy(s, &mod->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_num_syms = ndst;
}
#else
/* Without CONFIG_KALLSYMS there is no symbol table to lay out or copy. */
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */
2539 2539
/*
 * Register the module's dynamic-debug descriptors.  Compiles to a NULL
 * check only when CONFIG_DYNAMIC_DEBUG is disabled.
 */
static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}
2550 2550
2551 static void dynamic_debug_remove(struct _ddebug *debug) 2551 static void dynamic_debug_remove(struct _ddebug *debug)
2552 { 2552 {
2553 if (debug) 2553 if (debug)
2554 ddebug_remove_module(debug->modname); 2554 ddebug_remove_module(debug->modname);
2555 } 2555 }
2556 2556
/*
 * Allocate executable memory for module text/data.  Weak so that
 * architectures can override it (e.g. to allocate near the kernel text).
 */
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}
2561 2561
#ifdef CONFIG_DEBUG_KMEMLEAK
/*
 * Tell kmemleak which parts of the module to scan for pointers, so
 * objects referenced only from module data aren't reported as leaks.
 */
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that's not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif
2588 2588
#ifdef CONFIG_MODULE_SIG
/*
 * Verify the detached signature appended to the module image (after the
 * MODULE_SIG_STRING marker).  On success info->len is shrunk to exclude
 * the signature and info->sig_ok is set.  A missing key/signature
 * (-ENOKEY) is only fatal when signature enforcement is enabled.
 */
static int module_sig_check(struct load_info *info)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	if (info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */
2620 2620
/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
static int elf_header_check(struct load_info *info)
{
	/* The image must at least contain a full ELF header. */
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	/* Must be a relocatable ELF object for this architecture. */
	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || info->hdr->e_type != ET_REL
	    || !elf_check_arch(info->hdr)
	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	/* The section header table must lie entirely within the image. */
	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff))
		return -ENOEXEC;

	return 0;
}
2640 2640
2641 #define COPY_CHUNK_SIZE (16*PAGE_SIZE) 2641 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2642 2642
2643 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 2643 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2644 { 2644 {
2645 do { 2645 do {
2646 unsigned long n = min(len, COPY_CHUNK_SIZE); 2646 unsigned long n = min(len, COPY_CHUNK_SIZE);
2647 2647
2648 if (copy_from_user(dst, usrc, n) != 0) 2648 if (copy_from_user(dst, usrc, n) != 0)
2649 return -EFAULT; 2649 return -EFAULT;
2650 cond_resched(); 2650 cond_resched();
2651 dst += n; 2651 dst += n;
2652 usrc += n; 2652 usrc += n;
2653 len -= n; 2653 len -= n;
2654 } while (len); 2654 } while (len);
2655 return 0; 2655 return 0;
2656 } 2656 }
2657 2657
/* Sets info->hdr and info->len.  Copies the whole module image from
 * userspace into a vmalloc'd buffer the caller must free via free_copy(). */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				  struct load_info *info)
{
	int err;

	info->len = len;
	/* Too small to even hold an ELF header. */
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	/* Let the LSM veto loading a module from this source. */
	err = security_kernel_module_from_file(NULL);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len,
			      GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}
2685 2685
/* Sets info->hdr and info->len.  Reads the whole module image from the
 * file descriptor into a vmalloc'd buffer (freed later via free_copy()). */
static int copy_module_from_fd(int fd, struct load_info *info)
{
	struct fd f = fdget(fd);
	int err;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -ENOEXEC;

	/* Let the LSM veto loading a module from this file. */
	err = security_kernel_module_from_file(f.file);
	if (err)
		goto out;

	err = vfs_getattr(&f.file->f_path, &stat);
	if (err)
		goto out;

	if (stat.size > INT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		err = -EINVAL;
		goto out;
	}

	info->hdr = vmalloc(stat.size);
	if (!info->hdr) {
		err = -ENOMEM;
		goto out;
	}

	/* Read until the file is consumed; a zero-byte read ends early. */
	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(info->hdr);
			err = bytes;
			goto out;
		}
		if (bytes == 0)
			break;
		pos += bytes;
	}
	info->len = pos;

out:
	fdput(f);
	return err;
}
2742 2742
/* Release the temporary in-memory copy of the module image. */
static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}
2747 2747
/*
 * Validate each section header against the image length and rewrite
 * sh_addr to point at the section's data inside the temporary copy.
 * Also locates (and un-ALLOCs) the .modinfo and __versions sections.
 */
static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Sections with file contents must fit inside the image. */
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}
2784 2784
2785 /* 2785 /*
2786 * Set up our basic convenience variables (pointers to section headers, 2786 * Set up our basic convenience variables (pointers to section headers,
2787 * search for module section index etc), and do some basic section 2787 * search for module section index etc), and do some basic section
2788 * verification. 2788 * verification.
2789 * 2789 *
2790 * Return the temporary module pointer (we'll replace it with the final 2790 * Return the temporary module pointer (we'll replace it with the final
2791 * one when we move the module sections around). 2791 * one when we move the module sections around).
2792 */ 2792 */
/*
 * Set up the convenience fields of *info (section header array, section
 * string table, indices of the well-known sections) and perform basic
 * sanity checks on the module image.
 *
 * Returns the temporary module pointer (inside the userspace copy of the
 * image; it is replaced with the final one once sections are moved), or an
 * ERR_PTR() on failure.
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			/* sh_link of a SYMTAB section is its string table. */
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	/* index.sym stays 0 if the loop above found no SHT_SYMTAB. */
	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}
2840 2840
/*
 * Validate the .modinfo contents (vermagic, intree/staging markers) and
 * apply the resulting taints and license to @mod.
 *
 * Returns 0 on success or a negative errno (-ENOEXEC on vermagic mismatch,
 * or whatever try_to_force_load() returns).
 */
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	/* Caller asked us to skip the vermagic comparison entirely. */
	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	/* Modules built outside the kernel tree taint the kernel. */
	if (!get_modinfo(info, "intree"))
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}
2874 2874
/*
 * Locate all the special sections the module loader and its subsystems
 * (params, exported symbols, tracing, extables, ...) care about, and record
 * their addresses and element counts in @mod / @info.
 *
 * section_objs()/section_addr() return NULL (and a zero count) when a
 * section is absent, so missing optional sections are harmless.
 *
 * Returns 0 on success, -EINVAL if both .ctors and .init_array exist.
 */
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
				sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
		       mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					sizeof(*mod->jump_entries),
					&mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_enums = section_objs(info, "_ftrace_enum_map",
					sizeof(*mod->trace_enums),
					&mod->num_trace_enums);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}
2960 2960
/*
 * Allocate the final core and init memory regions for @mod and copy every
 * SHF_ALLOC section into place.  On return, each copied section's sh_addr
 * points at its final location.
 *
 * Section destinations were pre-computed by layout_sections(): the offset
 * lives in sh_entsize, with INIT_OFFSET_MASK set for init-region sections.
 *
 * Returns 0 on success, -ENOMEM if either allocation fails (the core
 * region is freed before returning in that case).
 */
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	/* kmemleak_not_leak(NULL) is safe, so this may precede the check. */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	if (mod->init_size) {
		ptr = module_alloc(mod->init_size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree(mod->module_core);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_size);
		mod->module_init = ptr;
	} else
		mod->module_init = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		/* SHT_NOBITS (.bss-like) sections have no file data to copy. */
		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
3023 3023
/*
 * Apply special-case taints for known license-circumventing modules and,
 * with CONFIG_MODVERSIONS, verify that every exported-symbol table comes
 * with a matching CRC table.
 *
 * Returns 0 on success; with MODVERSIONS and missing CRCs, returns the
 * result of try_to_force_load() (0 only if forcing is permitted).
 */
static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}
3059 3059
/*
 * Flush the instruction cache over the module's init and core regions,
 * temporarily switching to KERNEL_DS so the flush runs in kernel context.
 */
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}
3082 3082
/*
 * Weak default: architectures may override this to adjust section contents
 * and sizes before layout (e.g. to add PLT/GOT-style trampoline sections).
 * The generic version does nothing and reports success.
 */
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
3090 3090
/*
 * Parse and validate the image, let the architecture frob sections, lay out
 * the final core/init regions, and move all SHF_ALLOC sections into them.
 *
 * Returns the module pointer inside its *final* location (not the temporary
 * copy), or an ERR_PTR() on any failure.
 */
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	/* Module within temporary copy. */
	struct module *mod;
	int err;

	mod = setup_load_info(info, flags);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}
3130 3130
/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	/* Undo layout_and_allocate(): percpu area, arch init state, regions. */
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	/* mod itself lives inside module_core, hence the comment above. */
	module_memfree(mod->module_init);
	module_memfree(mod->module_core);
}
3139 3139
/*
 * Weak default for arch-specific finalization after relocation; the
 * generic version does nothing and reports success.
 */
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
3146 3146
/*
 * Work that must run after relocations are applied: sort the exception
 * table, copy the relocated per-cpu section into the real per-cpu area,
 * set up kallsyms, and let the architecture finalize the module.
 *
 * Returns the result of module_finalize() (0 on success).
 */
static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}
3162 3162
3163 /* Is this module of this name done loading? No locks held. */ 3163 /* Is this module of this name done loading? No locks held. */
3164 static bool finished_loading(const char *name) 3164 static bool finished_loading(const char *name)
3165 { 3165 {
3166 struct module *mod; 3166 struct module *mod;
3167 bool ret; 3167 bool ret;
3168 3168
3169 /* 3169 /*
3170 * The module_mutex should not be a heavily contended lock; 3170 * The module_mutex should not be a heavily contended lock;
3171 * if we get the occasional sleep here, we'll go an extra iteration 3171 * if we get the occasional sleep here, we'll go an extra iteration
3172 * in the wait_event_interruptible(), which is harmless. 3172 * in the wait_event_interruptible(), which is harmless.
3173 */ 3173 */
3174 sched_annotate_sleep(); 3174 sched_annotate_sleep();
3175 mutex_lock(&module_mutex); 3175 mutex_lock(&module_mutex);
3176 mod = find_module_all(name, strlen(name), true); 3176 mod = find_module_all(name, strlen(name), true);
3177 ret = !mod || mod->state == MODULE_STATE_LIVE 3177 ret = !mod || mod->state == MODULE_STATE_LIVE
3178 || mod->state == MODULE_STATE_GOING; 3178 || mod->state == MODULE_STATE_GOING;
3179 mutex_unlock(&module_mutex); 3179 mutex_unlock(&module_mutex);
3180 3180
3181 return ret; 3181 return ret;
3182 } 3182 }
3183 3183
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long idx;

	/* Invoke each constructor recorded in .ctors/.init_array, in order. */
	for (idx = 0; idx < mod->num_ctors; idx++)
		(*mod->ctors[idx])();
#endif
}
3194 3194
/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
	struct rcu_head rcu;	/* queued via call_rcu_sched() */
	void *module_init;	/* init region to free after grace period */
};
3200 3200
3201 static void do_free_init(struct rcu_head *head) 3201 static void do_free_init(struct rcu_head *head)
3202 { 3202 {
3203 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu); 3203 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3204 module_memfree(m->module_init); 3204 module_memfree(m->module_init);
3205 kfree(m); 3205 kfree(m);
3206 } 3206 }
3207 3207
/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	/* Allocated up front so the success path cannot fail after init ran. */
	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->module_init;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
		async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch symbol tables to the core-only copies; init ones go away. */
	mod->num_symtab = mod->core_num_syms;
	mod->symtab = mod->core_symtab;
	mod->strtab = mod->core_strtab;
#endif
	mod_tree_remove_init(mod);
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_sched(), but we don't want to slow down the success
	 * path, so use actual RCU here.
	 */
	call_rcu_sched(&freeinit->rcu, do_free_init);
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_sched();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}
3313 3313
3314 static int may_init_module(void) 3314 static int may_init_module(void)
3315 { 3315 {
3316 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3316 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3317 return -EPERM; 3317 return -EPERM;
3318 3318
3319 return 0; 3319 return 0;
3320 } 3320 }
3321 3321
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 *
 * The module is inserted in MODULE_STATE_UNFORMED so that everyone
 * else (kallsyms, /proc/modules, ...) knows to skip it.  If a module
 * of the same name is already loading, we sleep until its fate is
 * decided and retry; if one is already live, we fail with -EEXIST.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			/* Interrupted by a signal: give up the load. */
			if (err)
				goto out_unlocked;
			/* The other loader finished (or failed); recheck. */
			goto again;
		}
		/* Same name already live: refuse the duplicate. */
		err = -EEXIST;
		goto out;
	}
	mod_update_bounds(mod);
	/* RCU publication: readers may already walk the list locklessly. */
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}
3361 3361
/*
 * complete_formation - final checks and transition to MODULE_STATE_COMING.
 *
 * Under module_mutex: verify no exported-symbol clashes, register the
 * module's bug table, then apply RO/NX protection to the core and init
 * regions.  Only after the state flips to COMING (still under the lock)
 * are the MODULE_STATE_COMING notifiers called -- without the lock, as
 * the chain is blocking.  Returns 0 on success or a negative errno.
 */
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
				mod->core_text_size,
				mod->core_ro_size,
				mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
				mod->init_text_size,
				mod->init_ro_size,
				mod->init_size);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}
3401 3401
3402 static int unknown_module_param_cb(char *param, char *val, const char *modname, 3402 static int unknown_module_param_cb(char *param, char *val, const char *modname,
3403 void *arg) 3403 void *arg)
3404 { 3404 {
3405 struct module *mod = arg; 3405 struct module *mod = arg;
3406 int ret; 3406 int ret;
3407 3407
3408 if (strcmp(param, "async_probe") == 0) { 3408 if (strcmp(param, "async_probe") == 0) {
3409 mod->async_probe_requested = true; 3409 mod->async_probe_requested = true;
3410 return 0; 3410 return 0;
3411 } 3411 }
3412 3412
3413 /* Check for magic 'dyndbg' arg */ 3413 /* Check for magic 'dyndbg' arg */
3414 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3414 ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3415 if (ret != 0) 3415 if (ret != 0)
3416 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3416 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3417 return 0; 3417 return 0;
3418 } 3418 }
3419 3419
/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections.

   This is the main loader: verify signature and ELF header, lay out and
   allocate memory, claim the name, resolve symbols/relocations, then
   hand off to do_init_module().  On any failure the labels below unwind
   exactly the work done so far, in reverse order -- do not reorder them.
   In particular, ftrace_module_init() allocates pages for the module's
   mcount locations; if we fail before do_init_module() (whose GOING
   notifier would otherwise clean them up), ftrace_release_mod() at
   free_module: is what prevents that memory from leaking. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;
	char *after_dashes;

	err = module_sig_check(info);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, NULL,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto bug_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to syfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto bug_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	/* We hit COMING in complete_formation(), so announce GOING. */
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);

	/* we can't deallocate the module until we clear memory protection */
	unset_module_init_ro_nx(mod);
	unset_module_core_ro_nx(mod);

 ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_sched();
	mutex_unlock(&module_mutex);
 free_module:
	/*
	 * Ftrace needs to clean up what it initialized.
	 * This does nothing if ftrace_module_init() wasn't called,
	 * but it must be called outside of module_mutex.
	 */
	ftrace_release_mod(mod);
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}
3582 3588
/*
 * init_module(2): load a module from a userspace image.
 * @umod:  userspace pointer to the whole ELF image.
 * @len:   size of that image in bytes.
 * @uargs: userspace module parameter string.
 *
 * Copies the image into kernel memory and hands it to load_module()
 * with no flags.  Returns 0 on success or a negative errno.
 */
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	/* CAP_SYS_MODULE / modules_disabled check first. */
	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	/* load_module() owns info from here and frees it on all paths. */
	return load_module(&info, uargs, 0);
}
3602 3608
/*
 * finit_module(2): load a module from a file descriptor.
 * @fd:    descriptor referring to the module file.
 * @uargs: userspace module parameter string.
 * @flags: MODULE_INIT_IGNORE_MODVERSIONS / MODULE_INIT_IGNORE_VERMAGIC.
 *
 * Like init_module(2) but reads the image from @fd, and accepts flags
 * that relax version checks.  Returns 0 on success or a negative errno.
 */
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	/* Reject any flag bits we do not understand. */
	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = copy_module_from_fd(fd, &info);
	if (err)
		return err;

	/* load_module() owns info from here and frees it on all paths. */
	return load_module(&info, uargs, flags);
}
3624 3630
/*
 * within - does @addr fall inside the region [@start, @start + @size)?
 * Returns nonzero when it does.
 */
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	void *p = (void *)addr;

	return p >= start && p < start + size;
}
3629 3635
3630 #ifdef CONFIG_KALLSYMS 3636 #ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d (plus $x), and also compiler-local
 * ".L" labels.  Returns true for symbols that should be skipped.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	/* Local labels such as ".Lfoo" carry no useful information. */
	if (str[0] == '.' && str[1] == 'L')
		return true;
	if (str[0] != '$')
		return false;
	/*
	 * NOTE(review): a lone "$" makes strchr() match the terminating
	 * NUL and the str[2] read below goes one past the string --
	 * assumed symbol names are never a bare "$"; confirm.
	 */
	if (!strchr("axtd", str[1]))
		return false;
	return str[2] == '\0' || str[2] == '.';
}
3642 3648
/*
 * get_ksymbol - resolve @addr to the closest preceding named symbol in @mod.
 * @size:   if non-NULL, receives the distance to the next symbol (or the
 *          end of the text region), i.e. an upper bound on the symbol size.
 * @offset: if non-NULL, receives @addr's offset into that symbol.
 *
 * Returns a pointer into the module's string table, or NULL if no
 * suitable symbol precedes @addr.  Caller must prevent the module from
 * going away (preemption disabled or module_mutex held).
 */
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;

	/* At worse, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < mod->num_symtab; i++) {
		if (mod->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (mod->symtab[i].st_value <= addr
		    && mod->symtab[i].st_value > mod->symtab[best].st_value
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			best = i;
		/* Track the lowest symbol above addr to bound the size. */
		if (mod->symtab[i].st_value > addr
		    && mod->symtab[i].st_value < nextval
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			nextval = mod->symtab[i].st_value;
	}

	/* best == 0 means nothing at or below addr was found. */
	if (!best)
		return NULL;

	if (size)
		*size = nextval - mod->symtab[best].st_value;
	if (offset)
		*offset = addr - mod->symtab[best].st_value;
	return mod->strtab + mod->symtab[best].st_name;
}
3686 3692
/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption.
 *
 * On success the symbol name is copied into the caller-supplied
 * @namebuf (so it stays valid after preemption is re-enabled) and a
 * pointer to @namebuf is returned; @modname, @size and @offset are
 * filled in when non-NULL.
 */
const char *module_address_lookup(unsigned long addr,
			    unsigned long *size,
			    unsigned long *offset,
			    char **modname,
			    char *namebuf)
{
	const char *ret = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (mod) {
		if (modname)
			*modname = mod->name;
		ret = get_ksymbol(mod, addr, size, offset);
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		/*
		 * NOTE(review): strncpy() does not NUL-terminate on
		 * truncation -- presumably callers pass a zeroed
		 * KSYM_NAME_LEN buffer; confirm before relying on it.
		 */
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}
3714 3720
/*
 * lookup_module_symbol_name - find the symbol name covering @addr.
 *
 * Copies the name into @symname (KSYM_NAME_LEN bytes).  Returns 0 on
 * success, -ERANGE if @addr is not inside any live module or has no
 * named symbol.  Walks the module list under RCU (preemption disabled).
 */
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		/* Unformed modules are not yet usable: skip them. */
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			/* Copy before enabling preemption: sym points
			 * into the module's own string table. */
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
3738 3744
/*
 * lookup_module_symbol_attrs - like lookup_module_symbol_name() but also
 * reports the symbol's @size and @offset, and the owning module's name.
 * @modname (MODULE_NAME_LEN) and @name (KSYM_NAME_LEN) are each optional.
 *
 * Returns 0 on success, -ERANGE if @addr is not covered by a named
 * symbol of any live module.
 */
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		/* Unformed modules are not yet usable: skip them. */
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			/* Copy out before re-enabling preemption. */
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
3766 3772
/*
 * module_get_kallsym - return the @symnum'th symbol across all modules.
 *
 * @symnum indexes the concatenation of every live module's symbol
 * table, in module-list order.  Fills in @value, @type (st_info byte),
 * @name, @module_name and @exported.  Returns 0 on success, -ERANGE
 * when @symnum is past the last module symbol.
 */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
			char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		/* Unformed modules are not yet usable: skip them. */
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (symnum < mod->num_symtab) {
			*value = mod->symtab[symnum].st_value;
			*type = mod->symtab[symnum].st_info;
			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
				KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		/* Not in this module: rebase the index and keep walking. */
		symnum -= mod->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}
3791 3797
3792 static unsigned long mod_find_symname(struct module *mod, const char *name) 3798 static unsigned long mod_find_symname(struct module *mod, const char *name)
3793 { 3799 {
3794 unsigned int i; 3800 unsigned int i;
3795 3801
3796 for (i = 0; i < mod->num_symtab; i++) 3802 for (i = 0; i < mod->num_symtab; i++)
3797 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && 3803 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3798 mod->symtab[i].st_info != 'U') 3804 mod->symtab[i].st_info != 'U')
3799 return mod->symtab[i].st_value; 3805 return mod->symtab[i].st_value;
3800 return 0; 3806 return 0;
3801 } 3807 }
3802 3808
/* Look for this name: can be of form module:name.
 *
 * With a "module:" prefix only that module is searched; otherwise every
 * live module is searched in list order.  Returns the symbol's value,
 * or 0 if not found.  Deliberately lockless -- used from contexts that
 * are already in trouble (e.g. during an oops).
 */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		/* Qualified form: the part before ':' names the module. */
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
3826 3832
/*
 * module_kallsyms_on_each_symbol - invoke @fn for every symbol of every
 * live module, passing @data, the symbol name, the module and the value.
 *
 * Stops and returns @fn's result as soon as it returns non-zero;
 * returns 0 after visiting everything.  Caller must hold module_mutex
 * (asserted below), which is why plain list iteration is safe here.
 */
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		/* Unformed modules are not yet usable: skip them. */
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
3849 #endif /* CONFIG_KALLSYMS */ 3855 #endif /* CONFIG_KALLSYMS */
3850 3856
/*
 * module_flags - format a module's taint/state flags into @buf.
 *
 * Produces strings like "(PO+)" -- taint characters, '-' while
 * unloading, '+' while loading -- or an empty string for a clean, live
 * module.  @buf must be large enough for the flags plus parentheses and
 * NUL (callers use a small fixed buffer).  Returns @buf.
 */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	/* Callers must never hand us an unformed module. */
	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
3873 3879
3874 #ifdef CONFIG_PROC_FS 3880 #ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules.
 *
 * seq_file start op: takes module_mutex (released in m_stop()) so the
 * list cannot change while /proc/modules is being generated.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}
3881 3887
3882 static void *m_next(struct seq_file *m, void *p, loff_t *pos) 3888 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3883 { 3889 {
3884 return seq_list_next(p, &modules, pos); 3890 return seq_list_next(p, &modules, pos);
3885 } 3891 }
3886 3892
/* seq_file stop op: drop the lock taken in m_start(). */
static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}
3891 3897
3892 static int m_show(struct seq_file *m, void *p) 3898 static int m_show(struct seq_file *m, void *p)
3893 { 3899 {
3894 struct module *mod = list_entry(p, struct module, list); 3900 struct module *mod = list_entry(p, struct module, list);
3895 char buf[8]; 3901 char buf[8];
3896 3902
3897 /* We always ignore unformed modules. */ 3903 /* We always ignore unformed modules. */
3898 if (mod->state == MODULE_STATE_UNFORMED) 3904 if (mod->state == MODULE_STATE_UNFORMED)
3899 return 0; 3905 return 0;
3900 3906
3901 seq_printf(m, "%s %u", 3907 seq_printf(m, "%s %u",
3902 mod->name, mod->init_size + mod->core_size); 3908 mod->name, mod->init_size + mod->core_size);
3903 print_unload_info(m, mod); 3909 print_unload_info(m, mod);
3904 3910
3905 /* Informative for users. */ 3911 /* Informative for users. */
3906 seq_printf(m, " %s", 3912 seq_printf(m, " %s",
3907 mod->state == MODULE_STATE_GOING ? "Unloading" : 3913 mod->state == MODULE_STATE_GOING ? "Unloading" :
3908 mod->state == MODULE_STATE_COMING ? "Loading" : 3914 mod->state == MODULE_STATE_COMING ? "Loading" :
3909 "Live"); 3915 "Live");
3910 /* Used by oprofile and other similar tools. */ 3916 /* Used by oprofile and other similar tools. */
3911 seq_printf(m, " 0x%pK", mod->module_core); 3917 seq_printf(m, " 0x%pK", mod->module_core);
3912 3918
3913 /* Taints info */ 3919 /* Taints info */
3914 if (mod->taints) 3920 if (mod->taints)
3915 seq_printf(m, " %s", module_flags(mod, buf)); 3921 seq_printf(m, " %s", module_flags(mod, buf));
3916 3922
3917 seq_puts(m, "\n"); 3923 seq_puts(m, "\n");
3918 return 0; 3924 return 0;
3919 } 3925 }
3920 3926
3921 /* Format: modulename size refcount deps address 3927 /* Format: modulename size refcount deps address
3922 3928
3923 Where refcount is a number or -, and deps is a comma-separated list 3929 Where refcount is a number or -, and deps is a comma-separated list
3924 of depends or -. 3930 of depends or -.
3925 */ 3931 */
/* seq_file iterator ops backing /proc/modules. */
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};
3932 3938
/* open() handler for /proc/modules: attach the seq_file iterator. */
static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}
3937 3943
/* file_operations for /proc/modules; reading is delegated to seq_file. */
static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3944 3950
/*
 * Create /proc/modules at boot.  The proc_create() return value is
 * deliberately ignored (kernel convention): a missing /proc/modules is
 * not fatal to module loading itself.
 */
static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
3951 #endif 3957 #endif
3952 3958
3953 /* Given an address, look for it in the module exception tables. */ 3959 /* Given an address, look for it in the module exception tables. */
3954 const struct exception_table_entry *search_module_extables(unsigned long addr) 3960 const struct exception_table_entry *search_module_extables(unsigned long addr)
3955 { 3961 {
3956 const struct exception_table_entry *e = NULL; 3962 const struct exception_table_entry *e = NULL;
3957 struct module *mod; 3963 struct module *mod;
3958 3964
3959 preempt_disable(); 3965 preempt_disable();
3960 list_for_each_entry_rcu(mod, &modules, list) { 3966 list_for_each_entry_rcu(mod, &modules, list) {
3961 if (mod->state == MODULE_STATE_UNFORMED) 3967 if (mod->state == MODULE_STATE_UNFORMED)
3962 continue; 3968 continue;
3963 if (mod->num_exentries == 0) 3969 if (mod->num_exentries == 0)
3964 continue; 3970 continue;
3965 3971
3966 e = search_extable(mod->extable, 3972 e = search_extable(mod->extable,
3967 mod->extable + mod->num_exentries - 1, 3973 mod->extable + mod->num_exentries - 1,
3968 addr); 3974 addr);
3969 if (e) 3975 if (e)
3970 break; 3976 break;
3971 } 3977 }
3972 preempt_enable(); 3978 preempt_enable();
3973 3979
3974 /* Now, if we found one, we are running inside it now, hence 3980 /* Now, if we found one, we are running inside it now, hence
3975 we cannot unload the module, hence no refcnt needed. */ 3981 we cannot unload the module, hence no refcnt needed. */
3976 return e; 3982 return e;
3977 } 3983 }
3978 3984
3979 /* 3985 /*
3980 * is_module_address - is this address inside a module? 3986 * is_module_address - is this address inside a module?
3981 * @addr: the address to check. 3987 * @addr: the address to check.
3982 * 3988 *
3983 * See is_module_text_address() if you simply want to see if the address 3989 * See is_module_text_address() if you simply want to see if the address
3984 * is code (not data). 3990 * is code (not data).
3985 */ 3991 */
3986 bool is_module_address(unsigned long addr) 3992 bool is_module_address(unsigned long addr)
3987 { 3993 {
3988 bool ret; 3994 bool ret;
3989 3995
3990 preempt_disable(); 3996 preempt_disable();
3991 ret = __module_address(addr) != NULL; 3997 ret = __module_address(addr) != NULL;
3992 preempt_enable(); 3998 preempt_enable();
3993 3999
3994 return ret; 4000 return ret;
3995 } 4001 }
3996 4002
3997 /* 4003 /*
3998 * __module_address - get the module which contains an address. 4004 * __module_address - get the module which contains an address.
3999 * @addr: the address. 4005 * @addr: the address.
4000 * 4006 *
4001 * Must be called with preempt disabled or module mutex held so that 4007 * Must be called with preempt disabled or module mutex held so that
4002 * module doesn't get freed during this. 4008 * module doesn't get freed during this.
4003 */ 4009 */
4004 struct module *__module_address(unsigned long addr) 4010 struct module *__module_address(unsigned long addr)
4005 { 4011 {
4006 struct module *mod; 4012 struct module *mod;
4007 4013
4008 if (addr < module_addr_min || addr > module_addr_max) 4014 if (addr < module_addr_min || addr > module_addr_max)
4009 return NULL; 4015 return NULL;
4010 4016
4011 module_assert_mutex_or_preempt(); 4017 module_assert_mutex_or_preempt();
4012 4018
4013 mod = mod_find(addr); 4019 mod = mod_find(addr);
4014 if (mod) { 4020 if (mod) {
4015 BUG_ON(!within_module(addr, mod)); 4021 BUG_ON(!within_module(addr, mod));
4016 if (mod->state == MODULE_STATE_UNFORMED) 4022 if (mod->state == MODULE_STATE_UNFORMED)
4017 mod = NULL; 4023 mod = NULL;
4018 } 4024 }
4019 return mod; 4025 return mod;
4020 } 4026 }
4021 EXPORT_SYMBOL_GPL(__module_address); 4027 EXPORT_SYMBOL_GPL(__module_address);
4022 4028
4023 /* 4029 /*
4024 * is_module_text_address - is this address inside module code? 4030 * is_module_text_address - is this address inside module code?
4025 * @addr: the address to check. 4031 * @addr: the address to check.
4026 * 4032 *
4027 * See is_module_address() if you simply want to see if the address is 4033 * See is_module_address() if you simply want to see if the address is
4028 * anywhere in a module. See kernel_text_address() for testing if an 4034 * anywhere in a module. See kernel_text_address() for testing if an
4029 * address corresponds to kernel or module code. 4035 * address corresponds to kernel or module code.
4030 */ 4036 */
4031 bool is_module_text_address(unsigned long addr) 4037 bool is_module_text_address(unsigned long addr)
4032 { 4038 {
4033 bool ret; 4039 bool ret;
4034 4040
4035 preempt_disable(); 4041 preempt_disable();
4036 ret = __module_text_address(addr) != NULL; 4042 ret = __module_text_address(addr) != NULL;
4037 preempt_enable(); 4043 preempt_enable();
4038 4044
4039 return ret; 4045 return ret;
4040 } 4046 }
4041 4047
4042 /* 4048 /*
4043 * __module_text_address - get the module whose code contains an address. 4049 * __module_text_address - get the module whose code contains an address.
4044 * @addr: the address. 4050 * @addr: the address.
4045 * 4051 *
4046 * Must be called with preempt disabled or module mutex held so that 4052 * Must be called with preempt disabled or module mutex held so that
4047 * module doesn't get freed during this. 4053 * module doesn't get freed during this.
4048 */ 4054 */
4049 struct module *__module_text_address(unsigned long addr) 4055 struct module *__module_text_address(unsigned long addr)
4050 { 4056 {
4051 struct module *mod = __module_address(addr); 4057 struct module *mod = __module_address(addr);
4052 if (mod) { 4058 if (mod) {
4053 /* Make sure it's within the text section. */ 4059 /* Make sure it's within the text section. */
4054 if (!within(addr, mod->module_init, mod->init_text_size) 4060 if (!within(addr, mod->module_init, mod->init_text_size)
4055 && !within(addr, mod->module_core, mod->core_text_size)) 4061 && !within(addr, mod->module_core, mod->core_text_size))
4056 mod = NULL; 4062 mod = NULL;
4057 } 4063 }
4058 return mod; 4064 return mod;
4059 } 4065 }
4060 EXPORT_SYMBOL_GPL(__module_text_address); 4066 EXPORT_SYMBOL_GPL(__module_text_address);
4061 4067
4062 /* Don't grab lock, we're oopsing. */ 4068 /* Don't grab lock, we're oopsing. */
4063 void print_modules(void) 4069 void print_modules(void)
4064 { 4070 {
4065 struct module *mod; 4071 struct module *mod;
4066 char buf[8]; 4072 char buf[8];
4067 4073
4068 printk(KERN_DEFAULT "Modules linked in:"); 4074 printk(KERN_DEFAULT "Modules linked in:");
4069 /* Most callers should already have preempt disabled, but make sure */ 4075 /* Most callers should already have preempt disabled, but make sure */
4070 preempt_disable(); 4076 preempt_disable();
4071 list_for_each_entry_rcu(mod, &modules, list) { 4077 list_for_each_entry_rcu(mod, &modules, list) {
4072 if (mod->state == MODULE_STATE_UNFORMED) 4078 if (mod->state == MODULE_STATE_UNFORMED)
4073 continue; 4079 continue;
4074 pr_cont(" %s%s", mod->name, module_flags(mod, buf)); 4080 pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4075 } 4081 }
4076 preempt_enable(); 4082 preempt_enable();
4077 if (last_unloaded_module[0]) 4083 if (last_unloaded_module[0])
4078 pr_cont(" [last unloaded: %s]", last_unloaded_module); 4084 pr_cont(" [last unloaded: %s]", last_unloaded_module);
4079 pr_cont("\n"); 4085 pr_cont("\n");
4080 } 4086 }
4081 4087
4082 #ifdef CONFIG_MODVERSIONS 4088 #ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
/*
 * Deliberately empty: modpost computes a CRC over this prototype, so any
 * layout change in the referenced structures changes the version
 * signature and rejects incompatible modules at load time.
 */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
4093 #endif 4099 #endif
4094 4100