Blame view
kernel/module.c
112 KB
f71d20e96
|
1 |
/* |
1da177e4c
|
2 |
Copyright (C) 2002 Richard Henderson |
51f3d0f47
|
3 |
Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. |
1da177e4c
|
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 |
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ |
9984de1a5
|
19 |
#include <linux/export.h> |
8a293be0d
|
20 |
#include <linux/extable.h> |
1da177e4c
|
21 |
#include <linux/moduleloader.h> |
af658dca2
|
22 |
#include <linux/trace_events.h> |
1da177e4c
|
23 |
#include <linux/init.h> |
ae84e3247
|
24 |
#include <linux/kallsyms.h> |
34e1169d9
|
25 |
#include <linux/file.h> |
3b5d5c6b0
|
26 |
#include <linux/fs.h> |
6d7601338
|
27 |
#include <linux/sysfs.h> |
9f1583339
|
28 |
#include <linux/kernel.h> |
1da177e4c
|
29 30 31 |
#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/elf.h> |
3b5d5c6b0
|
32 |
#include <linux/proc_fs.h> |
2e72d51b4
|
33 |
#include <linux/security.h> |
1da177e4c
|
34 35 36 37 |
#include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <linux/rcupdate.h> |
c59ede7b7
|
38 |
#include <linux/capability.h> |
1da177e4c
|
39 40 41 42 43 44 |
#include <linux/cpu.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/vermagic.h> #include <linux/notifier.h> |
f6a570333
|
45 |
#include <linux/sched.h> |
1da177e4c
|
46 |
#include <linux/device.h> |
c988d2b28
|
47 |
#include <linux/string.h> |
97d1f15b7
|
48 |
#include <linux/mutex.h> |
d72b37513
|
49 |
#include <linux/rculist.h> |
7c0f6ba68
|
50 |
#include <linux/uaccess.h> |
1da177e4c
|
51 |
#include <asm/cacheflush.h> |
563ec5cbc
|
52 |
#include <linux/set_memory.h> |
eb8cdec4a
|
53 |
#include <asm/mmu_context.h> |
b817f6fef
|
54 |
#include <linux/license.h> |
6d7623943
|
55 |
#include <asm/sections.h> |
97e1c18e8
|
56 |
#include <linux/tracepoint.h> |
90d595fe5
|
57 |
#include <linux/ftrace.h> |
7e545d6ec
|
58 |
#include <linux/livepatch.h> |
22a9d6456
|
59 |
#include <linux/async.h> |
fbf59bc9d
|
60 |
#include <linux/percpu.h> |
4f2294b6d
|
61 |
#include <linux/kmemleak.h> |
bf5438fca
|
62 |
#include <linux/jump_label.h> |
84e1c6bb3
|
63 |
#include <linux/pfn.h> |
403ed2784
|
64 |
#include <linux/bsearch.h> |
9d5059c95
|
65 |
#include <linux/dynamic_debug.h> |
ca86cad73
|
66 |
#include <linux/audit.h> |
2f3238aeb
|
67 |
#include <uapi/linux/module.h> |
106a4ee25
|
68 |
#include "module-internal.h" |
1da177e4c
|
69 |
|
7ead8b831
|
70 71 |
#define CREATE_TRACE_POINTS #include <trace/events/module.h> |
1da177e4c
|
72 73 74 |
#ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 #endif |
84e1c6bb3
|
75 76 77 |
/* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but |
0f5bf6d0a
|
78 |
* only when CONFIG_STRICT_MODULE_RWX=y |
84e1c6bb3
|
79 |
*/ |
0f5bf6d0a
|
80 |
#ifdef CONFIG_STRICT_MODULE_RWX |
84e1c6bb3
|
81 82 83 84 |
# define debug_align(X) ALIGN(X, PAGE_SIZE) #else # define debug_align(X) (X) #endif |
1da177e4c
|
85 86 |
/* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
75676500f
|
87 88 89 90 91 |
/* * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), * 2) module_use links, * 3) module_addr_min/module_addr_max. |
e513cc1c0
|
92 |
* (delete and add uses RCU list operations). */ |
c6b378019
|
93 94 |
DEFINE_MUTEX(module_mutex); EXPORT_SYMBOL_GPL(module_mutex); |
1da177e4c
|
95 |
static LIST_HEAD(modules); |
67fc4e0cb
|
96 |
|
6c9692e2d
|
97 |
#ifdef CONFIG_MODULES_TREE_LOOKUP |
106a4ee25
|
98 |
|
93c2e105f
|
99 100 101 102 |
/* * Use a latched RB-tree for __module_address(); this allows us to use * RCU-sched lookups of the address from any context. * |
6c9692e2d
|
103 104 105 |
* This is conditional on PERF_EVENTS || TRACING because those can really hit * __module_address() hard by doing a lot of stack unwinding; potentially from * NMI context. |
93c2e105f
|
106 107 108 |
*/ static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) |
106a4ee25
|
109 |
{ |
7523e4dc5
|
110 |
struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
106a4ee25
|
111 |
|
7523e4dc5
|
112 |
return (unsigned long)layout->base; |
93c2e105f
|
113 114 115 116 |
} static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) { |
7523e4dc5
|
117 |
struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
93c2e105f
|
118 |
|
7523e4dc5
|
119 |
return (unsigned long)layout->size; |
93c2e105f
|
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
} static __always_inline bool mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) { return __mod_tree_val(a) < __mod_tree_val(b); } static __always_inline int mod_tree_comp(void *key, struct latch_tree_node *n) { unsigned long val = (unsigned long)key; unsigned long start, end; start = __mod_tree_val(n); if (val < start) return -1; end = start + __mod_tree_size(n); if (val >= end) return 1; |
106a4ee25
|
141 |
|
106a4ee25
|
142 143 |
return 0; } |
93c2e105f
|
144 145 146 147 |
static const struct latch_tree_ops mod_tree_ops = { .less = mod_tree_less, .comp = mod_tree_comp, }; |
4f666546d
|
148 149 150 151 152 153 |
static struct mod_tree_root { struct latch_tree_root root; unsigned long addr_min; unsigned long addr_max; } mod_tree __cacheline_aligned = { .addr_min = -1UL, |
106a4ee25
|
154 |
}; |
#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->core_layout.mtn.mod = mod;
	mod->init_layout.mtn.mod = mod;

	/* The init layout is only tracked while it exists (size != 0). */
	__mod_tree_insert(&mod->core_layout.mtn);
	if (mod->init_layout.size)
		__mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_layout.size)
		__mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->core_layout.mtn);
	mod_tree_remove_init(mod);
}

/* Look up the module covering @addr via the latch tree, or NULL. */
static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

/* Fallback: linear RCU scan of the module list. */
static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */
4f666546d
|
222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 |
/* * Bounds of module text, for speeding up __module_address. * Protected by module_mutex. */ static void __mod_update_bounds(void *base, unsigned int size) { unsigned long min = (unsigned long)base; unsigned long max = min + size; if (min < module_addr_min) module_addr_min = min; if (max > module_addr_max) module_addr_max = max; } static void mod_update_bounds(struct module *mod) { |
7523e4dc5
|
239 240 241 |
__mod_update_bounds(mod->core_layout.base, mod->core_layout.size); if (mod->init_layout.size) __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); |
4f666546d
|
242 |
} |
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

/* Debug aid: require module_mutex to be held by the caller. */
static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

/*
 * Debug aid: require either an RCU-sched read-side section or
 * module_mutex.  Only checks anything when lockdep is enabled.
 */
static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	/* lockdep state is unreliable once debug_locks is turned off. */
	if (unlikely(!debug_locks))
		return;

	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}
/* Defaults to the Kconfig choice; writable as module.sig_enforce (set-only). */
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

/*
 * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
 * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
 */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

/* Chain notified on module state transitions (blocking context). */
static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Register @nb to be called on module state changes. */
int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

/* Remove @nb from the module notifier chain. */
int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	/* Callers must never pass an UNFORMED module here. */
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

/* Taint both the kernel-wide flags and this module's own taint mask. */
static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
	/* Drop the module ref, then terminate the task; never returns. */
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	/* Section 0 is the ELF null section, so start at 1. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0, so a miss naturally yields NULL. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0, so *num becomes 0 on miss. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker: bounds of the built-in exported-symbol tables. */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
/* CRC tables parallel the symbol tables above (MODVERSIONS). */
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif

/* Without MODVERSIONS there are no CRCs at all. */
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
dafd0940c
|
378 379 380 381 382 |
static bool each_symbol_in_section(const struct symsearch *arr, unsigned int arrsize, struct module *owner, bool (*fn)(const struct symsearch *syms, struct module *owner, |
de4d8d534
|
383 |
void *data), |
dafd0940c
|
384 |
void *data) |
ad9546c99
|
385 |
{ |
de4d8d534
|
386 |
unsigned int j; |
ad9546c99
|
387 |
|
dafd0940c
|
388 |
for (j = 0; j < arrsize; j++) { |
de4d8d534
|
389 390 |
if (fn(&arr[j], owner, data)) return true; |
f71d20e96
|
391 |
} |
dafd0940c
|
392 393 |
return false; |
ad9546c99
|
394 |
} |
/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	/* Built-in kernel symbol tables, bracketed by the linker. */
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	/* Caller must hold module_mutex or be in an RCU-sched section. */
	module_assert_mutex_or_preempt();

	/* First the kernel's own exports ... */
	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	/* ... then each loaded module's export tables. */
	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		/* Skip modules that are still being set up. */
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
/* In/out argument bundle threaded through the symbol-search callbacks. */
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;	/* requester may use GPL-only exports */
	bool warn;	/* emit warnings for deprecated/unused symbols */

	/* Output */
	struct module *owner;
	const s32 *crc;
	const struct kernel_symbol *sym;
};

/*
 * License-check a candidate match and, if acceptable, fill in the
 * output fields of @data (a struct find_symbol_arg).  Returns false
 * only when a GPL-only symbol is refused to a non-GPL requester.
 */
static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

/* Resolve a symbol's value; PREL32 configs store it as a relative offset. */
static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
	return sym->value;
#endif
}

/* Resolve a symbol's name pointer, mirroring kernel_symbol_value(). */
static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}
403ed2784
|
520 521 522 523 524 |
static int cmp_name(const void *va, const void *vb) { const char *a; const struct kernel_symbol *b; a = va; b = vb; |
7290d5809
|
525 |
return strcmp(a, kernel_symbol_name(b)); |
403ed2784
|
526 |
} |
de4d8d534
|
527 528 529 530 531 |
static bool find_symbol_in_section(const struct symsearch *syms, struct module *owner, void *data) { struct find_symbol_arg *fsa = data; |
403ed2784
|
532 533 534 535 536 537 538 |
struct kernel_symbol *sym; sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) return true; |
de4d8d534
|
539 |
|
de4d8d534
|
540 541 |
return false; } |
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const s32 **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	/* Walk every export table; fsa outputs are valid only on success. */
	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		/* Length check first avoids memcmp on obvious mismatches. */
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

/* Public lookup: requires module_mutex and skips UNFORMED modules. */
struct module *find_module(const char *name)
{
	module_assert_mutex();
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

/* Accessor for the module's reserved percpu region. */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

/* Allocate the module's .data..percpu region from reserved percpu space. */
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	/* Percpu allocator cannot honour alignment beyond a page. */
	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

/* Index of the module's percpu section, or 0 if absent. */
static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

/* Copy the initial percpu image to every possible CPU's copy. */
static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/*
 * Test whether @addr falls inside any module's static percpu area; if so
 * optionally report the canonical (boot-CPU based) address via @can_addr.
 */
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	/* Disabling preemption pins the RCU-read view of the module list. */
	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}
#else /* ... !CONFIG_SMP */

/* UP builds have no module percpu area; all of these are no-op stubs. */
static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */
/*
 * Generate sysfs attribute boilerplate (setup/show/test/free) for a
 * string field of struct module taken from the .modinfo section.
 */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

/* Name of the most recently unloaded module, shown in oopses/taint info. */
static char last_unloaded_module[MODULE_NAME_LEN+1];
#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	/* b's source_list records every module that depends on b. */
	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	/* NULL target or an existing link is a successful no-op. */
	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		/* Undo the reference taken above on bookkeeping failure. */
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	/* Drop every dependency this module held on other modules. */
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
/* O_TRUNC in @flags requests a forced unload; taints the kernel if used. */
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
/* Forced unload not compiled in: never force. */
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
e513cc1c0
|
865 866 |
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		/* (nonzero result here means the module is still in use) */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

/*
 * Drop the load-time base reference and mark the module GOING.
 * Returns 0 on success; *forced is set if a forced unload was used.
 */
static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod: the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);
17da2bd90
|
911 912 |
/*
 * delete_module(2): unload a module by name.
 * Fails unless the module is live, has no users, and (if it had an init
 * function) has an exit function -- unless a forced unload is requested.
 */
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Unlock before calling the exit function and notifiers. */
	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
d1e99d7ae
|
979 |
static inline void print_unload_info(struct seq_file *m, struct module *mod) |
1da177e4c
|
980 981 982 |
{ struct module_use *use; int printed_something = 0; |
d5db139ab
|
983 |
seq_printf(m, " %i ", module_refcount(mod)); |
1da177e4c
|
984 |
|
6da0b5651
|
985 986 987 988 |
/* * Always include a trailing , so userspace can differentiate * between this and the old multi-field proc format. */ |
2c02dfe7f
|
989 |
list_for_each_entry(use, &mod->source_list, source_list) { |
1da177e4c
|
990 |
printed_something = 1; |
2c02dfe7f
|
991 |
seq_printf(m, "%s,", use->source->name); |
1da177e4c
|
992 |
} |
1da177e4c
|
993 994 |
if (mod->init != NULL && mod->exit == NULL) { printed_something = 1; |
6da0b5651
|
995 |
seq_puts(m, "[permanent],"); |
1da177e4c
|
996 997 998 |
} if (!printed_something) |
6da0b5651
|
999 |
seq_puts(m, "-"); |
1da177e4c
|
1000 1001 1002 1003 1004 |
} void __symbol_put(const char *symbol) { struct module *owner; |
1da177e4c
|
1005 |
|
24da1cbff
|
1006 |
preempt_disable(); |
414fd31b2
|
1007 |
if (!find_symbol(symbol, &owner, NULL, true, false)) |
1da177e4c
|
1008 1009 |
BUG(); module_put(owner); |
24da1cbff
|
1010 |
preempt_enable(); |
1da177e4c
|
1011 1012 |
} EXPORT_SYMBOL(__symbol_put); |
7d1d16e41
|
1013 |
/* Note this assumes addr is a function, which it currently always is. */ |
1da177e4c
|
1014 1015 |
void symbol_put_addr(void *addr) { |
5e3766138
|
1016 |
struct module *modaddr; |
7d1d16e41
|
1017 |
unsigned long a = (unsigned long)dereference_function_descriptor(addr); |
1da177e4c
|
1018 |
|
7d1d16e41
|
1019 |
if (core_kernel_text(a)) |
5e3766138
|
1020 |
return; |
1da177e4c
|
1021 |
|
275d7d44d
|
1022 1023 1024 1025 1026 |
/* * Even though we hold a reference on the module; we still need to * disable preemption in order to safely traverse the data structure. */ preempt_disable(); |
7d1d16e41
|
1027 |
modaddr = __module_text_address(a); |
a6e6abd57
|
1028 |
BUG_ON(!modaddr); |
5e3766138
|
1029 |
module_put(modaddr); |
275d7d44d
|
1030 |
preempt_enable(); |
1da177e4c
|
1031 1032 1033 1034 |
} EXPORT_SYMBOL_GPL(symbol_put_addr); static ssize_t show_refcnt(struct module_attribute *mattr, |
4befb026c
|
1035 |
struct module_kobject *mk, char *buffer) |
1da177e4c
|
1036 |
{ |
d5db139ab
|
1037 1038 |
return sprintf(buffer, "%i ", module_refcount(mk->mod)); |
1da177e4c
|
1039 |
} |
cca3e7073
|
1040 1041 |
static struct module_attribute modinfo_refcnt = __ATTR(refcnt, 0444, show_refcnt, NULL); |
1da177e4c
|
1042 |
|
d53799be6
|
1043 1044 1045 1046 |
/* Unconditionally take a reference on a (non-NULL) module. */
void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

/* Take a reference unless the module is dead/dying; true on success. */
bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

/* Release a reference taken by __module_get()/try_module_get(). */
void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
1da177e4c
|
1084 |
#else /* !CONFIG_MODULE_UNLOAD */ |
d1e99d7ae
|
1085 |
static inline void print_unload_info(struct seq_file *m, struct module *mod) |
1da177e4c
|
1086 1087 |
{ /* We don't know the usage count, or what modules are using. */ |
6da0b5651
|
1088 |
seq_puts(m, " - -"); |
1da177e4c
|
1089 1090 1091 1092 1093 |
} static inline void module_unload_free(struct module *mod) { } |
9bea7f239
|
1094 |
int ref_module(struct module *a, struct module *b) |
1da177e4c
|
1095 |
{ |
9bea7f239
|
1096 |
return strong_try_module_get(b); |
1da177e4c
|
1097 |
} |
9bea7f239
|
1098 |
EXPORT_SYMBOL_GPL(ref_module); |
1da177e4c
|
1099 |
|
9f85a4bbb
|
1100 |
static inline int module_unload_init(struct module *mod) |
1da177e4c
|
1101 |
{ |
9f85a4bbb
|
1102 |
return 0; |
1da177e4c
|
1103 1104 |
} #endif /* CONFIG_MODULE_UNLOAD */ |
53999bf34
|
1105 1106 1107 |
static size_t module_flags_taint(struct module *mod, char *buf) { size_t l = 0; |
7fd8329ba
|
1108 1109 1110 1111 |
int i; for (i = 0; i < TAINT_FLAGS_COUNT; i++) { if (taint_flags[i].module && test_bit(i, &mod->taints)) |
5eb7c0d04
|
1112 |
buf[l++] = taint_flags[i].c_true; |
7fd8329ba
|
1113 |
} |
53999bf34
|
1114 |
|
53999bf34
|
1115 1116 |
return l; } |
1f71740ab
|
1117 |
static ssize_t show_initstate(struct module_attribute *mattr, |
4befb026c
|
1118 |
struct module_kobject *mk, char *buffer) |
1f71740ab
|
1119 1120 |
{ const char *state = "unknown"; |
4befb026c
|
1121 |
switch (mk->mod->state) { |
1f71740ab
|
1122 1123 1124 1125 1126 1127 1128 1129 1130 |
case MODULE_STATE_LIVE: state = "live"; break; case MODULE_STATE_COMING: state = "coming"; break; case MODULE_STATE_GOING: state = "going"; break; |
0d21b0e34
|
1131 1132 |
default: BUG(); |
1f71740ab
|
1133 1134 1135 1136 |
} return sprintf(buffer, "%s ", state); } |
cca3e7073
|
1137 1138 |
static struct module_attribute modinfo_initstate = __ATTR(initstate, 0444, show_initstate, NULL); |
1f71740ab
|
1139 |
|
88bfa3247
|
1140 1141 1142 1143 |
static ssize_t store_uevent(struct module_attribute *mattr, struct module_kobject *mk, const char *buffer, size_t count) { |
f7debeebc
|
1144 1145 1146 1147 |
int rc; rc = kobject_synth_uevent(&mk->kobj, buffer, count); return rc ? rc : count; |
88bfa3247
|
1148 |
} |
cca3e7073
|
1149 1150 1151 1152 1153 1154 |
struct module_attribute module_uevent = __ATTR(uevent, 0200, NULL, store_uevent); static ssize_t show_coresize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { |
7523e4dc5
|
1155 1156 |
return sprintf(buffer, "%u ", mk->mod->core_layout.size); |
cca3e7073
|
1157 1158 1159 1160 1161 1162 1163 1164 |
} static struct module_attribute modinfo_coresize = __ATTR(coresize, 0444, show_coresize, NULL); static ssize_t show_initsize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { |
7523e4dc5
|
1165 1166 |
return sprintf(buffer, "%u ", mk->mod->init_layout.size); |
cca3e7073
|
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 |
} static struct module_attribute modinfo_initsize = __ATTR(initsize, 0444, show_initsize, NULL); static ssize_t show_taint(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { size_t l; l = module_flags_taint(mk->mod, buffer); buffer[l++] = ' '; return l; } static struct module_attribute modinfo_taint = __ATTR(taint, 0444, show_taint, NULL); |
88bfa3247
|
1185 |
|
03e88ae1b
|
1186 |
static struct module_attribute *modinfo_attrs[] = { |
cca3e7073
|
1187 |
&module_uevent, |
03e88ae1b
|
1188 1189 |
&modinfo_version, &modinfo_srcversion, |
cca3e7073
|
1190 1191 1192 1193 |
&modinfo_initstate, &modinfo_coresize, &modinfo_initsize, &modinfo_taint, |
03e88ae1b
|
1194 |
#ifdef CONFIG_MODULE_UNLOAD |
cca3e7073
|
1195 |
&modinfo_refcnt, |
03e88ae1b
|
1196 1197 1198 |
#endif NULL, }; |
1da177e4c
|
1199 |
static const char vermagic[] = VERMAGIC_STRING; |
c6e665c8f
|
1200 |
static int try_to_force_load(struct module *mod, const char *reason) |
826e4506a
|
1201 1202 |
{ #ifdef CONFIG_MODULE_FORCE_LOAD |
25ddbb18a
|
1203 |
if (!test_taint(TAINT_FORCED_MODULE)) |
bddb12b32
|
1204 1205 |
pr_warn("%s: %s: kernel tainted. ", mod->name, reason); |
373d4d099
|
1206 |
add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); |
826e4506a
|
1207 1208 1209 1210 1211 |
return 0; #else return -ENOEXEC; #endif } |
1da177e4c
|
1212 |
#ifdef CONFIG_MODVERSIONS |
71810db27
|
1213 1214 |
static u32 resolve_rel_crc(const s32 *crc) |
d4703aefd
|
1215 |
{ |
71810db27
|
1216 |
return *(u32 *)((void *)crc + *crc); |
d4703aefd
|
1217 |
} |
490194269
|
1218 |
static int check_version(const struct load_info *info, |
1da177e4c
|
1219 |
const char *symname, |
6da0b5651
|
1220 |
struct module *mod, |
71810db27
|
1221 |
const s32 *crc) |
1da177e4c
|
1222 |
{ |
490194269
|
1223 1224 |
Elf_Shdr *sechdrs = info->sechdrs; unsigned int versindex = info->index.vers; |
1da177e4c
|
1225 1226 1227 1228 1229 1230 |
unsigned int i, num_versions; struct modversion_info *versions; /* Exporting module didn't supply crcs? OK, we're already tainted. */ if (!crc) return 1; |
a5dd69707
|
1231 1232 1233 |
/* No versions at all? modprobe --force does this. */ if (versindex == 0) return try_to_force_load(mod, symname) == 0; |
1da177e4c
|
1234 1235 1236 1237 1238 |
versions = (void *) sechdrs[versindex].sh_addr; num_versions = sechdrs[versindex].sh_size / sizeof(struct modversion_info); for (i = 0; i < num_versions; i++) { |
71810db27
|
1239 |
u32 crcval; |
1da177e4c
|
1240 1241 |
if (strcmp(versions[i].name, symname) != 0) continue; |
71810db27
|
1242 1243 1244 1245 1246 |
if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) crcval = resolve_rel_crc(crc); else crcval = *crc; if (versions[i].crc == crcval) |
1da177e4c
|
1247 |
return 1; |
71810db27
|
1248 1249 1250 |
pr_debug("Found checksum %X vs module %lX ", crcval, versions[i].crc); |
826e4506a
|
1251 |
goto bad_version; |
1da177e4c
|
1252 |
} |
826e4506a
|
1253 |
|
faaae2a58
|
1254 |
/* Broken toolchain. Warn once, then let it go.. */ |
3e2e857f9
|
1255 1256 |
pr_warn_once("%s: no symbol version for %s ", info->name, symname); |
faaae2a58
|
1257 |
return 1; |
826e4506a
|
1258 1259 |
bad_version: |
6da0b5651
|
1260 1261 |
pr_warn("%s: disagrees about version of symbol %s ", |
3e2e857f9
|
1262 |
info->name, symname); |
826e4506a
|
1263 |
return 0; |
1da177e4c
|
1264 |
} |
490194269
|
1265 |
/*
 * Check the module against the CRC of "module_layout", a proxy for the
 * layout of struct module itself.
 */
static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	const s32 *crc;

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol("module_layout", NULL, &crc, true, false)) {
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(info, "module_layout", mod, crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		/* Skip the leading kernel-version token of each string. */
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
/* Without CONFIG_MODVERSIONS every version check trivially passes. */
static inline int check_version(const struct load_info *info,
				const char *symname,
				struct module *mod,
				const s32 *crc)
{
	return 1;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
75676500f
|
1311 |
/* Resolve a symbol for this module. I.e. if we find one, record usage. */ |
49668688d
|
1312 1313 |
static const struct kernel_symbol *resolve_symbol(struct module *mod, const struct load_info *info, |
414fd31b2
|
1314 |
const char *name, |
9bea7f239
|
1315 |
char ownername[]) |
1da177e4c
|
1316 1317 |
{ struct module *owner; |
414fd31b2
|
1318 |
const struct kernel_symbol *sym; |
71810db27
|
1319 |
const s32 *crc; |
9bea7f239
|
1320 |
int err; |
1da177e4c
|
1321 |
|
d64810f56
|
1322 1323 1324 1325 1326 1327 |
/* * The module_mutex should not be a heavily contended lock; * if we get the occasional sleep here, we'll go an extra iteration * in the wait_event_interruptible(), which is harmless. */ sched_annotate_sleep(); |
75676500f
|
1328 |
mutex_lock(&module_mutex); |
414fd31b2
|
1329 |
sym = find_symbol(name, &owner, &crc, |
25ddbb18a
|
1330 |
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); |
9bea7f239
|
1331 1332 |
if (!sym) goto unlock; |
490194269
|
1333 |
if (!check_version(info, name, mod, crc)) { |
9bea7f239
|
1334 1335 |
sym = ERR_PTR(-EINVAL); goto getname; |
1da177e4c
|
1336 |
} |
9bea7f239
|
1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 |
err = ref_module(mod, owner); if (err) { sym = ERR_PTR(err); goto getname; } getname: /* We must make copy under the lock if we failed to get ref. */ strncpy(ownername, module_name(owner), MODULE_NAME_LEN); unlock: |
75676500f
|
1348 |
mutex_unlock(&module_mutex); |
218ce7351
|
1349 |
return sym; |
1da177e4c
|
1350 |
} |
49668688d
|
1351 1352 1353 1354 |
static const struct kernel_symbol * resolve_symbol_wait(struct module *mod, const struct load_info *info, const char *name) |
9bea7f239
|
1355 1356 |
{ const struct kernel_symbol *ksym; |
49668688d
|
1357 |
char owner[MODULE_NAME_LEN]; |
9bea7f239
|
1358 1359 |
if (wait_event_interruptible_timeout(module_wq, |
49668688d
|
1360 1361 |
!IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) || PTR_ERR(ksym) != -EBUSY, |
9bea7f239
|
1362 |
30 * HZ) <= 0) { |
bddb12b32
|
1363 1364 1365 |
pr_warn("%s: gave up waiting for init of module %s. ", mod->name, owner); |
9bea7f239
|
1366 1367 1368 |
} return ksym; } |
1da177e4c
|
1369 1370 1371 1372 |
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
/* A section is uninteresting if it is not allocated or has no size. */
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

/* Show a section's load address (NULL unless kptr_restrict < 2). */
static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
		       (void *)sattr->address : NULL);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

/* Create /sys/module/<name>/sections/<section> attribute files. */
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUSR;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */
struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

/* Create /sys/module/<name>/notes/<section> binary attribute files. */
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			/* reuse the name kstrdup'd by add_sect_attrs() */
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

  out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs,
				 mod->notes_attrs->notes);
}
1da177e4c
|
1561 |
#else
/* !CONFIG_KALLSYMS: no section/note information to publish. */
static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

/* Remove this module's links from its targets' "holders" directories. */
static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

/* Create "holders" links; on any failure, undo the ones already made. */
static int add_usage_links(struct module *mod)
{
	int ret = 0;
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		ret = sysfs_create_link(use->target->holders_dir,
					&mod->mkobj.kobj, mod->name);
		if (ret)
			break;
	}
	mutex_unlock(&module_mutex);
	if (ret)
		del_usage_links(mod);
#endif
	return ret;
}

/*
 * Copy the applicable modinfo_attrs[] entries into mod->modinfo_attrs
 * and create a sysfs file for each.  Returns 0 or a sysfs/-ENOMEM error.
 */
static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test || attr->test(mod)) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
					&temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
1da177e4c
|
1649 |
|
942e44312
|
1650 1651 1652 1653 1654 1655 1656 |
static void mod_kobject_put(struct module *mod) { DECLARE_COMPLETION_ONSTACK(c); mod->mkobj.kobj_completion = &c; kobject_put(&mod->mkobj.kobj); wait_for_completion(&c); } |
6407ebb27
|
1657 |
static int mod_sysfs_init(struct module *mod) |
1da177e4c
|
1658 1659 |
{ int err; |
6494a93d5
|
1660 |
struct kobject *kobj; |
1da177e4c
|
1661 |
|
823bccfc4
|
1662 |
if (!module_sysfs_initialized) { |
bddb12b32
|
1663 1664 |
pr_err("%s: module sysfs not initialized ", mod->name); |
1cc5f7142
|
1665 1666 1667 |
err = -EINVAL; goto out; } |
6494a93d5
|
1668 1669 1670 |
kobj = kset_find_obj(module_kset, mod->name); if (kobj) { |
bddb12b32
|
1671 1672 |
pr_err("%s: module is already loaded ", mod->name); |
6494a93d5
|
1673 1674 1675 1676 |
kobject_put(kobj); err = -EINVAL; goto out; } |
1da177e4c
|
1677 |
mod->mkobj.mod = mod; |
e17e0f51a
|
1678 |
|
ac3c8141f
|
1679 1680 1681 1682 1683 |
memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); mod->mkobj.kobj.kset = module_kset; err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, "%s", mod->name); if (err) |
942e44312
|
1684 |
mod_kobject_put(mod); |
270a6c4ca
|
1685 |
|
97c146ef0
|
1686 |
/* delay uevent until full sysfs population */ |
270a6c4ca
|
1687 1688 1689 |
out: return err; } |
6407ebb27
|
1690 |
/*
 * Populate the whole /sys/module/<name> hierarchy: holders dir,
 * parameters, modinfo attributes, usage links, section/note attrs.
 * Unwinds in reverse order on failure.
 */
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	err = add_usage_links(mod);
	if (err)
		goto out_unreg_modinfo_attrs;

	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	/* uevent was delayed by mod_sysfs_init() until sysfs is complete */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_modinfo_attrs:
	module_remove_modinfo_attrs(mod);
out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}

static void init_param_lock(struct module *mod)
{
	mutex_init(&mod->param_lock);
}
#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

static void init_param_lock(struct module *mod)
{
}
#endif /* CONFIG_SYSFS */

/* Tear down everything mod_sysfs_setup() created, in reverse order. */
static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

#ifdef CONFIG_STRICT_MODULE_RWX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 *
 * General layout of module is:
 *         [text] [read-only-data] [ro-after-init] [writable data]
 *         text_size -----^                ^               ^               ^
 *         ro_size ------------------------|               |               |
 *         ro_after_init_size -----------------------------|               |
 *         size -----------------------------------------------------------|
 *
 * These values are always page-aligned (as is base)
 */
static void frob_text(const struct module_layout *layout,
		      int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base,
		   layout->text_size >> PAGE_SHIFT);
}
84e1c6bb3
|
1797 |
|
85c898db6
|
1798 1799 |
static void frob_rodata(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) |
84e1c6bb3
|
1800 |
{ |
85c898db6
|
1801 1802 1803 1804 1805 |
BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); set_memory((unsigned long)layout->base + layout->text_size, (layout->ro_size - layout->text_size) >> PAGE_SHIFT); |
84e1c6bb3
|
1806 |
} |
444d13ff1
|
1807 1808 1809 1810 1811 1812 1813 1814 1815 |
static void frob_ro_after_init(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); set_memory((unsigned long)layout->base + layout->ro_size, (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT); } |
85c898db6
|
1816 1817 |
static void frob_writable_data(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) |
84e1c6bb3
|
1818 |
{ |
85c898db6
|
1819 |
BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); |
444d13ff1
|
1820 |
BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); |
85c898db6
|
1821 |
BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); |
444d13ff1
|
1822 1823 |
set_memory((unsigned long)layout->base + layout->ro_after_init_size, (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); |
84e1c6bb3
|
1824 |
} |
84e1c6bb3
|
1825 |
|
85c898db6
|
1826 1827 |
/* livepatching wants to disable read-only so it can frob module. */ void module_disable_ro(const struct module *mod) |
20ef10c1b
|
1828 |
{ |
39290b389
|
1829 1830 |
if (!rodata_enabled) return; |
85c898db6
|
1831 1832 |
frob_text(&mod->core_layout, set_memory_rw); frob_rodata(&mod->core_layout, set_memory_rw); |
444d13ff1
|
1833 |
frob_ro_after_init(&mod->core_layout, set_memory_rw); |
85c898db6
|
1834 1835 |
frob_text(&mod->init_layout, set_memory_rw); frob_rodata(&mod->init_layout, set_memory_rw); |
20ef10c1b
|
1836 |
} |
84e1c6bb3
|
1837 |
|
444d13ff1
|
1838 |
void module_enable_ro(const struct module *mod, bool after_init) |
01526ed08
|
1839 |
{ |
39290b389
|
1840 1841 |
if (!rodata_enabled) return; |
85c898db6
|
1842 1843 1844 1845 |
frob_text(&mod->core_layout, set_memory_ro); frob_rodata(&mod->core_layout, set_memory_ro); frob_text(&mod->init_layout, set_memory_ro); frob_rodata(&mod->init_layout, set_memory_ro); |
444d13ff1
|
1846 1847 1848 |
if (after_init) frob_ro_after_init(&mod->core_layout, set_memory_ro); |
84e1c6bb3
|
1849 |
} |
85c898db6
|
1850 |
static void module_enable_nx(const struct module *mod) |
01526ed08
|
1851 |
{ |
85c898db6
|
1852 |
frob_rodata(&mod->core_layout, set_memory_nx); |
444d13ff1
|
1853 |
frob_ro_after_init(&mod->core_layout, set_memory_nx); |
85c898db6
|
1854 1855 1856 |
frob_writable_data(&mod->core_layout, set_memory_nx); frob_rodata(&mod->init_layout, set_memory_nx); frob_writable_data(&mod->init_layout, set_memory_nx); |
01526ed08
|
1857 |
} |
85c898db6
|
1858 |
static void module_disable_nx(const struct module *mod) |
01526ed08
|
1859 |
{ |
85c898db6
|
1860 |
frob_rodata(&mod->core_layout, set_memory_x); |
444d13ff1
|
1861 |
frob_ro_after_init(&mod->core_layout, set_memory_x); |
85c898db6
|
1862 1863 1864 |
frob_writable_data(&mod->core_layout, set_memory_x); frob_rodata(&mod->init_layout, set_memory_x); frob_writable_data(&mod->init_layout, set_memory_x); |
84e1c6bb3
|
1865 1866 1867 |
} /* Iterate through all modules and set each module's text as RW */ |
5d05c7084
|
1868 |
void set_all_modules_text_rw(void) |
84e1c6bb3
|
1869 1870 |
{ struct module *mod; |
39290b389
|
1871 1872 |
if (!rodata_enabled) return; |
84e1c6bb3
|
1873 1874 |
mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { |
0d21b0e34
|
1875 1876 |
if (mod->state == MODULE_STATE_UNFORMED) continue; |
85c898db6
|
1877 1878 1879 |
frob_text(&mod->core_layout, set_memory_rw); frob_text(&mod->init_layout, set_memory_rw); |
84e1c6bb3
|
1880 1881 1882 1883 1884 |
} mutex_unlock(&module_mutex); } /* Iterate through all modules and set each module's text as RO */ |
5d05c7084
|
1885 |
void set_all_modules_text_ro(void) |
84e1c6bb3
|
1886 1887 |
{ struct module *mod; |
39290b389
|
1888 1889 |
if (!rodata_enabled) return; |
84e1c6bb3
|
1890 1891 |
mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { |
905dd707f
|
1892 1893 1894 1895 1896 1897 1898 |
/* * Ignore going modules since it's possible that ro * protection has already been disabled, otherwise we'll * run into protection faults at module deallocation. */ if (mod->state == MODULE_STATE_UNFORMED || mod->state == MODULE_STATE_GOING) |
0d21b0e34
|
1899 |
continue; |
85c898db6
|
1900 1901 1902 |
frob_text(&mod->core_layout, set_memory_ro); frob_text(&mod->init_layout, set_memory_ro); |
84e1c6bb3
|
1903 1904 1905 |
} mutex_unlock(&module_mutex); } |
85c898db6
|
1906 1907 1908 |
static void disable_ro_nx(const struct module_layout *layout) { |
39290b389
|
1909 1910 1911 1912 1913 |
if (rodata_enabled) { frob_text(layout, set_memory_rw); frob_rodata(layout, set_memory_rw); frob_ro_after_init(layout, set_memory_rw); } |
85c898db6
|
1914 |
frob_rodata(layout, set_memory_x); |
444d13ff1
|
1915 |
frob_ro_after_init(layout, set_memory_x); |
85c898db6
|
1916 1917 |
frob_writable_data(layout, set_memory_x); } |
84e1c6bb3
|
1918 |
#else |
85c898db6
|
1919 1920 1921 |
static void disable_ro_nx(const struct module_layout *layout) { } static void module_enable_nx(const struct module *mod) { } static void module_disable_nx(const struct module *mod) { } |
84e1c6bb3
|
1922 |
#endif |
1ce15ef4f
|
1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 |
#ifdef CONFIG_LIVEPATCH /* * Persist Elf information about a module. Copy the Elf header, * section header table, section string table, and symtab section * index from info to mod->klp_info. */ static int copy_module_elf(struct module *mod, struct load_info *info) { unsigned int size, symndx; int ret; size = sizeof(*mod->klp_info); mod->klp_info = kmalloc(size, GFP_KERNEL); if (mod->klp_info == NULL) return -ENOMEM; /* Elf header */ size = sizeof(mod->klp_info->hdr); memcpy(&mod->klp_info->hdr, info->hdr, size); /* Elf section header table */ size = sizeof(*info->sechdrs) * info->hdr->e_shnum; |
9be936f4b
|
1945 |
mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); |
1ce15ef4f
|
1946 1947 1948 1949 |
if (mod->klp_info->sechdrs == NULL) { ret = -ENOMEM; goto free_info; } |
1ce15ef4f
|
1950 1951 1952 |
/* Elf section name string table */ size = info->sechdrs[info->hdr->e_shstrndx].sh_size; |
9be936f4b
|
1953 |
mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); |
1ce15ef4f
|
1954 1955 1956 1957 |
if (mod->klp_info->secstrings == NULL) { ret = -ENOMEM; goto free_sechdrs; } |
1ce15ef4f
|
1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 |
/* Elf symbol section index */ symndx = info->index.sym; mod->klp_info->symndx = symndx; /* * For livepatch modules, core_kallsyms.symtab is a complete * copy of the original symbol table. Adjust sh_addr to point * to core_kallsyms.symtab since the copy of the symtab in module * init memory is freed at the end of do_init_module(). */ mod->klp_info->sechdrs[symndx].sh_addr = \ (unsigned long) mod->core_kallsyms.symtab; return 0; free_sechdrs: kfree(mod->klp_info->sechdrs); free_info: kfree(mod->klp_info); return ret; } static void free_module_elf(struct module *mod) { kfree(mod->klp_info->sechdrs); kfree(mod->klp_info->secstrings); kfree(mod->klp_info); } #else /* !CONFIG_LIVEPATCH */ static int copy_module_elf(struct module *mod, struct load_info *info) { return 0; } static void free_module_elf(struct module *mod) { } #endif /* CONFIG_LIVEPATCH */ |
be1f221c0
|
1997 |
void __weak module_memfree(void *module_region) |
74e08fcf7
|
1998 1999 2000 2001 2002 2003 2004 |
{ vfree(module_region); } void __weak module_arch_cleanup(struct module *mod) { } |
d453cded0
|
2005 2006 2007 |
void __weak module_arch_freeing_init(struct module *mod) { } |
75676500f
|
2008 |
/* Free a module, remove from lists, etc. */ |
1da177e4c
|
2009 2010 |
static void free_module(struct module *mod) { |
7ead8b831
|
2011 |
trace_module_free(mod); |
36b0360d1
|
2012 |
mod_sysfs_teardown(mod); |
1da177e4c
|
2013 |
|
944a1fa01
|
2014 2015 |
/* We leave it in list to prevent duplicate loads, but make sure * that noone uses it while it's being deconstructed. */ |
d3051b489
|
2016 |
mutex_lock(&module_mutex); |
944a1fa01
|
2017 |
mod->state = MODULE_STATE_UNFORMED; |
d3051b489
|
2018 |
mutex_unlock(&module_mutex); |
944a1fa01
|
2019 |
|
b82bab4bb
|
2020 2021 |
/* Remove dynamic debug info */ ddebug_remove_module(mod->name); |
1da177e4c
|
2022 2023 2024 2025 2026 |
/* Arch-specific cleanup. */ module_arch_cleanup(mod); /* Module unload stuff */ module_unload_free(mod); |
e180a6b77
|
2027 2028 |
/* Free any allocated parameters. */ destroy_params(mod->kp, mod->num_kp); |
1ce15ef4f
|
2029 2030 |
if (is_livepatch_module(mod)) free_module_elf(mod); |
944a1fa01
|
2031 2032 |
/* Now we can delete it from the lists */ mutex_lock(&module_mutex); |
461e34aed
|
2033 2034 |
/* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); |
93c2e105f
|
2035 |
mod_tree_remove(mod); |
0286b5ea1
|
2036 |
/* Remove this module from bug list, this uses list_del_rcu */ |
461e34aed
|
2037 |
module_bug_cleanup(mod); |
0be964be0
|
2038 2039 |
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ synchronize_sched(); |
944a1fa01
|
2040 |
mutex_unlock(&module_mutex); |
85c898db6
|
2041 2042 |
/* This may be empty, but that's OK */ disable_ro_nx(&mod->init_layout); |
d453cded0
|
2043 |
module_arch_freeing_init(mod); |
7523e4dc5
|
2044 |
module_memfree(mod->init_layout.base); |
1da177e4c
|
2045 |
kfree(mod->args); |
259354dea
|
2046 |
percpu_modfree(mod); |
9f85a4bbb
|
2047 |
|
35a9393c9
|
2048 |
/* Free lock-classes; relies on the preceding sync_rcu(). */ |
7523e4dc5
|
2049 |
lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); |
fbb9ce953
|
2050 |
|
1da177e4c
|
2051 |
/* Finally, free the core (containing the module structure) */ |
85c898db6
|
2052 |
disable_ro_nx(&mod->core_layout); |
7523e4dc5
|
2053 |
module_memfree(mod->core_layout.base); |
1da177e4c
|
2054 2055 2056 2057 2058 |
} void *__symbol_get(const char *symbol) { struct module *owner; |
414fd31b2
|
2059 |
const struct kernel_symbol *sym; |
1da177e4c
|
2060 |
|
24da1cbff
|
2061 |
preempt_disable(); |
414fd31b2
|
2062 2063 2064 |
sym = find_symbol(symbol, &owner, NULL, true, true); if (sym && strong_try_module_get(owner)) sym = NULL; |
24da1cbff
|
2065 |
preempt_enable(); |
1da177e4c
|
2066 |
|
7290d5809
|
2067 |
return sym ? (void *)kernel_symbol_value(sym) : NULL; |
1da177e4c
|
2068 2069 |
} EXPORT_SYMBOL_GPL(__symbol_get); |
eea8b54dc
|
2070 2071 |
/* * Ensure that an exported symbol [global namespace] does not already exist |
02a3e59a0
|
2072 |
* in the kernel or in some other module's exported symbol table. |
be593f4ce
|
2073 2074 |
* * You must hold the module_mutex. |
eea8b54dc
|
2075 2076 2077 |
*/ static int verify_export_symbols(struct module *mod) { |
b211104d1
|
2078 |
unsigned int i; |
eea8b54dc
|
2079 |
struct module *owner; |
b211104d1
|
2080 2081 2082 2083 2084 2085 2086 2087 |
const struct kernel_symbol *s; struct { const struct kernel_symbol *sym; unsigned int num; } arr[] = { { mod->syms, mod->num_syms }, { mod->gpl_syms, mod->num_gpl_syms }, { mod->gpl_future_syms, mod->num_gpl_future_syms }, |
f7f5b6755
|
2088 |
#ifdef CONFIG_UNUSED_SYMBOLS |
b211104d1
|
2089 2090 |
{ mod->unused_syms, mod->num_unused_syms }, { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, |
f7f5b6755
|
2091 |
#endif |
b211104d1
|
2092 |
}; |
eea8b54dc
|
2093 |
|
b211104d1
|
2094 2095 |
for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { |
7290d5809
|
2096 2097 |
if (find_symbol(kernel_symbol_name(s), &owner, NULL, true, false)) { |
bddb12b32
|
2098 |
pr_err("%s: exports duplicate symbol %s" |
b211104d1
|
2099 2100 |
" (owned by %s) ", |
7290d5809
|
2101 2102 |
mod->name, kernel_symbol_name(s), module_name(owner)); |
b211104d1
|
2103 2104 |
return -ENOEXEC; } |
eea8b54dc
|
2105 |
} |
b211104d1
|
2106 2107 |
} return 0; |
eea8b54dc
|
2108 |
} |
9a4b9708f
|
2109 |
/* Change all symbols so that st_value encodes the pointer directly. */ |
49668688d
|
2110 2111 2112 2113 |
static int simplify_symbols(struct module *mod, const struct load_info *info) { Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; Elf_Sym *sym = (void *)symsec->sh_addr; |
1da177e4c
|
2114 |
unsigned long secbase; |
49668688d
|
2115 |
unsigned int i; |
1da177e4c
|
2116 |
int ret = 0; |
414fd31b2
|
2117 |
const struct kernel_symbol *ksym; |
1da177e4c
|
2118 |
|
49668688d
|
2119 2120 |
for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { const char *name = info->strtab + sym[i].st_name; |
1da177e4c
|
2121 2122 |
switch (sym[i].st_shndx) { case SHN_COMMON: |
80375980f
|
2123 2124 2125 |
/* Ignore common symbols */ if (!strncmp(name, "__gnu_lto", 9)) break; |
1da177e4c
|
2126 2127 |
/* We compiled with -fno-common. These are not supposed to happen. */ |
5e1241692
|
2128 2129 |
pr_debug("Common symbol: %s ", name); |
6da0b5651
|
2130 2131 |
pr_warn("%s: please compile with -fno-common ", |
1da177e4c
|
2132 2133 2134 2135 2136 2137 |
mod->name); ret = -ENOEXEC; break; case SHN_ABS: /* Don't need to do anything */ |
5e1241692
|
2138 2139 |
pr_debug("Absolute symbol: 0x%08lx ", |
1da177e4c
|
2140 2141 |
(long)sym[i].st_value); break; |
1ce15ef4f
|
2142 2143 2144 |
case SHN_LIVEPATCH: /* Livepatch symbols are resolved by livepatch */ break; |
1da177e4c
|
2145 |
case SHN_UNDEF: |
49668688d
|
2146 |
ksym = resolve_symbol_wait(mod, info, name); |
1da177e4c
|
2147 |
/* Ok if resolved. */ |
9bea7f239
|
2148 |
if (ksym && !IS_ERR(ksym)) { |
7290d5809
|
2149 |
sym[i].st_value = kernel_symbol_value(ksym); |
1da177e4c
|
2150 |
break; |
414fd31b2
|
2151 |
} |
1da177e4c
|
2152 |
/* Ok if weak. */ |
9bea7f239
|
2153 |
if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) |
1da177e4c
|
2154 |
break; |
9bea7f239
|
2155 |
ret = PTR_ERR(ksym) ?: -ENOENT; |
62267e0ec
|
2156 2157 2158 |
pr_warn("%s: Unknown symbol %s (err %d) ", mod->name, name, ret); |
1da177e4c
|
2159 2160 2161 2162 |
break; default: /* Divert to percpu allocation if a percpu var. */ |
49668688d
|
2163 |
if (sym[i].st_shndx == info->index.pcpu) |
259354dea
|
2164 |
secbase = (unsigned long)mod_percpu(mod); |
1da177e4c
|
2165 |
else |
49668688d
|
2166 |
secbase = info->sechdrs[sym[i].st_shndx].sh_addr; |
1da177e4c
|
2167 2168 2169 2170 2171 2172 2173 |
sym[i].st_value += secbase; break; } } return ret; } |
49668688d
|
2174 |
static int apply_relocations(struct module *mod, const struct load_info *info) |
22e268ebe
|
2175 2176 2177 2178 2179 |
{ unsigned int i; int err = 0; /* Now do relocations. */ |
49668688d
|
2180 2181 |
for (i = 1; i < info->hdr->e_shnum; i++) { unsigned int infosec = info->sechdrs[i].sh_info; |
22e268ebe
|
2182 2183 |
/* Not a valid relocation section? */ |
49668688d
|
2184 |
if (infosec >= info->hdr->e_shnum) |
22e268ebe
|
2185 2186 2187 |
continue; /* Don't bother with non-allocated sections */ |
49668688d
|
2188 |
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) |
22e268ebe
|
2189 |
continue; |
1ce15ef4f
|
2190 2191 2192 |
/* Livepatch relocation sections are applied by livepatch */ if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) continue; |
49668688d
|
2193 2194 2195 2196 2197 2198 |
if (info->sechdrs[i].sh_type == SHT_REL) err = apply_relocate(info->sechdrs, info->strtab, info->index.sym, i, mod); else if (info->sechdrs[i].sh_type == SHT_RELA) err = apply_relocate_add(info->sechdrs, info->strtab, info->index.sym, i, mod); |
22e268ebe
|
2199 2200 2201 2202 2203 |
if (err < 0) break; } return err; } |
088af9a6e
|
2204 2205 2206 2207 2208 2209 2210 |
/* Additional bytes needed by arch in front of individual sections */ unsigned int __weak arch_mod_section_prepend(struct module *mod, unsigned int section) { /* default implementation just returns zero */ return 0; } |
1da177e4c
|
2211 |
/* Update size with this section: return offset. */ |
088af9a6e
|
2212 2213 |
static long get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section) |
1da177e4c
|
2214 2215 |
{ long ret; |
088af9a6e
|
2216 |
*size += arch_mod_section_prepend(mod, section); |
1da177e4c
|
2217 2218 2219 2220 2221 2222 2223 2224 2225 |
ret = ALIGN(*size, sechdr->sh_addralign ?: 1); *size = ret + sechdr->sh_size; return ret; } /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld might -- code, read-only data, read-write data, small data. Tally sizes, and place the offsets into sh_entsize fields: high bit means it belongs in init. */ |
49668688d
|
2226 |
static void layout_sections(struct module *mod, struct load_info *info) |
1da177e4c
|
2227 2228 2229 2230 2231 2232 2233 |
{ static unsigned long const masks[][2] = { /* NOTE: all executable code must be the first section * in this array; otherwise modify the text_size * finder in the two loops below */ { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, |
444d13ff1
|
2234 |
{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, |
1da177e4c
|
2235 2236 2237 2238 |
{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, { ARCH_SHF_SMALL | SHF_ALLOC, 0 } }; unsigned int m, i; |
49668688d
|
2239 2240 |
for (i = 0; i < info->hdr->e_shnum; i++) info->sechdrs[i].sh_entsize = ~0UL; |
1da177e4c
|
2241 |
|
5e1241692
|
2242 2243 |
pr_debug("Core section allocation order: "); |
1da177e4c
|
2244 |
for (m = 0; m < ARRAY_SIZE(masks); ++m) { |
49668688d
|
2245 2246 2247 |
for (i = 0; i < info->hdr->e_shnum; ++i) { Elf_Shdr *s = &info->sechdrs[i]; const char *sname = info->secstrings + s->sh_name; |
1da177e4c
|
2248 2249 2250 2251 |
if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL |
49668688d
|
2252 |
|| strstarts(sname, ".init")) |
1da177e4c
|
2253 |
continue; |
7523e4dc5
|
2254 |
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); |
5e1241692
|
2255 2256 |
pr_debug("\t%s ", sname); |
1da177e4c
|
2257 |
} |
84e1c6bb3
|
2258 2259 |
switch (m) { case 0: /* executable */ |
7523e4dc5
|
2260 2261 |
mod->core_layout.size = debug_align(mod->core_layout.size); mod->core_layout.text_size = mod->core_layout.size; |
84e1c6bb3
|
2262 2263 |
break; case 1: /* RO: text and ro-data */ |
7523e4dc5
|
2264 2265 |
mod->core_layout.size = debug_align(mod->core_layout.size); mod->core_layout.ro_size = mod->core_layout.size; |
84e1c6bb3
|
2266 |
break; |
444d13ff1
|
2267 2268 2269 2270 2271 |
case 2: /* RO after init */ mod->core_layout.size = debug_align(mod->core_layout.size); mod->core_layout.ro_after_init_size = mod->core_layout.size; break; case 4: /* whole core */ |
7523e4dc5
|
2272 |
mod->core_layout.size = debug_align(mod->core_layout.size); |
84e1c6bb3
|
2273 2274 |
break; } |
1da177e4c
|
2275 |
} |
5e1241692
|
2276 2277 |
pr_debug("Init section allocation order: "); |
1da177e4c
|
2278 |
for (m = 0; m < ARRAY_SIZE(masks); ++m) { |
49668688d
|
2279 2280 2281 |
for (i = 0; i < info->hdr->e_shnum; ++i) { Elf_Shdr *s = &info->sechdrs[i]; const char *sname = info->secstrings + s->sh_name; |
1da177e4c
|
2282 2283 2284 2285 |
if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL |
49668688d
|
2286 |
|| !strstarts(sname, ".init")) |
1da177e4c
|
2287 |
continue; |
7523e4dc5
|
2288 |
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) |
1da177e4c
|
2289 |
| INIT_OFFSET_MASK); |
5e1241692
|
2290 2291 |
pr_debug("\t%s ", sname); |
1da177e4c
|
2292 |
} |
84e1c6bb3
|
2293 2294 |
switch (m) { case 0: /* executable */ |
7523e4dc5
|
2295 2296 |
mod->init_layout.size = debug_align(mod->init_layout.size); mod->init_layout.text_size = mod->init_layout.size; |
84e1c6bb3
|
2297 2298 |
break; case 1: /* RO: text and ro-data */ |
7523e4dc5
|
2299 2300 |
mod->init_layout.size = debug_align(mod->init_layout.size); mod->init_layout.ro_size = mod->init_layout.size; |
84e1c6bb3
|
2301 |
break; |
444d13ff1
|
2302 2303 2304 2305 2306 2307 2308 2309 |
case 2: /* * RO after init doesn't apply to init_layout (only * core_layout), so it just takes the value of ro_size. */ mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; break; case 4: /* whole init */ |
7523e4dc5
|
2310 |
mod->init_layout.size = debug_align(mod->init_layout.size); |
84e1c6bb3
|
2311 2312 |
break; } |
1da177e4c
|
2313 2314 |
} } |
1da177e4c
|
2315 2316 2317 2318 |
static void set_license(struct module *mod, const char *license) { if (!license) license = "unspecified"; |
fa3ba2e81
|
2319 |
if (!license_is_gpl_compatible(license)) { |
25ddbb18a
|
2320 |
if (!test_taint(TAINT_PROPRIETARY_MODULE)) |
bddb12b32
|
2321 2322 2323 |
pr_warn("%s: module license '%s' taints kernel. ", mod->name, license); |
373d4d099
|
2324 2325 |
add_taint_module(mod, TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); |
1da177e4c
|
2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 |
} } /* Parse tag=value strings from .modinfo section */ static char *next_string(char *string, unsigned long *secsize) { /* Skip non-zero chars */ while (string[0]) { string++; if ((*secsize)-- <= 1) return NULL; } /* Skip any zero padding. */ while (!string[0]) { string++; if ((*secsize)-- <= 1) return NULL; } return string; } |
49668688d
|
2347 |
static char *get_modinfo(struct load_info *info, const char *tag) |
1da177e4c
|
2348 2349 2350 |
{ char *p; unsigned int taglen = strlen(tag); |
49668688d
|
2351 2352 |
Elf_Shdr *infosec = &info->sechdrs[info->index.info]; unsigned long size = infosec->sh_size; |
1da177e4c
|
2353 |
|
5fdc7db64
|
2354 2355 2356 2357 2358 |
/* * get_modinfo() calls made before rewrite_section_headers() * must use sh_offset, as sh_addr isn't set! */ for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) { |
1da177e4c
|
2359 2360 2361 2362 2363 |
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') return p + taglen + 1; } return NULL; } |
49668688d
|
2364 |
static void setup_modinfo(struct module *mod, struct load_info *info) |
c988d2b28
|
2365 2366 2367 2368 2369 2370 |
{ struct module_attribute *attr; int i; for (i = 0; (attr = modinfo_attrs[i]); i++) { if (attr->setup) |
49668688d
|
2371 |
attr->setup(mod, get_modinfo(info, attr->attr.name)); |
c988d2b28
|
2372 2373 |
} } |
c988d2b28
|
2374 |
|
a263f7763
|
2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 |
static void free_modinfo(struct module *mod) { struct module_attribute *attr; int i; for (i = 0; (attr = modinfo_attrs[i]); i++) { if (attr->free) attr->free(mod); } } |
1da177e4c
|
2385 |
#ifdef CONFIG_KALLSYMS |
15bba37d6
|
2386 2387 2388 2389 2390 2391 |
/* lookup symbol in given range of kernel_symbols */ static const struct kernel_symbol *lookup_symbol(const char *name, const struct kernel_symbol *start, const struct kernel_symbol *stop) { |
9d63487f8
|
2392 2393 |
return bsearch(name, start, stop - start, sizeof(struct kernel_symbol), cmp_name); |
15bba37d6
|
2394 |
} |
ca4787b77
|
2395 2396 |
static int is_exported(const char *name, unsigned long value, const struct module *mod) |
1da177e4c
|
2397 |
{ |
ca4787b77
|
2398 2399 2400 |
const struct kernel_symbol *ks; if (!mod) ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); |
3fd6805f4
|
2401 |
else |
ca4787b77
|
2402 |
ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); |
7290d5809
|
2403 |
return ks != NULL && kernel_symbol_value(ks) == value; |
1da177e4c
|
2404 2405 2406 |
} /* As per nm */ |
eded41c1c
|
2407 |
static char elf_type(const Elf_Sym *sym, const struct load_info *info) |
1da177e4c
|
2408 |
{ |
eded41c1c
|
2409 |
const Elf_Shdr *sechdrs = info->sechdrs; |
1da177e4c
|
2410 2411 2412 2413 2414 2415 2416 2417 |
if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) return 'v'; else return 'w'; } if (sym->st_shndx == SHN_UNDEF) return 'U'; |
e02244185
|
2418 |
if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) |
1da177e4c
|
2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 |
return 'a'; if (sym->st_shndx >= SHN_LORESERVE) return '?'; if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) return 't'; if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) return 'r'; else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) return 'g'; else return 'd'; } if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) return 's'; else return 'b'; } |
eded41c1c
|
2439 2440 |
if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, ".debug")) { |
1da177e4c
|
2441 |
return 'n'; |
eded41c1c
|
2442 |
} |
1da177e4c
|
2443 2444 |
return '?'; } |
4a4962263
|
2445 |
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, |
e02244185
|
2446 |
unsigned int shnum, unsigned int pcpundx) |
4a4962263
|
2447 2448 2449 2450 2451 2452 2453 |
{ const Elf_Shdr *sec; if (src->st_shndx == SHN_UNDEF || src->st_shndx >= shnum || !src->st_name) return false; |
e02244185
|
2454 2455 2456 2457 |
#ifdef CONFIG_KALLSYMS_ALL if (src->st_shndx == pcpundx) return true; #endif |
4a4962263
|
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 |
sec = sechdrs + src->st_shndx; if (!(sec->sh_flags & SHF_ALLOC) #ifndef CONFIG_KALLSYMS_ALL || !(sec->sh_flags & SHF_EXECINSTR) #endif || (sec->sh_entsize & INIT_OFFSET_MASK)) return false; return true; } |
48fd11880
|
2468 2469 2470 2471 2472 2473 2474 |
/* * We only allocate and copy the strings needed by the parts of symtab * we keep. This is simple, but has the effect of making multiple * copies of duplicates. We could be more sophisticated, see * linux-kernel thread starting with * <73defb5e4bca04a6431392cc341112b1@localhost>. */ |
49668688d
|
2475 |
static void layout_symtab(struct module *mod, struct load_info *info) |
4a4962263
|
2476 |
{ |
49668688d
|
2477 2478 |
Elf_Shdr *symsect = info->sechdrs + info->index.sym; Elf_Shdr *strsect = info->sechdrs + info->index.str; |
4a4962263
|
2479 |
const Elf_Sym *src; |
54523ec71
|
2480 |
unsigned int i, nsrc, ndst, strtab_size = 0; |
4a4962263
|
2481 2482 2483 |
/* Put symbol section at end of init part of module. */ symsect->sh_flags |= SHF_ALLOC; |
7523e4dc5
|
2484 |
symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, |
49668688d
|
2485 |
info->index.sym) | INIT_OFFSET_MASK; |
5e1241692
|
2486 2487 |
pr_debug("\t%s ", info->secstrings + symsect->sh_name); |
4a4962263
|
2488 |
|
49668688d
|
2489 |
src = (void *)info->hdr + symsect->sh_offset; |
4a4962263
|
2490 |
nsrc = symsect->sh_size / sizeof(*src); |
70b1e9161
|
2491 |
|
48fd11880
|
2492 |
/* Compute total space required for the core symbols' strtab. */ |
59ef28b1f
|
2493 |
for (ndst = i = 0; i < nsrc; i++) { |
1ce15ef4f
|
2494 |
if (i == 0 || is_livepatch_module(mod) || |
e02244185
|
2495 2496 |
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { |
59ef28b1f
|
2497 |
strtab_size += strlen(&info->strtab[src[i].st_name])+1; |
48fd11880
|
2498 |
ndst++; |
554bdfe5a
|
2499 |
} |
59ef28b1f
|
2500 |
} |
4a4962263
|
2501 2502 |
/* Append room for core symbols at end of core part. */ |
7523e4dc5
|
2503 2504 2505 2506 |
info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); mod->core_layout.size += strtab_size; mod->core_layout.size = debug_align(mod->core_layout.size); |
4a4962263
|
2507 |
|
554bdfe5a
|
2508 2509 |
/* Put string table section at end of init part of module. */ strsect->sh_flags |= SHF_ALLOC; |
7523e4dc5
|
2510 |
strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, |
49668688d
|
2511 |
info->index.str) | INIT_OFFSET_MASK; |
5e1241692
|
2512 2513 |
pr_debug("\t%s ", info->secstrings + strsect->sh_name); |
8244062ef
|
2514 2515 2516 2517 2518 2519 2520 |
/* We'll tack temporary mod_kallsyms on the end. */ mod->init_layout.size = ALIGN(mod->init_layout.size, __alignof__(struct mod_kallsyms)); info->mod_kallsyms_init_off = mod->init_layout.size; mod->init_layout.size += sizeof(struct mod_kallsyms); mod->init_layout.size = debug_align(mod->init_layout.size); |
4a4962263
|
2521 |
} |
8244062ef
|
2522 2523 2524 2525 2526 |
/* * We use the full symtab and strtab which layout_symtab arranged to * be appended to the init section. Later we switch to the cut-down * core-only ones. */ |
811d66a0e
|
2527 |
static void add_kallsyms(struct module *mod, const struct load_info *info) |
1da177e4c
|
2528 |
{ |
4a4962263
|
2529 2530 2531 |
unsigned int i, ndst; const Elf_Sym *src; Elf_Sym *dst; |
554bdfe5a
|
2532 |
char *s; |
eded41c1c
|
2533 |
Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; |
1da177e4c
|
2534 |
|
8244062ef
|
2535 2536 2537 2538 2539 |
/* Set up to point into init section. */ mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; mod->kallsyms->symtab = (void *)symsec->sh_addr; mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); |
511ca6ae4
|
2540 |
/* Make sure we get permanent strtab: don't use info->strtab. */ |
8244062ef
|
2541 |
mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; |
1da177e4c
|
2542 2543 |
/* Set types up while we still have access to sections. */ |
8244062ef
|
2544 2545 2546 2547 2548 2549 2550 2551 2552 |
for (i = 0; i < mod->kallsyms->num_symtab; i++) mod->kallsyms->symtab[i].st_info = elf_type(&mod->kallsyms->symtab[i], info); /* Now populate the cut down core kallsyms for after init. */ mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; src = mod->kallsyms->symtab; for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { |
1ce15ef4f
|
2553 |
if (i == 0 || is_livepatch_module(mod) || |
e02244185
|
2554 2555 |
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { |
59ef28b1f
|
2556 |
dst[ndst] = src[i]; |
8244062ef
|
2557 2558 |
dst[ndst++].st_name = s - mod->core_kallsyms.strtab; s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], |
59ef28b1f
|
2559 2560 |
KSYM_NAME_LEN) + 1; } |
4a4962263
|
2561 |
} |
8244062ef
|
2562 |
mod->core_kallsyms.num_symtab = ndst; |
1da177e4c
|
2563 2564 |
} #else |
/* Stubs used when CONFIG_KALLSYMS is disabled: no symbol tables are kept. */
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */
/*
 * Register the module's __verbose (dynamic debug) descriptors with the
 * dynamic debug core; a no-op when the module carries none or when
 * CONFIG_DYNAMIC_DEBUG is off.
 */
static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, mod->name))
		pr_err("dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}
/* Unregister the module's dynamic debug descriptors (if it had any). */
static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(mod->name);
}
/*
 * Default allocator for module text/data; weak so architectures can
 * override it (e.g. to place modules within relative-branch range).
 */
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}
#ifdef CONFIG_DEBUG_KMEMLEAK
/*
 * Tell kmemleak which parts of the loaded module to scan for pointers:
 * the struct module itself plus every allocated, writable, non-executable
 * section (those are the only ones that can legitimately hold heap pointers).
 */
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that's not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif
#ifdef CONFIG_MODULE_SIG
/*
 * Verify the appended module signature (if present) and strip it from
 * info->len.  Returns 0 on success or when unsigned modules are permitted;
 * a negative error when verification fails or signatures are enforced.
 */
static int module_sig_check(struct load_info *info, int flags)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	/*
	 * Require flags == 0, as a module with version information
	 * removed is no longer the module that was signed
	 */
	if (flags == 0 &&
	    info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING,
		   markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, info);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !is_module_sig_enforced())
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info, int flags)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */
34e1169d9
|
2655 2656 |
/* Sanity checks against invalid binaries, wrong arch, weird elf version. */ static int elf_header_check(struct load_info *info) |
40dd2560e
|
2657 |
{ |
34e1169d9
|
2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 |
if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 || info->hdr->e_type != ET_REL || !elf_check_arch(info->hdr) || info->hdr->e_shentsize != sizeof(Elf_Shdr)) return -ENOEXEC; if (info->hdr->e_shoff >= info->len || (info->hdr->e_shnum * sizeof(Elf_Shdr) > info->len - info->hdr->e_shoff)) return -ENOEXEC; |
40dd2560e
|
2671 |
|
34e1169d9
|
2672 2673 |
return 0; } |
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

/*
 * Copy a large user buffer into kernel memory in bounded chunks,
 * calling cond_resched() between chunks so a huge module image does
 * not monopolise the CPU.  Returns 0 or -EFAULT.
 */
static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
	do {
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)
			return -EFAULT;
		cond_resched();
		dst += n;
		usrc += n;
		len -= n;
	} while (len);
	return 0;
}
#ifdef CONFIG_LIVEPATCH
/*
 * Honour the "livepatch" modinfo flag: mark the module as a livepatch
 * module and taint the kernel accordingly.
 */
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
	if (get_modinfo(info, "livepatch")) {
		mod->klp = true;
		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
			       mod->name);
	}

	return 0;
}
#else /* !CONFIG_LIVEPATCH */
/* Without livepatch support, refuse to load modules that require it. */
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
	if (get_modinfo(info, "livepatch")) {
		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
		       mod->name);
		return -ENOEXEC;
	}

	return 0;
}
#endif /* CONFIG_LIVEPATCH */
/* Warn (but do not refuse) when a module was built without retpolines. */
static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
{
	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
		return;

	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
		mod->name);
}
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				  struct load_info *info)
{
	int err;

	info->len = len;
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	/* Let the LSM veto module loading before we allocate anything. */
	err = security_kernel_load_data(LOADING_MODULE);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len,
			GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}
/* Free the temporary copy of the module image made by copy_module_from_user(). */
static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}
/*
 * Validate section extents against the image length and rewrite each
 * sh_addr to point at the section's bytes inside the temporary copy.
 * Also drops SHF_ALLOC from sections we track but never keep resident.
 */
static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;

	return 0;
}
/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Set info->mod to the temporary copy of the module in info->hdr. The final one
 * will be allocated in move_module().
 */
static int setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	/* Try to find a name early so we can log errors with a module name */
	info->index.info = find_sec(info, ".modinfo");
	if (!info->index.info)
		info->name = "(missing .modinfo section)";
	else
		info->name = get_modinfo(info, "name");

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			/* sh_link of a SYMTAB section is its string table. */
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", info->name);
		return -ENOEXEC;
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("%s: No module found in object\n",
			info->name ?: "(missing .modinfo name field)");
		return -ENOEXEC;
	}
	/* This is temporary: point mod into copy of data. */
	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;

	/*
	 * If we didn't load the .modinfo 'name' field earlier, fall back to
	 * on-disk struct mod 'name' field.
	 */
	if (!info->name)
		info->name = info->mod->name;

	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");

	info->index.pcpu = find_pcpusec(info);

	return 0;
}
/*
 * Check the .modinfo section: vermagic compatibility, out-of-tree /
 * staging / livepatch taints, and the module licence.  Returns 0 or a
 * negative error when the module must not be loaded.
 */
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       info->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (!get_modinfo(info, "intree")) {
		/* Only warn on the first out-of-tree module. */
		if (!test_taint(TAINT_OOT_MODULE))
			pr_warn("%s: loading out-of-tree module taints kernel.\n",
				mod->name);
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
	}

	check_modinfo_retpoline(mod, info);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	err = check_modinfo_livepatch(mod, info);
	if (err)
		return err;

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}
/*
 * Locate all the well-known special sections (params, export tables,
 * constructors, tracing metadata, exception table, dynamic debug) and
 * cache their addresses/counts in struct module / load_info.
 */
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
				sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
			mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					sizeof(*mod->jump_entries),
					&mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
					sizeof(*mod->trace_evals),
					&mod->num_trace_evals);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
					    sizeof(*mod->ei_funcs),
					    &mod->num_ei_funcs);
#endif
	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}
/*
 * Allocate the final core (and optional init) memory for the module and
 * copy every SHF_ALLOC section into place, updating each sh_addr to the
 * final location.  The layout offsets were stored in sh_entsize by
 * layout_sections().
 */
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc(mod->core_layout.size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_layout.size);
	mod->core_layout.base = ptr;

	if (mod->init_layout.size) {
		ptr = module_alloc(mod->init_layout.size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree(mod->core_layout.base);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_layout.size);
		mod->init_layout.base = ptr;
	} else
		mod->init_layout.base = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		/* INIT_OFFSET_MASK flags sections destined for init memory. */
		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->init_layout.base
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->core_layout.base + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
/*
 * Apply licence-related taints for known bad actors and, with
 * CONFIG_MODVERSIONS, ensure every export table has a matching CRC table.
 */
static int check_module_license_and_versions(struct module *mod)
{
	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);

	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
		pr_warn("%s: module license taints kernel.\n", mod->name);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}

/* Flush the icache for the module's freshly-copied text. */
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->init_layout.base)
		flush_icache_range((unsigned long)mod->init_layout.base,
				   (unsigned long)mod->init_layout.base
				   + mod->init_layout.size);
	flush_icache_range((unsigned long)mod->core_layout.base,
			   (unsigned long)mod->core_layout.base
			   + mod->core_layout.size);

	set_fs(old_fs);
}
/* Default no-op; architectures override to adjust section contents/sizes. */
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
be7de5f91
|
3112 3113 |
/* module_blacklist is a comma-separated list of module names */ static char *module_blacklist; |
96b5b1945
|
3114 |
static bool blacklisted(const char *module_name) |
be7de5f91
|
3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 |
{ const char *p; size_t len; if (!module_blacklist) return false; for (p = module_blacklist; *p; p += len) { len = strcspn(p, ","); if (strlen(module_name) == len && !memcmp(module_name, p, len)) return true; if (p[len] == ',') len++; } return false; } core_param(module_blacklist, module_blacklist, charp, 0400); |
/*
 * Run the modinfo checks, let the architecture frob sections, lay out and
 * allocate the final module memory, and return the struct module now
 * living at its final address (or an ERR_PTR).
 */
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	struct module *mod;
	unsigned int ndx;
	int err;

	err = check_modinfo(info->mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, info->mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/*
	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
	 * layout_sections() can put it in the right place.
	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
	 */
	ndx = find_sec(info, ".data..ro_after_init");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(info->mod, info);
	layout_symtab(info->mod, info);

	/* Allocate and move to the final place */
	err = move_module(info->mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->init_layout.base);
	module_memfree(mod->core_layout.base);
}
/* Default no-op; architectures override for arch-specific finalization. */
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
/*
 * Work that must happen after relocations have been applied: sort the
 * exception table, copy the relocated per-cpu area, set up kallsyms and
 * run the architecture's finalizer.
 */
static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}
/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	/* "Done" means gone, live, or on its way out. */
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}
/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
	struct rcu_head rcu;
	void *module_init;
};

/* RCU callback: free the init sections once no walker can still see them. */
static void do_free_init(struct rcu_head *head)
{
	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
	module_memfree(m->module_init);
	kfree(m);
}
/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->init_layout.base;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by perfomring async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
		async_synchronize_full();

	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
			mod->init_layout.size);
	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	module_enable_ro(mod, true);
	mod_tree_remove_init(mod);
	disable_ro_nx(&mod->init_layout);
	module_arch_freeing_init(mod);
	mod->init_layout.base = NULL;
	mod->init_layout.size = 0;
	mod->init_layout.ro_size = 0;
	mod->init_layout.ro_after_init_size = 0;
	mod->init_layout.text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_sched(), but we don't want to slow down the success
	 * path, so use actual RCU here.
	 * Note that module_alloc() on most architectures creates W+X page
	 * mappings which won't be cleaned up until do_free_init() runs.  Any
	 * code such as mark_rodata_ro() which depends on those mappings to
	 * be cleaned up needs to sync with the queued work - ie
	 * rcu_barrier_sched()
	 */
	call_rcu_sched(&freeinit->rcu, do_free_init);
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_sched();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}

/* Module loading requires CAP_SYS_MODULE and loading not globally disabled. */
static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	mod_update_bounds(mod);
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}

/*
 * Final formation step: verify exports are unique, register with the
 * module bug list, apply RO/NX protections and advance to COMING state.
 */
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	module_enable_ro(mod, false);
	module_enable_nx(mod);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}
4c973d162
|
3434 3435 |
/*
 * Notify interested subsystems (ftrace, livepatch, notifier chain) that
 * this module has reached MODULE_STATE_COMING.  Returns 0 or a negative
 * errno from klp_module_coming().
 */
static int prepare_coming_module(struct module *mod)
{
	int err;

	ftrace_module_enable(mod);
	err = klp_module_coming(mod);
	if (err)
		return err;

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;
}

/*
 * parse_args() callback for parameters that don't match any declared
 * module parameter.  Handles the magic 'async_probe' and 'dyndbg' args;
 * anything else is warned about but deliberately NOT treated as an
 * error (return 0 keeps module load going).
 */
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
d913188c7
|
3462 3463 |
/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections.

   The error-unwind labels at the bottom mirror the setup stages in
   reverse order; jumping to a label undoes everything acquired before
   the failing stage.  On success, ownership of 'info' is released here
   (free_copy) and do_init_module() takes over the module. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err = 0;
	char *after_dashes;

	/* Stage 1: validate the ELF image while it is still a private copy. */
	err = elf_header_check(info);
	if (err)
		goto free_copy;

	err = setup_load_info(info, flags);
	if (err)
		goto free_copy;

	if (blacklisted(info->name)) {
		err = -EPERM;
		goto free_copy;
	}

	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	err = rewrite_section_headers(info, flags);
	if (err)
		goto free_copy;

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info, info->mod)) {
		err = -ENOEXEC;
		goto free_copy;
	}

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	audit_log_kern_module(mod->name);

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(mod, info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	err = prepare_coming_module(mod);
	if (err)
		goto bug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 sysfs_cleanup:
	mod_sysfs_teardown(mod);
 coming_cleanup:
	mod->state = MODULE_STATE_GOING;
	destroy_params(mod->kp, mod->num_kp);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	/* we can't deallocate the module until we clear memory protection */
	module_disable_ro(mod);
	module_disable_nx(mod);

 ddebug_cleanup:
	ftrace_release_mod(mod);
	dynamic_debug_remove(mod, info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_sched();
	mutex_unlock(&module_mutex);
 free_module:
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}
17da2bd90
|
3655 3656 |
/*
 * init_module(2): load a module from a userspace memory image.
 * The image is copied into a kernel buffer by copy_module_from_user(),
 * then handed to load_module() with no flags.
 */
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	/* Capability and modules_disabled checks. */
	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	/* load_module() owns 'info' from here and frees it on all paths. */
	return load_module(&info, uargs, 0);
}
94462ad3b
|
3675 |
|
2f3238aeb
|
3676 |
/*
 * finit_module(2): load a module from a file descriptor.  The file is
 * read in full via kernel_read_file_from_fd() (which applies security
 * hooks for READING_MODULE) before being handed to load_module().
 */
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	struct load_info info = { };
	loff_t size;
	void *hdr;
	int err;

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	/* Only the two MODULE_INIT_* flags are valid. */
	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
				       READING_MODULE);
	if (err)
		return err;
	info.hdr = hdr;
	info.len = size;

	/* load_module() owns 'info' from here and frees it on all paths. */
	return load_module(&info, uargs, flags);
}

/* Half-open range test: start <= addr < start + size. */
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	/* Also skip compiler-generated local labels (".L..."). */
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}
8244062ef
|
3721 |
/* Name of symbol 'symnum' in the given module's kallsyms string table. */
static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}

/*
 * Resolve 'addr' to the closest preceding named symbol in 'mod'.
 * Optionally reports the symbol's size (distance to the next symbol or
 * end of text) and the offset of 'addr' within it.  Returns NULL if no
 * suitable symbol precedes the address.  Caller must hold RCU-sched
 * (preemption disabled) for the kallsyms dereference.
 */
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worse, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
	else
		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (*symname(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(symname(kallsyms, i)))
			continue;

		if (kallsyms->symtab[i].st_value <= addr
		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
			best = i;
		if (kallsyms->symtab[i].st_value > addr
		    && kallsyms->symtab[i].st_value < nextval)
			nextval = kallsyms->symtab[i].st_value;
	}

	/* best == 0 means no symbol at or below 'addr' was found. */
	if (!best)
		return NULL;

	if (size)
		*size = nextval - kallsyms->symtab[best].st_value;
	if (offset)
		*offset = addr - kallsyms->symtab[best].st_value;
	return symname(kallsyms, best);
}

/*
 * Arch-overridable: on function-descriptor ABIs (e.g. ia64, ppc64 ELFv1)
 * this maps a descriptor to the actual entry address.  Default: identity.
 */
void * __weak dereference_module_function_descriptor(struct module *mod,
						     void *ptr)
{
	return ptr;
}
6dd06c9fb
|
3772 3773 |
/* For kallsyms to ask for address resolution. NULL means not found. Careful * not to lock to avoid deadlock on oopses, simply disable preemption. */ |
92dfc9dc7
|
3774 |
const char *module_address_lookup(unsigned long addr, |
6dd06c9fb
|
3775 3776 3777 3778 |
unsigned long *size, unsigned long *offset, char **modname, char *namebuf) |
1da177e4c
|
3779 |
{ |
cb2a52052
|
3780 |
const char *ret = NULL; |
b7df4d1b2
|
3781 |
struct module *mod; |
1da177e4c
|
3782 |
|
cb2a52052
|
3783 |
preempt_disable(); |
b7df4d1b2
|
3784 3785 3786 3787 3788 |
mod = __module_address(addr); if (mod) { if (modname) *modname = mod->name; ret = get_ksymbol(mod, addr, size, offset); |
1da177e4c
|
3789 |
} |
6dd06c9fb
|
3790 3791 3792 3793 3794 |
/* Make a copy in here where it's safe */ if (ret) { strncpy(namebuf, ret, KSYM_NAME_LEN - 1); ret = namebuf; } |
cb2a52052
|
3795 |
preempt_enable(); |
b7df4d1b2
|
3796 |
|
92dfc9dc7
|
3797 |
return ret; |
1da177e4c
|
3798 |
} |
9d65cb4a1
|
3799 3800 3801 |
/*
 * Find the module symbol covering 'addr' and copy its name into
 * 'symname' (buffer must be at least KSYM_NAME_LEN).  Returns 0 on
 * success, -ERANGE if the address is not inside any formed module.
 * Lockless: relies on preemption-disable + RCU list traversal.
 */
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

/*
 * Like lookup_module_symbol_name(), but also reports size/offset and
 * optionally copies the module name ('modname' >= MODULE_NAME_LEN) and
 * symbol name ('name' >= KSYM_NAME_LEN).  Returns 0 or -ERANGE.
 */
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
ea07890a6
|
3847 3848 |
/*
 * Return the 'symnum'-th module symbol overall, walking each formed
 * module's symbol table in list order.  Fills value/type/name/
 * module_name/exported.  Returns 0 on success, -ERANGE when symnum is
 * past the last module symbol.  Used by /proc/kallsyms iteration.
 */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
			char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			*value = kallsyms->symtab[symnum].st_value;
			*type = kallsyms->symtab[symnum].st_info;
			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		/* Not in this module: skip past its symbols. */
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

/* Find a defined (non-SHN_UNDEF) symbol by name in one module; 0 if absent. */
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++)
		if (strcmp(name, symname(kallsyms, i)) == 0 &&
		    kallsyms->symtab[i].st_shndx != SHN_UNDEF)
			return kallsyms->symtab[i].st_value;
	return 0;
}

/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		/* "module:symbol" form: look only in the named module. */
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		/* Bare symbol: first match across all formed modules wins. */
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
75a66614d
|
3907 3908 3909 3910 3911 3912 3913 3914 |
/*
 * Invoke 'fn' for every defined symbol of every formed module, stopping
 * early and returning fn's value if it is non-zero.  Caller must hold
 * module_mutex (asserted below).
 */
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {

			/* Skip undefined (imported) symbols. */
			if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
				continue;

			ret = fn(data, symname(kallsyms, i), mod,
				 kallsyms->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
1da177e4c
|
3933 |
#endif /* CONFIG_KALLSYMS */ |
7fd8329ba
|
3934 3935 3936 3937 |
/* Maximum number of characters written by module_flags() */
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)

/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	/* Only emit "(...)" when there is something to show. */
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules.
 * module_mutex is held for the whole seq_file session (m_start..m_stop). */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

/* Emit one /proc/modules line for module 'p'. */
static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[MODULE_FLAGS_BUF_SIZE];
	void *value;

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_layout.size + mod->core_layout.size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	value = m->private ? NULL : mod->core_layout.base;
	seq_printf(m, " 0x%px", value);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps address

   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.
*/
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

/*
 * This also sets the "private" pointer to non-NULL if the
 * kernel pointers should be hidden (so you can just test
 * "m->private" to see if you should keep the values private).
 *
 * We use the same logic as for /proc/kallsyms.
 */
static int modules_open(struct inode *inode, struct file *file)
{
	int err = seq_open(file, &modules_op);

	if (!err) {
		struct seq_file *m = file->private_data;
		/* Non-NULL private => hide addresses in m_show(). */
		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
	}

	return err;
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif
1da177e4c
|
4050 4051 4052 |
/* Given an address, look for it in the module exception tables.
 * Returns the matching entry or NULL.  Safe in fault context: no
 * locking, only preemption-disable around the lookup. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (!mod)
		goto out;

	if (!mod->num_exentries)
		goto out;

	e = search_extable(mod->extable,
			   mod->num_exentries,
			   addr);
out:
	preempt_enable();

	/*
	 * Now, if we found one, we are running inside it now, hence
	 * we cannot unload the module, hence no refcnt needed.
	 */
	return e;
}
4d435f9d8
|
4075 |
/* |
e610499e2
|
4076 4077 4078 4079 4080 |
* is_module_address - is this address inside a module? * @addr: the address to check. * * See is_module_text_address() if you simply want to see if the address * is code (not data). |
4d435f9d8
|
4081 |
*/ |
e610499e2
|
4082 |
bool is_module_address(unsigned long addr) |
4d435f9d8
|
4083 |
{ |
e610499e2
|
4084 |
bool ret; |
4d435f9d8
|
4085 |
|
24da1cbff
|
4086 |
preempt_disable(); |
e610499e2
|
4087 |
ret = __module_address(addr) != NULL; |
24da1cbff
|
4088 |
preempt_enable(); |
4d435f9d8
|
4089 |
|
e610499e2
|
4090 |
return ret; |
4d435f9d8
|
4091 |
} |
e610499e2
|
4092 4093 4094 4095 4096 4097 4098 |
/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	/* Fast reject: outside the min/max range of all module memory. */
	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		/* Unformed modules are invisible to address lookups. */
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_address);

/*
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);
e610499e2
|
4153 |
|
1da177e4c
|
4154 4155 4156 4157 |
/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[MODULE_FLAGS_BUF_SIZE];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	/* Empty string means nothing has been unloaded yet. */
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}
1da177e4c
|
4174 |
#ifdef CONFIG_MODVERSIONS |
8c8ef42ae
|
4175 4176 4177 4178 4179 4180 |
/* Generate the signature for all relevant module structures here. * If these change, we don't want to try to parse the module. */ void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp, struct kernel_symbol *ks, |
654986462
|
4181 |
struct tracepoint * const *tp) |
8c8ef42ae
|
4182 4183 4184 |
{ } EXPORT_SYMBOL(module_layout); |
1da177e4c
|
4185 |
#endif |