Commit 3a642e99babe0617febb6f402e1e063479f489db

Authored by Rusty Russell
Parent: 2f0f2a334b

modules: Take a shortcut for checking if an address is in a module

This patch keeps track of the boundaries of module allocation, in
order to speed up module_text_address().

Inspired by Arjan's version, which required arch-specific defines:

	Various pieces of the kernel (lockdep, latencytop, etc) tend
	to store backtraces, sometimes at a relatively high
	frequency. In itself this isn't a big performance deal (after
	all you're using diagnostics features), but there have been
	some complaints from people who have over 100 modules loaded
	that this is a tad too slow.

	This is due to the new backtracer code which looks at every
	slot on the stack to see if it's a kernel/module text address,
	so that's 1024 slots.  1024 times 100 modules... that's a lot
	of list walking.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

Showing 1 changed file with 22 additions and 2 deletions (side-by-side diff)

... ... @@ -70,6 +70,9 @@
70 70  
71 71 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72 72  
  73 +/* Bounds of module allocation, for speeding __module_text_address */
  74 +static unsigned long module_addr_min = -1UL, module_addr_max = 0;
  75 +
73 76 int register_module_notifier(struct notifier_block * nb)
74 77 {
75 78 return blocking_notifier_chain_register(&module_notify_list, nb);
... ... @@ -1779,6 +1782,20 @@
1779 1782 }
1780 1783 #endif /* CONFIG_KALLSYMS */
1781 1784  
  1785 +static void *module_alloc_update_bounds(unsigned long size)
  1786 +{
  1787 + void *ret = module_alloc(size);
  1788 +
  1789 + if (ret) {
  1790 + /* Update module bounds. */
  1791 + if ((unsigned long)ret < module_addr_min)
  1792 + module_addr_min = (unsigned long)ret;
  1793 + if ((unsigned long)ret + size > module_addr_max)
  1794 + module_addr_max = (unsigned long)ret + size;
  1795 + }
  1796 + return ret;
  1797 +}
  1798 +
1782 1799 /* Allocate and load the module: note that size of section 0 is always
1783 1800 zero, and we rely on this for optional sections. */
1784 1801 static struct module *load_module(void __user *umod,
... ... @@ -1980,7 +1997,7 @@
1980 1997 layout_sections(mod, hdr, sechdrs, secstrings);
1981 1998  
1982 1999 /* Do the allocs. */
1983   - ptr = module_alloc(mod->core_size);
  2000 + ptr = module_alloc_update_bounds(mod->core_size);
1984 2001 if (!ptr) {
1985 2002 err = -ENOMEM;
1986 2003 goto free_percpu;
... ... @@ -1988,7 +2005,7 @@
1988 2005 memset(ptr, 0, mod->core_size);
1989 2006 mod->module_core = ptr;
1990 2007  
1991   - ptr = module_alloc(mod->init_size);
  2008 + ptr = module_alloc_update_bounds(mod->init_size);
1992 2009 if (!ptr && mod->init_size) {
1993 2010 err = -ENOMEM;
1994 2011 goto free_core;
... ... @@ -2644,6 +2661,9 @@
2644 2661 struct module *__module_text_address(unsigned long addr)
2645 2662 {
2646 2663 struct module *mod;
  2664 +
  2665 + if (addr < module_addr_min || addr > module_addr_max)
  2666 + return NULL;
2647 2667  
2648 2668 list_for_each_entry(mod, &modules, list)
2649 2669 if (within(addr, mod->module_init, mod->init_text_size)