Commit 8ec006c58775869175edee3d23f4525b6df2935a
Exists in
master
and in
20 other branches
Merge branch 'sh/dwarf-unwinder'
Conflicts: arch/sh/kernel/dwarf.c
Showing 3 changed files Side-by-side Diff
arch/sh/include/asm/dwarf.h
... | ... | @@ -241,6 +241,12 @@ |
241 | 241 | |
242 | 242 | unsigned long flags; |
243 | 243 | #define DWARF_CIE_Z_AUGMENTATION (1 << 0) |
244 | + | |
245 | + /* | |
246 | + * 'mod' will be non-NULL if this CIE came from a module's | |
247 | + * .eh_frame section. | |
248 | + */ | |
249 | + struct module *mod; | |
244 | 250 | }; |
245 | 251 | |
246 | 252 | /** |
... | ... | @@ -255,6 +261,12 @@ |
255 | 261 | unsigned char *instructions; |
256 | 262 | unsigned char *end; |
257 | 263 | struct list_head link; |
264 | + | |
265 | + /* | |
266 | + * 'mod' will be non-NULL if this FDE came from a module's | |
267 | + * .eh_frame section. | |
268 | + */ | |
269 | + struct module *mod; | |
258 | 270 | }; |
259 | 271 | |
260 | 272 | /** |
... | ... | @@ -364,6 +376,10 @@ |
364 | 376 | |
365 | 377 | extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, |
366 | 378 | struct dwarf_frame *); |
379 | +extern void dwarf_free_frame(struct dwarf_frame *); | |
380 | +extern int dwarf_parse_section(char *, char *, struct module *); | |
381 | +extern void dwarf_module_unload(struct module *); | |
382 | + | |
367 | 383 | #endif /* !__ASSEMBLY__ */ |
368 | 384 | |
369 | 385 | #define CFI_STARTPROC .cfi_startproc |
arch/sh/kernel/dwarf.c
... | ... | @@ -529,7 +529,18 @@ |
529 | 529 | } |
530 | 530 | |
531 | 531 | /** |
532 | - * dwarf_unwind_stack - recursively unwind the stack | |
532 | + * dwarf_free_frame - free the memory allocated for @frame | |
533 | + * @frame: the frame to free | |
534 | + */ | |
535 | +void dwarf_free_frame(struct dwarf_frame *frame) | |
536 | +{ | |
537 | + dwarf_frame_free_regs(frame); | |
538 | + mempool_free(frame, dwarf_frame_pool); | |
539 | +} | |
540 | + | |
541 | +/** | |
542 | + * dwarf_unwind_stack - unwind the stack | |
543 | + * | |
533 | 544 | * @pc: address of the function to unwind |
534 | 545 | * @prev: struct dwarf_frame of the previous stackframe on the callstack |
535 | 546 | * |
... | ... | @@ -547,9 +558,9 @@ |
547 | 558 | unsigned long addr; |
548 | 559 | |
549 | 560 | /* |
550 | - * If this is the first invocation of this recursive function we | |
551 | - * need get the contents of a physical register to get the CFA | |
552 | - * in order to begin the virtual unwinding of the stack. | |
561 | + * If we're starting at the top of the stack we need to get the | 
562 | + * contents of a physical register to get the CFA in order to | |
563 | + * begin the virtual unwinding of the stack. | |
553 | 564 | * |
554 | 565 | * NOTE: the return address is guaranteed to be setup by the |
555 | 566 | * time this function makes its first function call. |
... | ... | @@ -571,9 +582,8 @@ |
571 | 582 | fde = dwarf_lookup_fde(pc); |
572 | 583 | if (!fde) { |
573 | 584 | /* |
574 | - * This is our normal exit path - the one that stops the | |
575 | - * recursion. There's two reasons why we might exit | |
576 | - * here, | |
585 | + * This is our normal exit path. There are two reasons | |
586 | + * why we might exit here, | |
577 | 587 | * |
578 | 588 | * a) pc has no associated DWARF frame info and so |
579 | 589 | * we don't know how to unwind this frame. This is |
... | ... | @@ -615,10 +625,10 @@ |
615 | 625 | |
616 | 626 | } else { |
617 | 627 | /* |
618 | - * Again, this is the first invocation of this | |
619 | - * recurisve function. We need to physically | |
620 | - * read the contents of a register in order to | |
621 | - * get the Canonical Frame Address for this | |
628 | + * Again, we're starting from the top of the | |
629 | + * stack. We need to physically read | |
630 | + * the contents of a register in order to get | |
631 | + * the Canonical Frame Address for this | |
622 | 632 | * function. |
623 | 633 | */ |
624 | 634 | frame->cfa = dwarf_read_arch_reg(frame->cfa_register); |
625 | 635 | |
... | ... | @@ -648,13 +658,12 @@ |
648 | 658 | return frame; |
649 | 659 | |
650 | 660 | bail: |
651 | - dwarf_frame_free_regs(frame); | |
652 | - mempool_free(frame, dwarf_frame_pool); | |
661 | + dwarf_free_frame(frame); | |
653 | 662 | return NULL; |
654 | 663 | } |
655 | 664 | |
656 | 665 | static int dwarf_parse_cie(void *entry, void *p, unsigned long len, |
657 | - unsigned char *end) | |
666 | + unsigned char *end, struct module *mod) | |
658 | 667 | { |
659 | 668 | struct dwarf_cie *cie; |
660 | 669 | unsigned long flags; |
... | ... | @@ -750,6 +759,8 @@ |
750 | 759 | cie->initial_instructions = p; |
751 | 760 | cie->instructions_end = end; |
752 | 761 | |
762 | + cie->mod = mod; | |
763 | + | |
753 | 764 | /* Add to list */ |
754 | 765 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
755 | 766 | list_add_tail(&cie->link, &dwarf_cie_list); |
... | ... | @@ -760,7 +771,7 @@ |
760 | 771 | |
761 | 772 | static int dwarf_parse_fde(void *entry, u32 entry_type, |
762 | 773 | void *start, unsigned long len, |
763 | - unsigned char *end) | |
774 | + unsigned char *end, struct module *mod) | |
764 | 775 | { |
765 | 776 | struct dwarf_fde *fde; |
766 | 777 | struct dwarf_cie *cie; |
... | ... | @@ -809,6 +820,8 @@ |
809 | 820 | fde->instructions = p; |
810 | 821 | fde->end = end; |
811 | 822 | |
823 | + fde->mod = mod; | |
824 | + | |
812 | 825 | /* Add to list. */ |
813 | 826 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
814 | 827 | list_add_tail(&fde->link, &dwarf_fde_list); |
... | ... | @@ -832,10 +845,8 @@ |
832 | 845 | while (1) { |
833 | 846 | frame = dwarf_unwind_stack(return_addr, _frame); |
834 | 847 | |
835 | - if (_frame) { | |
836 | - dwarf_frame_free_regs(_frame); | |
837 | - mempool_free(_frame, dwarf_frame_pool); | |
838 | - } | |
848 | + if (_frame) | |
849 | + dwarf_free_frame(_frame); | |
839 | 850 | |
840 | 851 | _frame = frame; |
841 | 852 | |
... | ... | @@ -845,6 +856,9 @@ |
845 | 856 | return_addr = frame->return_addr; |
846 | 857 | ops->address(data, return_addr, 1); |
847 | 858 | } |
859 | + | |
860 | + if (frame) | |
861 | + dwarf_free_frame(frame); | |
848 | 862 | } |
849 | 863 | |
850 | 864 | static struct unwinder dwarf_unwinder = { |
851 | 865 | |
852 | 866 | |
... | ... | @@ -874,15 +888,15 @@ |
874 | 888 | } |
875 | 889 | |
876 | 890 | /** |
877 | - * dwarf_unwinder_init - initialise the dwarf unwinder | |
891 | + * dwarf_parse_section - parse DWARF section | |
892 | + * @eh_frame_start: start address of the .eh_frame section | |
893 | + * @eh_frame_end: end address of the .eh_frame section | |
894 | + * @mod: the kernel module containing the .eh_frame section | |
878 | 895 | * |
879 | - * Build the data structures describing the .dwarf_frame section to | |
880 | - * make it easier to lookup CIE and FDE entries. Because the | |
881 | - * .eh_frame section is packed as tightly as possible it is not | |
882 | - * easy to lookup the FDE for a given PC, so we build a list of FDE | |
883 | - * and CIE entries that make it easier. | |
896 | + * Parse the information in a .eh_frame section. | |
884 | 897 | */ |
885 | -static int __init dwarf_unwinder_init(void) | |
898 | +int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, | |
899 | + struct module *mod) | |
886 | 900 | { |
887 | 901 | u32 entry_type; |
888 | 902 | void *p, *entry; |
889 | 903 | |
890 | 904 | |
... | ... | @@ -890,32 +904,12 @@ |
890 | 904 | unsigned long len; |
891 | 905 | unsigned int c_entries, f_entries; |
892 | 906 | unsigned char *end; |
893 | - INIT_LIST_HEAD(&dwarf_cie_list); | |
894 | - INIT_LIST_HEAD(&dwarf_fde_list); | |
895 | 907 | |
896 | 908 | c_entries = 0; |
897 | 909 | f_entries = 0; |
898 | - entry = &__start_eh_frame; | |
910 | + entry = eh_frame_start; | |
899 | 911 | |
900 | - dwarf_frame_cachep = kmem_cache_create("dwarf_frames", | |
901 | - sizeof(struct dwarf_frame), 0, | |
902 | - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | |
903 | - | |
904 | - dwarf_reg_cachep = kmem_cache_create("dwarf_regs", | |
905 | - sizeof(struct dwarf_reg), 0, | |
906 | - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | |
907 | - | |
908 | - dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, | |
909 | - mempool_alloc_slab, | |
910 | - mempool_free_slab, | |
911 | - dwarf_frame_cachep); | |
912 | - | |
913 | - dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, | |
914 | - mempool_alloc_slab, | |
915 | - mempool_free_slab, | |
916 | - dwarf_reg_cachep); | |
917 | - | |
918 | - while ((char *)entry < __stop_eh_frame) { | |
912 | + while ((char *)entry < eh_frame_end) { | |
919 | 913 | p = entry; |
920 | 914 | |
921 | 915 | count = dwarf_entry_len(p, &len); |
... | ... | @@ -927,6 +921,7 @@ |
927 | 921 | * entry and move to the next one because 'len' |
928 | 922 | * tells us where our next entry is. |
929 | 923 | */ |
924 | + err = -EINVAL; | |
930 | 925 | goto out; |
931 | 926 | } else |
932 | 927 | p += count; |
933 | 928 | |
... | ... | @@ -938,13 +933,14 @@ |
938 | 933 | p += 4; |
939 | 934 | |
940 | 935 | if (entry_type == DW_EH_FRAME_CIE) { |
941 | - err = dwarf_parse_cie(entry, p, len, end); | |
936 | + err = dwarf_parse_cie(entry, p, len, end, mod); | |
942 | 937 | if (err < 0) |
943 | 938 | goto out; |
944 | 939 | else |
945 | 940 | c_entries++; |
946 | 941 | } else { |
947 | - err = dwarf_parse_fde(entry, entry_type, p, len, end); | |
942 | + err = dwarf_parse_fde(entry, entry_type, p, len, | |
943 | + end, mod); | |
948 | 944 | if (err < 0) |
949 | 945 | goto out; |
950 | 946 | else |
... | ... | @@ -956,6 +952,95 @@ |
956 | 952 | |
957 | 953 | printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", |
958 | 954 | c_entries, f_entries); |
955 | + | |
956 | + return 0; | |
957 | + | |
958 | +out: | |
959 | + return err; | |
960 | +} | |
961 | + | |
962 | +/** | |
963 | + * dwarf_module_unload - remove FDE/CIEs associated with @mod | |
964 | + * @mod: the module that is being unloaded | |
965 | + * | |
966 | + * Remove any FDEs and CIEs from the global lists that came from | |
967 | + * @mod's .eh_frame section because @mod is being unloaded. | |
968 | + */ | |
969 | +void dwarf_module_unload(struct module *mod) | |
970 | +{ | |
971 | + struct dwarf_fde *fde; | |
972 | + struct dwarf_cie *cie; | |
973 | + unsigned long flags; | |
974 | + | |
975 | + spin_lock_irqsave(&dwarf_cie_lock, flags); | |
976 | + | |
977 | +again_cie: | |
978 | + list_for_each_entry(cie, &dwarf_cie_list, link) { | |
979 | + if (cie->mod == mod) | |
980 | + break; | |
981 | + } | |
982 | + | |
983 | + if (&cie->link != &dwarf_cie_list) { | |
984 | + list_del(&cie->link); | |
985 | + kfree(cie); | |
986 | + goto again_cie; | |
987 | + } | |
988 | + | |
989 | + spin_unlock_irqrestore(&dwarf_cie_lock, flags); | |
990 | + | |
991 | + spin_lock_irqsave(&dwarf_fde_lock, flags); | |
992 | + | |
993 | +again_fde: | |
994 | + list_for_each_entry(fde, &dwarf_fde_list, link) { | |
995 | + if (fde->mod == mod) | |
996 | + break; | |
997 | + } | |
998 | + | |
999 | + if (&fde->link != &dwarf_fde_list) { | |
1000 | + list_del(&fde->link); | |
1001 | + kfree(fde); | |
1002 | + goto again_fde; | |
1003 | + } | |
1004 | + | |
1005 | + spin_unlock_irqrestore(&dwarf_fde_lock, flags); | |
1006 | +} | |
1007 | + | |
1008 | +/** | |
1009 | + * dwarf_unwinder_init - initialise the dwarf unwinder | |
1010 | + * | |
1011 | + * Build the data structures describing the .dwarf_frame section to | |
1012 | + * make it easier to lookup CIE and FDE entries. Because the | |
1013 | + * .eh_frame section is packed as tightly as possible it is not | |
1014 | + * easy to lookup the FDE for a given PC, so we build a list of FDE | |
1015 | + * and CIE entries that make it easier. | |
1016 | + */ | |
1017 | +static int __init dwarf_unwinder_init(void) | |
1018 | +{ | |
1019 | + int err; | |
1020 | + INIT_LIST_HEAD(&dwarf_cie_list); | |
1021 | + INIT_LIST_HEAD(&dwarf_fde_list); | |
1022 | + | |
1023 | + dwarf_frame_cachep = kmem_cache_create("dwarf_frames", | |
1024 | + sizeof(struct dwarf_frame), 0, | |
1025 | + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | |
1026 | + | |
1027 | + dwarf_reg_cachep = kmem_cache_create("dwarf_regs", | |
1028 | + sizeof(struct dwarf_reg), 0, | |
1029 | + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | |
1030 | + | |
1031 | + dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, | |
1032 | + mempool_alloc_slab, | |
1033 | + mempool_free_slab, | |
1034 | + dwarf_frame_cachep); | |
1035 | + | |
1036 | + dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, | |
1037 | + mempool_alloc_slab, | |
1038 | + mempool_free_slab, | |
1039 | + dwarf_reg_cachep); | |
1040 | + | |
1041 | + err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL); | |
1042 | + if (err) | |
1043 | + goto out; | |
959 | 1044 | |
960 | 1045 | err = unwinder_register(&dwarf_unwinder); |
961 | 1046 | if (err) |
arch/sh/kernel/module.c
... | ... | @@ -32,6 +32,7 @@ |
32 | 32 | #include <linux/string.h> |
33 | 33 | #include <linux/kernel.h> |
34 | 34 | #include <asm/unaligned.h> |
35 | +#include <asm/dwarf.h> | |
35 | 36 | |
36 | 37 | void *module_alloc(unsigned long size) |
37 | 38 | { |
38 | 39 | |
... | ... | @@ -145,11 +146,42 @@ |
145 | 146 | const Elf_Shdr *sechdrs, |
146 | 147 | struct module *me) |
147 | 148 | { |
149 | +#ifdef CONFIG_DWARF_UNWINDER | |
150 | + unsigned int i, err; | |
151 | + unsigned long start, end; | |
152 | + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | |
153 | + | |
154 | + start = end = 0; | |
155 | + | |
156 | + for (i = 1; i < hdr->e_shnum; i++) { | |
157 | + /* Alloc bit cleared means "ignore it." */ | |
158 | + if ((sechdrs[i].sh_flags & SHF_ALLOC) | |
159 | + && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) { | |
160 | + start = sechdrs[i].sh_addr; | |
161 | + end = start + sechdrs[i].sh_size; | |
162 | + break; | |
163 | + } | |
164 | + } | |
165 | + | |
166 | + /* Did we find the .eh_frame section? */ | |
167 | + if (i != hdr->e_shnum) { | |
168 | + err = dwarf_parse_section((char *)start, (char *)end, me); | |
169 | + if (err) | |
170 | + printk(KERN_WARNING "%s: failed to parse DWARF info\n", | |
171 | + me->name); | |
172 | + } | |
173 | + | |
174 | +#endif /* CONFIG_DWARF_UNWINDER */ | |
175 | + | |
148 | 176 | return module_bug_finalize(hdr, sechdrs, me); |
149 | 177 | } |
150 | 178 | |
151 | 179 | void module_arch_cleanup(struct module *mod) |
152 | 180 | { |
153 | 181 | module_bug_cleanup(mod); |
182 | + | |
183 | +#ifdef CONFIG_DWARF_UNWINDER | |
184 | + dwarf_module_unload(mod); | |
185 | +#endif /* CONFIG_DWARF_UNWINDER */ | |
154 | 186 | } |