Commit 457d2ee225801441e96f2e35894ec404572ad862

Authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing, ring-buffer: add paranoid checks for loops
  ftrace: use kretprobe trampoline name to test in output
  tracing, alpha: undefined reference to `save_stack_trace'

Showing 3 changed files Side-by-side Diff

kernel/trace/Kconfig
... ... @@ -25,7 +25,7 @@
25 25 bool
26 26 select DEBUG_FS
27 27 select RING_BUFFER
28   - select STACKTRACE
  28 + select STACKTRACE if STACKTRACE_SUPPORT
29 29 select TRACEPOINTS
30 30 select NOP_TRACER
31 31  
kernel/trace/ring_buffer.c
... ... @@ -1022,8 +1022,23 @@
1022 1022 struct ring_buffer_event *event;
1023 1023 u64 ts, delta;
1024 1024 int commit = 0;
  1025 + int nr_loops = 0;
1025 1026  
1026 1027 again:
  1028 + /*
  1029 + * We allow for interrupts to reenter here and do a trace.
  1030 + * If one does, it will cause this original code to loop
  1031 + * back here. Even with heavy interrupts happening, this
  1032 + * should only happen a few times in a row. If this happens
  1033 + * 1000 times in a row, there must be either an interrupt
  1034 + * storm or we have something buggy.
  1035 + * Bail!
  1036 + */
  1037 + if (unlikely(++nr_loops > 1000)) {
  1038 + RB_WARN_ON(cpu_buffer, 1);
  1039 + return NULL;
  1040 + }
  1041 +
1027 1042 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1028 1043  
1029 1044 /*
1030 1045  
... ... @@ -1532,10 +1547,23 @@
1532 1547 {
1533 1548 struct buffer_page *reader = NULL;
1534 1549 unsigned long flags;
  1550 + int nr_loops = 0;
1535 1551  
1536 1552 spin_lock_irqsave(&cpu_buffer->lock, flags);
1537 1553  
1538 1554 again:
  1555 + /*
  1556 + * This should normally only loop twice. But because the
  1557 + * start of the reader inserts an empty page, it causes
  1558 + * a case where we will loop three times. There should be no
  1559 + * reason to loop four times (that I know of).
  1560 + */
  1561 + if (unlikely(++nr_loops > 3)) {
  1562 + RB_WARN_ON(cpu_buffer, 1);
  1563 + reader = NULL;
  1564 + goto out;
  1565 + }
  1566 +
1539 1567 reader = cpu_buffer->reader_page;
1540 1568  
1541 1569 /* If there's more to read, return this page */
... ... @@ -1665,6 +1693,7 @@
1665 1693 struct ring_buffer_per_cpu *cpu_buffer;
1666 1694 struct ring_buffer_event *event;
1667 1695 struct buffer_page *reader;
  1696 + int nr_loops = 0;
1668 1697  
1669 1698 if (!cpu_isset(cpu, buffer->cpumask))
1670 1699 return NULL;
... ... @@ -1672,6 +1701,19 @@
1672 1701 cpu_buffer = buffer->buffers[cpu];
1673 1702  
1674 1703 again:
  1704 + /*
  1705 + * We repeat when a timestamp is encountered. It is possible
  1706 + * to get multiple timestamps from an interrupt entering just
  1707 + * as one timestamp is about to be written. The max times
  1708 + * that this can happen is the number of nested interrupts we
  1709 + * can have. Nesting 10 deep of interrupts is clearly
  1710 + * an anomaly.
  1711 + */
  1712 + if (unlikely(++nr_loops > 10)) {
  1713 + RB_WARN_ON(cpu_buffer, 1);
  1714 + return NULL;
  1715 + }
  1716 +
1675 1717 reader = rb_get_reader_page(cpu_buffer);
1676 1718 if (!reader)
1677 1719 return NULL;
... ... @@ -1722,6 +1764,7 @@
1722 1764 struct ring_buffer *buffer;
1723 1765 struct ring_buffer_per_cpu *cpu_buffer;
1724 1766 struct ring_buffer_event *event;
  1767 + int nr_loops = 0;
1725 1768  
1726 1769 if (ring_buffer_iter_empty(iter))
1727 1770 return NULL;
... ... @@ -1730,6 +1773,19 @@
1730 1773 buffer = cpu_buffer->buffer;
1731 1774  
1732 1775 again:
  1776 + /*
  1777 + * We repeat when a timestamp is encountered. It is possible
  1778 + * to get multiple timestamps from an interrupt entering just
  1779 + * as one timestamp is about to be written. The max times
  1780 + * that this can happen is the number of nested interrupts we
  1781 + * can have. Nesting 10 deep of interrupts is clearly
  1782 + * an anomaly.
  1783 + */
  1784 + if (unlikely(++nr_loops > 10)) {
  1785 + RB_WARN_ON(cpu_buffer, 1);
  1786 + return NULL;
  1787 + }
  1788 +
1733 1789 if (rb_per_cpu_empty(cpu_buffer))
1734 1790 return NULL;
1735 1791  
kernel/trace/trace.c
... ... @@ -705,6 +705,7 @@
705 705 unsigned long flags,
706 706 int skip, int pc)
707 707 {
  708 +#ifdef CONFIG_STACKTRACE
708 709 struct ring_buffer_event *event;
709 710 struct stack_entry *entry;
710 711 struct stack_trace trace;
... ... @@ -730,6 +731,7 @@
730 731  
731 732 save_stack_trace(&trace);
732 733 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
  734 +#endif
733 735 }
734 736  
735 737 void __trace_stack(struct trace_array *tr,
736 738  
737 739  
738 740  
739 741  
... ... @@ -1086,17 +1088,20 @@
1086 1088 mutex_unlock(&trace_types_lock);
1087 1089 }
1088 1090  
1089   -#define KRETPROBE_MSG "[unknown/kretprobe'd]"
1090   -
1091 1091 #ifdef CONFIG_KRETPROBES
1092   -static inline int kretprobed(unsigned long addr)
  1092 +static inline const char *kretprobed(const char *name)
1093 1093 {
1094   - return addr == (unsigned long)kretprobe_trampoline;
  1094 + static const char tramp_name[] = "kretprobe_trampoline";
  1095 + int size = sizeof(tramp_name);
  1096 +
  1097 + if (strncmp(tramp_name, name, size) == 0)
  1098 + return "[unknown/kretprobe'd]";
  1099 + return name;
1095 1100 }
1096 1101 #else
1097   -static inline int kretprobed(unsigned long addr)
  1102 +static inline const char *kretprobed(const char *name)
1098 1103 {
1099   - return 0;
  1104 + return name;
1100 1105 }
1101 1106 #endif /* CONFIG_KRETPROBES */
1102 1107  
1103 1108  
... ... @@ -1105,10 +1110,13 @@
1105 1110 {
1106 1111 #ifdef CONFIG_KALLSYMS
1107 1112 char str[KSYM_SYMBOL_LEN];
  1113 + const char *name;
1108 1114  
1109 1115 kallsyms_lookup(address, NULL, NULL, NULL, str);
1110 1116  
1111   - return trace_seq_printf(s, fmt, str);
  1117 + name = kretprobed(str);
  1118 +
  1119 + return trace_seq_printf(s, fmt, name);
1112 1120 #endif
1113 1121 return 1;
1114 1122 }
1115 1123  
... ... @@ -1119,9 +1127,12 @@
1119 1127 {
1120 1128 #ifdef CONFIG_KALLSYMS
1121 1129 char str[KSYM_SYMBOL_LEN];
  1130 + const char *name;
1122 1131  
1123 1132 sprint_symbol(str, address);
1124   - return trace_seq_printf(s, fmt, str);
  1133 + name = kretprobed(str);
  1134 +
  1135 + return trace_seq_printf(s, fmt, name);
1125 1136 #endif
1126 1137 return 1;
1127 1138 }
... ... @@ -1375,10 +1386,7 @@
1375 1386  
1376 1387 seq_print_ip_sym(s, field->ip, sym_flags);
1377 1388 trace_seq_puts(s, " (");
1378   - if (kretprobed(field->parent_ip))
1379   - trace_seq_puts(s, KRETPROBE_MSG);
1380   - else
1381   - seq_print_ip_sym(s, field->parent_ip, sym_flags);
  1389 + seq_print_ip_sym(s, field->parent_ip, sym_flags);
1382 1390 trace_seq_puts(s, ")\n");
1383 1391 break;
1384 1392 }
... ... @@ -1494,12 +1502,9 @@
1494 1502 ret = trace_seq_printf(s, " <-");
1495 1503 if (!ret)
1496 1504 return TRACE_TYPE_PARTIAL_LINE;
1497   - if (kretprobed(field->parent_ip))
1498   - ret = trace_seq_puts(s, KRETPROBE_MSG);
1499   - else
1500   - ret = seq_print_ip_sym(s,
1501   - field->parent_ip,
1502   - sym_flags);
  1505 + ret = seq_print_ip_sym(s,
  1506 + field->parent_ip,
  1507 + sym_flags);
1503 1508 if (!ret)
1504 1509 return TRACE_TYPE_PARTIAL_LINE;
1505 1510 }