Commit f17f36bb1c006818441b84cf65a6decf3e59942b

Authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: use local buffer variable for trace branch tracer
  tracing: fix warning on kernel/trace/trace_branch.c and trace_hw_branches.c
  ftrace: check for failure for all conversions
  tracing: correct module boundaries for ftrace_release
  tracing: fix transposed numbers of lock_depth and preempt_count
  trace: Fix missing assignment in trace_ctxwake_*
  tracing: Use free_percpu instead of kfree
  tracing: Check total refcount before releasing bufs in profile_enable failure

6 files changed:

include/linux/ftrace.h
@@ -241,7 +241,7 @@
 # define ftrace_set_filter(buf, len, reset) do { } while (0)
 # define ftrace_disable_daemon() do { } while (0)
 # define ftrace_enable_daemon() do { } while (0)
-static inline void ftrace_release(void *start, unsigned long size) { }
+static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	return -EINVAL;
kernel/trace/ftrace.c
@@ -1078,14 +1078,9 @@
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
-			if ((system_state == SYSTEM_BOOTING) ||
-			    !core_kernel_text(rec->ip)) {
-				ftrace_free_rec(rec);
-			} else {
-				ftrace_bug(failed, rec->ip);
-				/* Stop processing */
-				return;
-			}
+			ftrace_bug(failed, rec->ip);
+			/* Stop processing */
+			return;
 		}
 	} while_for_each_ftrace_rec();
 }
@@ -2662,19 +2657,17 @@
 }
 
 #ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
-	unsigned long s = (unsigned long)start;
-	unsigned long e = (unsigned long)end;
 
-	if (ftrace_disabled || !start || start == end)
+	if (ftrace_disabled)
 		return;
 
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
-		if ((rec->ip >= s) && (rec->ip < e)) {
+		if (within_module_core(rec->ip, mod)) {
 			/*
 			 * rec->ip is changed in ftrace_free_rec()
 			 * It should not between s and e if record was freed.
@@ -2706,9 +2699,7 @@
 					mod->num_ftrace_callsites);
 		break;
 	case MODULE_STATE_GOING:
-		ftrace_release(mod->ftrace_callsites,
-			       mod->ftrace_callsites +
-			       mod->num_ftrace_callsites);
+		ftrace_release_mod(mod);
 		break;
 	}
 
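Note: the old MODULE_STATE_GOING path handed ftrace_release() the bounds of the ftrace_callsites array itself, not the address range of the module text that the records actually point into, so the wrong records could be matched. ftrace_release_mod() instead tests each rec->ip against the module's core mapping via within_module_core(). A minimal sketch of what that helper checks, assuming the 2.6.31-era struct module field names (the real definition lives in include/linux/module.h):

/* Sketch: true iff addr falls inside the module's core mapping.
 * Field names (module_core, core_size) assumed from this era. */
static inline int within_module_core(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_core <= addr &&
	       addr < (unsigned long)mod->module_core + mod->core_size;
}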
kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@
 	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
+	struct ring_buffer *buffer;
 	unsigned long flags;
 	int cpu, pc;
 	const char *p;
@@ -54,7 +55,8 @@
 		goto out;
 
 	pc = preempt_count();
-	event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		goto out;
@@ -74,8 +76,8 @@
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
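Note: the local-buffer fixes in this merge (here and in trace_hw_branches.c below) follow one rule: read tr->buffer into a local once, then reserve and commit against that same pointer, so a concurrent buffer swap (as the latency tracers perform when snapshotting a max) cannot split the pair across two ring buffers. A hedged sketch of the pattern, with a hypothetical caller name:

/* Sketch: reserve/commit must pair on one ring_buffer pointer.
 * my_probe() is hypothetical; the helpers are the real ones used above. */
static void my_probe(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;	/* read once */
	struct ring_buffer_event *event;

	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(struct trace_branch), 0, 0);
	if (!event)
		return;
	/* ... fill ring_buffer_event_data(event) ... */
	ring_buffer_unlock_commit(buffer, event);	/* same pointer */
}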
kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@
 	if (atomic_inc_return(&event->profile_count))
 		return 0;
 
-	if (!total_profile_count++) {
+	if (!total_profile_count) {
 		buf = (char *)alloc_percpu(profile_buf_t);
 		if (!buf)
 			goto fail_buf;
@@ -46,14 +46,19 @@
 	}
 
 	ret = event->profile_enable();
-	if (!ret)
+	if (!ret) {
+		total_profile_count++;
 		return 0;
+	}
 
-	kfree(trace_profile_buf_nmi);
 fail_buf_nmi:
-	kfree(trace_profile_buf);
+	if (!total_profile_count) {
+		free_percpu(trace_profile_buf_nmi);
+		free_percpu(trace_profile_buf);
+		trace_profile_buf_nmi = NULL;
+		trace_profile_buf = NULL;
+	}
 fail_buf:
-	total_profile_count--;
 	atomic_dec(&event->profile_count);
 
 	return ret;
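Note: this hunk fixes two error-path bugs at once. First, buffers obtained from alloc_percpu() were released with kfree(); a per-cpu pointer is a cookie, not a normal heap address, and must go back through free_percpu(). Second, total_profile_count was bumped before profile_enable() was known to succeed, so a failure could tear down buffers another event still held; now the count only rises on success and the cleanup frees only when no user remains. A small standalone sketch of the pairing rule, with placeholder names:

/* Sketch: alloc_percpu()/free_percpu() must pair, mirroring the fix.
 * profile_buf_t here is a placeholder element type, not the real one. */
#include <linux/percpu.h>
#include <linux/errno.h>

typedef unsigned long profile_buf_t[128];

static int example_alloc(void)
{
	char *buf = (char *)alloc_percpu(profile_buf_t);
	if (!buf)
		return -ENOMEM;
	/* ... access with per_cpu_ptr(buf, cpu) ... */
	free_percpu(buf);	/* matches alloc_percpu(), never kfree() */
	return 0;
}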
kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@
 	struct ftrace_event_call *call = &event_hw_branch;
 	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buf;
 	struct hw_branch_entry *entry;
 	unsigned long irq1;
 	int cpu;
@@ -180,7 +181,8 @@
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+	buf = tr->buffer;
+	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
@@ -189,8 +191,8 @@
 	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to = to;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, buf, event))
+		trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
kernel/trace/trace_output.c
@@ -486,16 +486,18 @@
 		      hardirq ? 'h' : softirq ? 's' : '.'))
 		return 0;
 
-	if (entry->lock_depth < 0)
-		ret = trace_seq_putc(s, '.');
+	if (entry->preempt_count)
+		ret = trace_seq_printf(s, "%x", entry->preempt_count);
 	else
-		ret = trace_seq_printf(s, "%d", entry->lock_depth);
+		ret = trace_seq_putc(s, '.');
+
 	if (!ret)
 		return 0;
 
-	if (entry->preempt_count)
-		return trace_seq_printf(s, "%x", entry->preempt_count);
-	return trace_seq_putc(s, '.');
+	if (entry->lock_depth < 0)
+		return trace_seq_putc(s, '.');
+
+	return trace_seq_printf(s, "%d", entry->lock_depth);
 }
 
 static int
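Note: in the latency format the column order is irqs-off flag, then preempt count, then lock depth; the two tests were transposed, so the preempt count landed in the lock-depth column and vice versa. In effect, the printer after the fix reads as this hypothetical condensation of the hunk above (kernel context assumed):

/* Sketch: intended latency-format column order after the fix. */
static void print_latency_cols(struct trace_seq *s, struct trace_entry *entry)
{
	if (entry->preempt_count)		/* preempt count column first */
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_putc(s, '.');

	if (entry->lock_depth < 0)		/* then lock depth */
		trace_seq_putc(s, '.');
	else
		trace_seq_printf(s, "%d", entry->lock_depth);
}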
@@ -883,7 +885,7 @@
 	trace_assign_type(field, iter->ent);
 
 	if (!S)
-		task_state_char(field->prev_state);
+		S = task_state_char(field->prev_state);
 	T = task_state_char(field->next_state);
 	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
 			      field->prev_pid,
@@ -918,7 +920,7 @@
 	trace_assign_type(field, iter->ent);
 
 	if (!S)
-		task_state_char(field->prev_state);
+		S = task_state_char(field->prev_state);
 	T = task_state_char(field->next_state);
 
 	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
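Note: both trace_ctxwake_* hunks fix the same slip: task_state_char() is a pure lookup, so calling it without assigning the result leaves S unchanged, and the prev-state column printed garbage whenever S started out zero. A tiny userspace illustration of the bug pattern, with hypothetical names:

/* Sketch: dropping a pure function's return value is a silent no-op. */
#include <stdio.h>

static char state_char(int state) { return state ? 'S' : 'R'; }

int main(void)
{
	char S = 0;
	if (!S)
		state_char(1);		/* bug: result discarded, S stays 0 */
	if (!S)
		S = state_char(1);	/* fix: S now holds 'S' */
	printf("S = %c\n", S);
	return 0;
}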