Commit a5e25883a445dce94a087ca479b21a5959cd5c18
Committed by
Ingo Molnar
1 parent
dfdc5437bd
Exists in
master
and in
7 other branches
ftrace: replace raw_local_irq_save with local_irq_save
Impact: fix for lockdep and ftrace. The raw_local_irq_save/restore confuses lockdep. This patch converts them to the local_irq_save/restore variants. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 4 changed files with 13 additions and 12 deletions. Side-by-side Diff
kernel/lockdep.c
kernel/trace/trace.c
... | ... | @@ -1209,7 +1209,7 @@ |
1209 | 1209 | int cpu; |
1210 | 1210 | int pc; |
1211 | 1211 | |
1212 | - raw_local_irq_save(flags); | |
1212 | + local_irq_save(flags); | |
1213 | 1213 | cpu = raw_smp_processor_id(); |
1214 | 1214 | data = tr->data[cpu]; |
1215 | 1215 | disabled = atomic_inc_return(&data->disabled); |
... | ... | @@ -1218,7 +1218,7 @@ |
1218 | 1218 | __trace_graph_entry(tr, data, trace, flags, pc); |
1219 | 1219 | } |
1220 | 1220 | atomic_dec(&data->disabled); |
1221 | - raw_local_irq_restore(flags); | |
1221 | + local_irq_restore(flags); | |
1222 | 1222 | } |
1223 | 1223 | |
1224 | 1224 | void trace_graph_return(struct ftrace_graph_ret *trace) |
... | ... | @@ -1230,7 +1230,7 @@ |
1230 | 1230 | int cpu; |
1231 | 1231 | int pc; |
1232 | 1232 | |
1233 | - raw_local_irq_save(flags); | |
1233 | + local_irq_save(flags); | |
1234 | 1234 | cpu = raw_smp_processor_id(); |
1235 | 1235 | data = tr->data[cpu]; |
1236 | 1236 | disabled = atomic_inc_return(&data->disabled); |
... | ... | @@ -1239,7 +1239,7 @@ |
1239 | 1239 | __trace_graph_return(tr, data, trace, flags, pc); |
1240 | 1240 | } |
1241 | 1241 | atomic_dec(&data->disabled); |
1242 | - raw_local_irq_restore(flags); | |
1242 | + local_irq_restore(flags); | |
1243 | 1243 | } |
1244 | 1244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1245 | 1245 | |
... | ... | @@ -2645,7 +2645,7 @@ |
2645 | 2645 | if (err) |
2646 | 2646 | goto err_unlock; |
2647 | 2647 | |
2648 | - raw_local_irq_disable(); | |
2648 | + local_irq_disable(); | |
2649 | 2649 | __raw_spin_lock(&ftrace_max_lock); |
2650 | 2650 | for_each_tracing_cpu(cpu) { |
2651 | 2651 | /* |
... | ... | @@ -2662,7 +2662,7 @@ |
2662 | 2662 | } |
2663 | 2663 | } |
2664 | 2664 | __raw_spin_unlock(&ftrace_max_lock); |
2665 | - raw_local_irq_enable(); | |
2665 | + local_irq_enable(); | |
2666 | 2666 | |
2667 | 2667 | tracing_cpumask = tracing_cpumask_new; |
2668 | 2668 |
kernel/trace/trace_branch.c
... | ... | @@ -42,7 +42,7 @@ |
42 | 42 | if (unlikely(!tr)) |
43 | 43 | return; |
44 | 44 | |
45 | - raw_local_irq_save(flags); | |
45 | + local_irq_save(flags); | |
46 | 46 | cpu = raw_smp_processor_id(); |
47 | 47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
48 | 48 | goto out; |
... | ... | @@ -74,7 +74,7 @@ |
74 | 74 | |
75 | 75 | out: |
76 | 76 | atomic_dec(&tr->data[cpu]->disabled); |
77 | - raw_local_irq_restore(flags); | |
77 | + local_irq_restore(flags); | |
78 | 78 | } |
79 | 79 | |
80 | 80 | static inline |
kernel/trace/trace_stack.c
... | ... | @@ -48,7 +48,7 @@ |
48 | 48 | if (!object_is_on_stack(&this_size)) |
49 | 49 | return; |
50 | 50 | |
51 | - raw_local_irq_save(flags); | |
51 | + local_irq_save(flags); | |
52 | 52 | __raw_spin_lock(&max_stack_lock); |
53 | 53 | |
54 | 54 | /* a race could have already updated it */ |
... | ... | @@ -96,7 +96,7 @@ |
96 | 96 | |
97 | 97 | out: |
98 | 98 | __raw_spin_unlock(&max_stack_lock); |
99 | - raw_local_irq_restore(flags); | |
99 | + local_irq_restore(flags); | |
100 | 100 | } |
101 | 101 | |
102 | 102 | static void |
103 | 103 | |
... | ... | @@ -162,11 +162,11 @@ |
162 | 162 | if (ret < 0) |
163 | 163 | return ret; |
164 | 164 | |
165 | - raw_local_irq_save(flags); | |
165 | + local_irq_save(flags); | |
166 | 166 | __raw_spin_lock(&max_stack_lock); |
167 | 167 | *ptr = val; |
168 | 168 | __raw_spin_unlock(&max_stack_lock); |
169 | - raw_local_irq_restore(flags); | |
169 | + local_irq_restore(flags); | |
170 | 170 | |
171 | 171 | return count; |
172 | 172 | } |