Commit 217f155e9fc68bf2a6c58a7b47e0d1ce68d78818

Authored by Alexander Duyck
Committed by H. Peter Anvin
1 parent fc8d782677

x86/ftrace: Use __pa_symbol instead of __pa on C visible symbols

Instead of using __pa, which is meant to be a general function for converting
virtual addresses to physical addresses, we can use __pa_symbol, which is the
preferred way of decoding kernel text virtual addresses to physical addresses.

In this case we are not directly converting C visible symbols; however, since we
know that the instruction pointer is somewhere between _text and _etext, we
know that we are going to be translating an address from the kernel text
space.

Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Link: http://lkml.kernel.org/r/20121116215718.8521.24026.stgit@ahduyck-cp1.jf.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Showing 1 changed file with 2 additions and 2 deletions Inline Diff

arch/x86/kernel/ftrace.c
1 /* 1 /*
2 * Code for replacing ftrace calls with jumps. 2 * Code for replacing ftrace calls with jumps.
3 * 3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * 5 *
6 * Thanks goes to Ingo Molnar, for suggesting the idea. 6 * Thanks goes to Ingo Molnar, for suggesting the idea.
7 * Mathieu Desnoyers, for suggesting postponing the modifications. 7 * Mathieu Desnoyers, for suggesting postponing the modifications.
8 * Arjan van de Ven, for keeping me straight, and explaining to me 8 * Arjan van de Ven, for keeping me straight, and explaining to me
9 * the dangers of modifying code on the run. 9 * the dangers of modifying code on the run.
10 */ 10 */
11 11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14 #include <linux/spinlock.h> 14 #include <linux/spinlock.h>
15 #include <linux/hardirq.h> 15 #include <linux/hardirq.h>
16 #include <linux/uaccess.h> 16 #include <linux/uaccess.h>
17 #include <linux/ftrace.h> 17 #include <linux/ftrace.h>
18 #include <linux/percpu.h> 18 #include <linux/percpu.h>
19 #include <linux/sched.h> 19 #include <linux/sched.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/list.h> 21 #include <linux/list.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 23
24 #include <trace/syscall.h> 24 #include <trace/syscall.h>
25 25
26 #include <asm/cacheflush.h> 26 #include <asm/cacheflush.h>
27 #include <asm/kprobes.h> 27 #include <asm/kprobes.h>
28 #include <asm/ftrace.h> 28 #include <asm/ftrace.h>
29 #include <asm/nops.h> 29 #include <asm/nops.h>
30 30
31 #ifdef CONFIG_DYNAMIC_FTRACE 31 #ifdef CONFIG_DYNAMIC_FTRACE
32 32
33 int ftrace_arch_code_modify_prepare(void) 33 int ftrace_arch_code_modify_prepare(void)
34 { 34 {
35 set_kernel_text_rw(); 35 set_kernel_text_rw();
36 set_all_modules_text_rw(); 36 set_all_modules_text_rw();
37 return 0; 37 return 0;
38 } 38 }
39 39
40 int ftrace_arch_code_modify_post_process(void) 40 int ftrace_arch_code_modify_post_process(void)
41 { 41 {
42 set_all_modules_text_ro(); 42 set_all_modules_text_ro();
43 set_kernel_text_ro(); 43 set_kernel_text_ro();
44 return 0; 44 return 0;
45 } 45 }
46 46
47 union ftrace_code_union { 47 union ftrace_code_union {
48 char code[MCOUNT_INSN_SIZE]; 48 char code[MCOUNT_INSN_SIZE];
49 struct { 49 struct {
50 char e8; 50 char e8;
51 int offset; 51 int offset;
52 } __attribute__((packed)); 52 } __attribute__((packed));
53 }; 53 };
54 54
55 static int ftrace_calc_offset(long ip, long addr) 55 static int ftrace_calc_offset(long ip, long addr)
56 { 56 {
57 return (int)(addr - ip); 57 return (int)(addr - ip);
58 } 58 }
59 59
60 static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 60 static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
61 { 61 {
62 static union ftrace_code_union calc; 62 static union ftrace_code_union calc;
63 63
64 calc.e8 = 0xe8; 64 calc.e8 = 0xe8;
65 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); 65 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
66 66
67 /* 67 /*
68 * No locking needed, this must be called via kstop_machine 68 * No locking needed, this must be called via kstop_machine
69 * which in essence is like running on a uniprocessor machine. 69 * which in essence is like running on a uniprocessor machine.
70 */ 70 */
71 return calc.code; 71 return calc.code;
72 } 72 }
73 73
74 static inline int 74 static inline int
75 within(unsigned long addr, unsigned long start, unsigned long end) 75 within(unsigned long addr, unsigned long start, unsigned long end)
76 { 76 {
77 return addr >= start && addr < end; 77 return addr >= start && addr < end;
78 } 78 }
79 79
80 static int 80 static int
81 do_ftrace_mod_code(unsigned long ip, const void *new_code) 81 do_ftrace_mod_code(unsigned long ip, const void *new_code)
82 { 82 {
83 /* 83 /*
84 * On x86_64, kernel text mappings are mapped read-only with 84 * On x86_64, kernel text mappings are mapped read-only with
85 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead 85 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
86 * of the kernel text mapping to modify the kernel text. 86 * of the kernel text mapping to modify the kernel text.
87 * 87 *
88 * For 32bit kernels, these mappings are same and we can use 88 * For 32bit kernels, these mappings are same and we can use
89 * kernel identity mapping to modify code. 89 * kernel identity mapping to modify code.
90 */ 90 */
91 if (within(ip, (unsigned long)_text, (unsigned long)_etext)) 91 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
92 ip = (unsigned long)__va(__pa(ip)); 92 ip = (unsigned long)__va(__pa_symbol(ip));
93 93
94 return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); 94 return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
95 } 95 }
96 96
97 static const unsigned char *ftrace_nop_replace(void) 97 static const unsigned char *ftrace_nop_replace(void)
98 { 98 {
99 return ideal_nops[NOP_ATOMIC5]; 99 return ideal_nops[NOP_ATOMIC5];
100 } 100 }
101 101
102 static int 102 static int
103 ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, 103 ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
104 unsigned const char *new_code) 104 unsigned const char *new_code)
105 { 105 {
106 unsigned char replaced[MCOUNT_INSN_SIZE]; 106 unsigned char replaced[MCOUNT_INSN_SIZE];
107 107
108 /* 108 /*
109 * Note: Due to modules and __init, code can 109 * Note: Due to modules and __init, code can
110 * disappear and change, we need to protect against faulting 110 * disappear and change, we need to protect against faulting
111 * as well as code changing. We do this by using the 111 * as well as code changing. We do this by using the
112 * probe_kernel_* functions. 112 * probe_kernel_* functions.
113 * 113 *
114 * No real locking needed, this code is run through 114 * No real locking needed, this code is run through
115 * kstop_machine, or before SMP starts. 115 * kstop_machine, or before SMP starts.
116 */ 116 */
117 117
118 /* read the text we want to modify */ 118 /* read the text we want to modify */
119 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) 119 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
120 return -EFAULT; 120 return -EFAULT;
121 121
122 /* Make sure it is what we expect it to be */ 122 /* Make sure it is what we expect it to be */
123 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) 123 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 /* replace the text with the new text */ 126 /* replace the text with the new text */
127 if (do_ftrace_mod_code(ip, new_code)) 127 if (do_ftrace_mod_code(ip, new_code))
128 return -EPERM; 128 return -EPERM;
129 129
130 sync_core(); 130 sync_core();
131 131
132 return 0; 132 return 0;
133 } 133 }
134 134
135 int ftrace_make_nop(struct module *mod, 135 int ftrace_make_nop(struct module *mod,
136 struct dyn_ftrace *rec, unsigned long addr) 136 struct dyn_ftrace *rec, unsigned long addr)
137 { 137 {
138 unsigned const char *new, *old; 138 unsigned const char *new, *old;
139 unsigned long ip = rec->ip; 139 unsigned long ip = rec->ip;
140 140
141 old = ftrace_call_replace(ip, addr); 141 old = ftrace_call_replace(ip, addr);
142 new = ftrace_nop_replace(); 142 new = ftrace_nop_replace();
143 143
144 /* 144 /*
145 * On boot up, and when modules are loaded, the MCOUNT_ADDR 145 * On boot up, and when modules are loaded, the MCOUNT_ADDR
146 * is converted to a nop, and will never become MCOUNT_ADDR 146 * is converted to a nop, and will never become MCOUNT_ADDR
147 * again. This code is either running before SMP (on boot up) 147 * again. This code is either running before SMP (on boot up)
148 * or before the code will ever be executed (module load). 148 * or before the code will ever be executed (module load).
149 * We do not want to use the breakpoint version in this case, 149 * We do not want to use the breakpoint version in this case,
150 * just modify the code directly. 150 * just modify the code directly.
151 */ 151 */
152 if (addr == MCOUNT_ADDR) 152 if (addr == MCOUNT_ADDR)
153 return ftrace_modify_code_direct(rec->ip, old, new); 153 return ftrace_modify_code_direct(rec->ip, old, new);
154 154
155 /* Normal cases use add_brk_on_nop */ 155 /* Normal cases use add_brk_on_nop */
156 WARN_ONCE(1, "invalid use of ftrace_make_nop"); 156 WARN_ONCE(1, "invalid use of ftrace_make_nop");
157 return -EINVAL; 157 return -EINVAL;
158 } 158 }
159 159
160 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 160 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
161 { 161 {
162 unsigned const char *new, *old; 162 unsigned const char *new, *old;
163 unsigned long ip = rec->ip; 163 unsigned long ip = rec->ip;
164 164
165 old = ftrace_nop_replace(); 165 old = ftrace_nop_replace();
166 new = ftrace_call_replace(ip, addr); 166 new = ftrace_call_replace(ip, addr);
167 167
168 /* Should only be called when module is loaded */ 168 /* Should only be called when module is loaded */
169 return ftrace_modify_code_direct(rec->ip, old, new); 169 return ftrace_modify_code_direct(rec->ip, old, new);
170 } 170 }
171 171
172 /* 172 /*
173 * The modifying_ftrace_code is used to tell the breakpoint 173 * The modifying_ftrace_code is used to tell the breakpoint
174 * handler to call ftrace_int3_handler(). If it fails to 174 * handler to call ftrace_int3_handler(). If it fails to
175 * call this handler for a breakpoint added by ftrace, then 175 * call this handler for a breakpoint added by ftrace, then
176 * the kernel may crash. 176 * the kernel may crash.
177 * 177 *
178 * As atomic_writes on x86 do not need a barrier, we do not 178 * As atomic_writes on x86 do not need a barrier, we do not
179 * need to add smp_mb()s for this to work. It is also considered 179 * need to add smp_mb()s for this to work. It is also considered
180 * that we can not read the modifying_ftrace_code before 180 * that we can not read the modifying_ftrace_code before
181 * executing the breakpoint. That would be quite remarkable if 181 * executing the breakpoint. That would be quite remarkable if
182 * it could do that. Here's the flow that is required: 182 * it could do that. Here's the flow that is required:
183 * 183 *
184 * CPU-0 CPU-1 184 * CPU-0 CPU-1
185 * 185 *
186 * atomic_inc(mfc); 186 * atomic_inc(mfc);
187 * write int3s 187 * write int3s
188 * <trap-int3> // implicit (r)mb 188 * <trap-int3> // implicit (r)mb
189 * if (atomic_read(mfc)) 189 * if (atomic_read(mfc))
190 * call ftrace_int3_handler() 190 * call ftrace_int3_handler()
191 * 191 *
192 * Then when we are finished: 192 * Then when we are finished:
193 * 193 *
194 * atomic_dec(mfc); 194 * atomic_dec(mfc);
195 * 195 *
196 * If we hit a breakpoint that was not set by ftrace, it does not 196 * If we hit a breakpoint that was not set by ftrace, it does not
197 * matter if ftrace_int3_handler() is called or not. It will 197 * matter if ftrace_int3_handler() is called or not. It will
198 * simply be ignored. But it is crucial that a ftrace nop/caller 198 * simply be ignored. But it is crucial that a ftrace nop/caller
199 * breakpoint is handled. No other user should ever place a 199 * breakpoint is handled. No other user should ever place a
200 * breakpoint on an ftrace nop/caller location. It must only 200 * breakpoint on an ftrace nop/caller location. It must only
201 * be done by this code. 201 * be done by this code.
202 */ 202 */
203 atomic_t modifying_ftrace_code __read_mostly; 203 atomic_t modifying_ftrace_code __read_mostly;
204 204
205 static int 205 static int
206 ftrace_modify_code(unsigned long ip, unsigned const char *old_code, 206 ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
207 unsigned const char *new_code); 207 unsigned const char *new_code);
208 208
209 /* 209 /*
210 * Should never be called: 210 * Should never be called:
211 * As it is only called by __ftrace_replace_code() which is called by 211 * As it is only called by __ftrace_replace_code() which is called by
212 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code() 212 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
213 * which is called to turn mcount into nops or nops into function calls 213 * which is called to turn mcount into nops or nops into function calls
214 * but not to convert a function from not using regs to one that uses 214 * but not to convert a function from not using regs to one that uses
215 * regs, which ftrace_modify_call() is for. 215 * regs, which ftrace_modify_call() is for.
216 */ 216 */
217 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 217 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
218 unsigned long addr) 218 unsigned long addr)
219 { 219 {
220 WARN_ON(1); 220 WARN_ON(1);
221 return -EINVAL; 221 return -EINVAL;
222 } 222 }
223 223
224 int ftrace_update_ftrace_func(ftrace_func_t func) 224 int ftrace_update_ftrace_func(ftrace_func_t func)
225 { 225 {
226 unsigned long ip = (unsigned long)(&ftrace_call); 226 unsigned long ip = (unsigned long)(&ftrace_call);
227 unsigned char old[MCOUNT_INSN_SIZE], *new; 227 unsigned char old[MCOUNT_INSN_SIZE], *new;
228 int ret; 228 int ret;
229 229
230 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); 230 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
231 new = ftrace_call_replace(ip, (unsigned long)func); 231 new = ftrace_call_replace(ip, (unsigned long)func);
232 232
233 /* See comment above by declaration of modifying_ftrace_code */ 233 /* See comment above by declaration of modifying_ftrace_code */
234 atomic_inc(&modifying_ftrace_code); 234 atomic_inc(&modifying_ftrace_code);
235 235
236 ret = ftrace_modify_code(ip, old, new); 236 ret = ftrace_modify_code(ip, old, new);
237 237
238 /* Also update the regs callback function */ 238 /* Also update the regs callback function */
239 if (!ret) { 239 if (!ret) {
240 ip = (unsigned long)(&ftrace_regs_call); 240 ip = (unsigned long)(&ftrace_regs_call);
241 memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); 241 memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
242 new = ftrace_call_replace(ip, (unsigned long)func); 242 new = ftrace_call_replace(ip, (unsigned long)func);
243 ret = ftrace_modify_code(ip, old, new); 243 ret = ftrace_modify_code(ip, old, new);
244 } 244 }
245 245
246 atomic_dec(&modifying_ftrace_code); 246 atomic_dec(&modifying_ftrace_code);
247 247
248 return ret; 248 return ret;
249 } 249 }
250 250
251 /* 251 /*
252 * A breakpoint was added to the code address we are about to 252 * A breakpoint was added to the code address we are about to
253 * modify, and this is the handle that will just skip over it. 253 * modify, and this is the handle that will just skip over it.
254 * We are either changing a nop into a trace call, or a trace 254 * We are either changing a nop into a trace call, or a trace
255 * call to a nop. While the change is taking place, we treat 255 * call to a nop. While the change is taking place, we treat
256 * it just like it was a nop. 256 * it just like it was a nop.
257 */ 257 */
258 int ftrace_int3_handler(struct pt_regs *regs) 258 int ftrace_int3_handler(struct pt_regs *regs)
259 { 259 {
260 if (WARN_ON_ONCE(!regs)) 260 if (WARN_ON_ONCE(!regs))
261 return 0; 261 return 0;
262 262
263 if (!ftrace_location(regs->ip - 1)) 263 if (!ftrace_location(regs->ip - 1))
264 return 0; 264 return 0;
265 265
266 regs->ip += MCOUNT_INSN_SIZE - 1; 266 regs->ip += MCOUNT_INSN_SIZE - 1;
267 267
268 return 1; 268 return 1;
269 } 269 }
270 270
271 static int ftrace_write(unsigned long ip, const char *val, int size) 271 static int ftrace_write(unsigned long ip, const char *val, int size)
272 { 272 {
273 /* 273 /*
274 * On x86_64, kernel text mappings are mapped read-only with 274 * On x86_64, kernel text mappings are mapped read-only with
275 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead 275 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
276 * of the kernel text mapping to modify the kernel text. 276 * of the kernel text mapping to modify the kernel text.
277 * 277 *
278 * For 32bit kernels, these mappings are same and we can use 278 * For 32bit kernels, these mappings are same and we can use
279 * kernel identity mapping to modify code. 279 * kernel identity mapping to modify code.
280 */ 280 */
281 if (within(ip, (unsigned long)_text, (unsigned long)_etext)) 281 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
282 ip = (unsigned long)__va(__pa(ip)); 282 ip = (unsigned long)__va(__pa_symbol(ip));
283 283
284 return probe_kernel_write((void *)ip, val, size); 284 return probe_kernel_write((void *)ip, val, size);
285 } 285 }
286 286
287 static int add_break(unsigned long ip, const char *old) 287 static int add_break(unsigned long ip, const char *old)
288 { 288 {
289 unsigned char replaced[MCOUNT_INSN_SIZE]; 289 unsigned char replaced[MCOUNT_INSN_SIZE];
290 unsigned char brk = BREAKPOINT_INSTRUCTION; 290 unsigned char brk = BREAKPOINT_INSTRUCTION;
291 291
292 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) 292 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
293 return -EFAULT; 293 return -EFAULT;
294 294
295 /* Make sure it is what we expect it to be */ 295 /* Make sure it is what we expect it to be */
296 if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0) 296 if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
297 return -EINVAL; 297 return -EINVAL;
298 298
299 if (ftrace_write(ip, &brk, 1)) 299 if (ftrace_write(ip, &brk, 1))
300 return -EPERM; 300 return -EPERM;
301 301
302 return 0; 302 return 0;
303 } 303 }
304 304
305 static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr) 305 static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
306 { 306 {
307 unsigned const char *old; 307 unsigned const char *old;
308 unsigned long ip = rec->ip; 308 unsigned long ip = rec->ip;
309 309
310 old = ftrace_call_replace(ip, addr); 310 old = ftrace_call_replace(ip, addr);
311 311
312 return add_break(rec->ip, old); 312 return add_break(rec->ip, old);
313 } 313 }
314 314
315 315
316 static int add_brk_on_nop(struct dyn_ftrace *rec) 316 static int add_brk_on_nop(struct dyn_ftrace *rec)
317 { 317 {
318 unsigned const char *old; 318 unsigned const char *old;
319 319
320 old = ftrace_nop_replace(); 320 old = ftrace_nop_replace();
321 321
322 return add_break(rec->ip, old); 322 return add_break(rec->ip, old);
323 } 323 }
324 324
325 /* 325 /*
326 * If the record has the FTRACE_FL_REGS set, that means that it 326 * If the record has the FTRACE_FL_REGS set, that means that it
327 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 327 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
328 * is not not set, then it wants to convert to the normal callback. 328 * is not not set, then it wants to convert to the normal callback.
329 */ 329 */
330 static unsigned long get_ftrace_addr(struct dyn_ftrace *rec) 330 static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
331 { 331 {
332 if (rec->flags & FTRACE_FL_REGS) 332 if (rec->flags & FTRACE_FL_REGS)
333 return (unsigned long)FTRACE_REGS_ADDR; 333 return (unsigned long)FTRACE_REGS_ADDR;
334 else 334 else
335 return (unsigned long)FTRACE_ADDR; 335 return (unsigned long)FTRACE_ADDR;
336 } 336 }
337 337
338 /* 338 /*
339 * The FTRACE_FL_REGS_EN is set when the record already points to 339 * The FTRACE_FL_REGS_EN is set when the record already points to
340 * a function that saves all the regs. Basically the '_EN' version 340 * a function that saves all the regs. Basically the '_EN' version
341 * represents the current state of the function. 341 * represents the current state of the function.
342 */ 342 */
343 static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec) 343 static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
344 { 344 {
345 if (rec->flags & FTRACE_FL_REGS_EN) 345 if (rec->flags & FTRACE_FL_REGS_EN)
346 return (unsigned long)FTRACE_REGS_ADDR; 346 return (unsigned long)FTRACE_REGS_ADDR;
347 else 347 else
348 return (unsigned long)FTRACE_ADDR; 348 return (unsigned long)FTRACE_ADDR;
349 } 349 }
350 350
351 static int add_breakpoints(struct dyn_ftrace *rec, int enable) 351 static int add_breakpoints(struct dyn_ftrace *rec, int enable)
352 { 352 {
353 unsigned long ftrace_addr; 353 unsigned long ftrace_addr;
354 int ret; 354 int ret;
355 355
356 ret = ftrace_test_record(rec, enable); 356 ret = ftrace_test_record(rec, enable);
357 357
358 ftrace_addr = get_ftrace_addr(rec); 358 ftrace_addr = get_ftrace_addr(rec);
359 359
360 switch (ret) { 360 switch (ret) {
361 case FTRACE_UPDATE_IGNORE: 361 case FTRACE_UPDATE_IGNORE:
362 return 0; 362 return 0;
363 363
364 case FTRACE_UPDATE_MAKE_CALL: 364 case FTRACE_UPDATE_MAKE_CALL:
365 /* converting nop to call */ 365 /* converting nop to call */
366 return add_brk_on_nop(rec); 366 return add_brk_on_nop(rec);
367 367
368 case FTRACE_UPDATE_MODIFY_CALL_REGS: 368 case FTRACE_UPDATE_MODIFY_CALL_REGS:
369 case FTRACE_UPDATE_MODIFY_CALL: 369 case FTRACE_UPDATE_MODIFY_CALL:
370 ftrace_addr = get_ftrace_old_addr(rec); 370 ftrace_addr = get_ftrace_old_addr(rec);
371 /* fall through */ 371 /* fall through */
372 case FTRACE_UPDATE_MAKE_NOP: 372 case FTRACE_UPDATE_MAKE_NOP:
373 /* converting a call to a nop */ 373 /* converting a call to a nop */
374 return add_brk_on_call(rec, ftrace_addr); 374 return add_brk_on_call(rec, ftrace_addr);
375 } 375 }
376 return 0; 376 return 0;
377 } 377 }
378 378
379 /* 379 /*
380 * On error, we need to remove breakpoints. This needs to 380 * On error, we need to remove breakpoints. This needs to
381 * be done caefully. If the address does not currently have a 381 * be done caefully. If the address does not currently have a
382 * breakpoint, we know we are done. Otherwise, we look at the 382 * breakpoint, we know we are done. Otherwise, we look at the
383 * remaining 4 bytes of the instruction. If it matches a nop 383 * remaining 4 bytes of the instruction. If it matches a nop
384 * we replace the breakpoint with the nop. Otherwise we replace 384 * we replace the breakpoint with the nop. Otherwise we replace
385 * it with the call instruction. 385 * it with the call instruction.
386 */ 386 */
387 static int remove_breakpoint(struct dyn_ftrace *rec) 387 static int remove_breakpoint(struct dyn_ftrace *rec)
388 { 388 {
389 unsigned char ins[MCOUNT_INSN_SIZE]; 389 unsigned char ins[MCOUNT_INSN_SIZE];
390 unsigned char brk = BREAKPOINT_INSTRUCTION; 390 unsigned char brk = BREAKPOINT_INSTRUCTION;
391 const unsigned char *nop; 391 const unsigned char *nop;
392 unsigned long ftrace_addr; 392 unsigned long ftrace_addr;
393 unsigned long ip = rec->ip; 393 unsigned long ip = rec->ip;
394 394
395 /* If we fail the read, just give up */ 395 /* If we fail the read, just give up */
396 if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE)) 396 if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
397 return -EFAULT; 397 return -EFAULT;
398 398
399 /* If this does not have a breakpoint, we are done */ 399 /* If this does not have a breakpoint, we are done */
400 if (ins[0] != brk) 400 if (ins[0] != brk)
401 return -1; 401 return -1;
402 402
403 nop = ftrace_nop_replace(); 403 nop = ftrace_nop_replace();
404 404
405 /* 405 /*
406 * If the last 4 bytes of the instruction do not match 406 * If the last 4 bytes of the instruction do not match
407 * a nop, then we assume that this is a call to ftrace_addr. 407 * a nop, then we assume that this is a call to ftrace_addr.
408 */ 408 */
409 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) { 409 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
410 /* 410 /*
411 * For extra paranoidism, we check if the breakpoint is on 411 * For extra paranoidism, we check if the breakpoint is on
412 * a call that would actually jump to the ftrace_addr. 412 * a call that would actually jump to the ftrace_addr.
413 * If not, don't touch the breakpoint, we make just create 413 * If not, don't touch the breakpoint, we make just create
414 * a disaster. 414 * a disaster.
415 */ 415 */
416 ftrace_addr = get_ftrace_addr(rec); 416 ftrace_addr = get_ftrace_addr(rec);
417 nop = ftrace_call_replace(ip, ftrace_addr); 417 nop = ftrace_call_replace(ip, ftrace_addr);
418 418
419 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0) 419 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
420 goto update; 420 goto update;
421 421
422 /* Check both ftrace_addr and ftrace_old_addr */ 422 /* Check both ftrace_addr and ftrace_old_addr */
423 ftrace_addr = get_ftrace_old_addr(rec); 423 ftrace_addr = get_ftrace_old_addr(rec);
424 nop = ftrace_call_replace(ip, ftrace_addr); 424 nop = ftrace_call_replace(ip, ftrace_addr);
425 425
426 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) 426 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
427 return -EINVAL; 427 return -EINVAL;
428 } 428 }
429 429
430 update: 430 update:
431 return probe_kernel_write((void *)ip, &nop[0], 1); 431 return probe_kernel_write((void *)ip, &nop[0], 1);
432 } 432 }
433 433
434 static int add_update_code(unsigned long ip, unsigned const char *new) 434 static int add_update_code(unsigned long ip, unsigned const char *new)
435 { 435 {
436 /* skip breakpoint */ 436 /* skip breakpoint */
437 ip++; 437 ip++;
438 new++; 438 new++;
439 if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1)) 439 if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
440 return -EPERM; 440 return -EPERM;
441 return 0; 441 return 0;
442 } 442 }
443 443
444 static int add_update_call(struct dyn_ftrace *rec, unsigned long addr) 444 static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
445 { 445 {
446 unsigned long ip = rec->ip; 446 unsigned long ip = rec->ip;
447 unsigned const char *new; 447 unsigned const char *new;
448 448
449 new = ftrace_call_replace(ip, addr); 449 new = ftrace_call_replace(ip, addr);
450 return add_update_code(ip, new); 450 return add_update_code(ip, new);
451 } 451 }
452 452
453 static int add_update_nop(struct dyn_ftrace *rec) 453 static int add_update_nop(struct dyn_ftrace *rec)
454 { 454 {
455 unsigned long ip = rec->ip; 455 unsigned long ip = rec->ip;
456 unsigned const char *new; 456 unsigned const char *new;
457 457
458 new = ftrace_nop_replace(); 458 new = ftrace_nop_replace();
459 return add_update_code(ip, new); 459 return add_update_code(ip, new);
460 } 460 }
461 461
462 static int add_update(struct dyn_ftrace *rec, int enable) 462 static int add_update(struct dyn_ftrace *rec, int enable)
463 { 463 {
464 unsigned long ftrace_addr; 464 unsigned long ftrace_addr;
465 int ret; 465 int ret;
466 466
467 ret = ftrace_test_record(rec, enable); 467 ret = ftrace_test_record(rec, enable);
468 468
469 ftrace_addr = get_ftrace_addr(rec); 469 ftrace_addr = get_ftrace_addr(rec);
470 470
471 switch (ret) { 471 switch (ret) {
472 case FTRACE_UPDATE_IGNORE: 472 case FTRACE_UPDATE_IGNORE:
473 return 0; 473 return 0;
474 474
475 case FTRACE_UPDATE_MODIFY_CALL_REGS: 475 case FTRACE_UPDATE_MODIFY_CALL_REGS:
476 case FTRACE_UPDATE_MODIFY_CALL: 476 case FTRACE_UPDATE_MODIFY_CALL:
477 case FTRACE_UPDATE_MAKE_CALL: 477 case FTRACE_UPDATE_MAKE_CALL:
478 /* converting nop to call */ 478 /* converting nop to call */
479 return add_update_call(rec, ftrace_addr); 479 return add_update_call(rec, ftrace_addr);
480 480
481 case FTRACE_UPDATE_MAKE_NOP: 481 case FTRACE_UPDATE_MAKE_NOP:
482 /* converting a call to a nop */ 482 /* converting a call to a nop */
483 return add_update_nop(rec); 483 return add_update_nop(rec);
484 } 484 }
485 485
486 return 0; 486 return 0;
487 } 487 }
488 488
489 static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr) 489 static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
490 { 490 {
491 unsigned long ip = rec->ip; 491 unsigned long ip = rec->ip;
492 unsigned const char *new; 492 unsigned const char *new;
493 493
494 new = ftrace_call_replace(ip, addr); 494 new = ftrace_call_replace(ip, addr);
495 495
496 if (ftrace_write(ip, new, 1)) 496 if (ftrace_write(ip, new, 1))
497 return -EPERM; 497 return -EPERM;
498 498
499 return 0; 499 return 0;
500 } 500 }
501 501
502 static int finish_update_nop(struct dyn_ftrace *rec) 502 static int finish_update_nop(struct dyn_ftrace *rec)
503 { 503 {
504 unsigned long ip = rec->ip; 504 unsigned long ip = rec->ip;
505 unsigned const char *new; 505 unsigned const char *new;
506 506
507 new = ftrace_nop_replace(); 507 new = ftrace_nop_replace();
508 508
509 if (ftrace_write(ip, new, 1)) 509 if (ftrace_write(ip, new, 1))
510 return -EPERM; 510 return -EPERM;
511 return 0; 511 return 0;
512 } 512 }
513 513
514 static int finish_update(struct dyn_ftrace *rec, int enable) 514 static int finish_update(struct dyn_ftrace *rec, int enable)
515 { 515 {
516 unsigned long ftrace_addr; 516 unsigned long ftrace_addr;
517 int ret; 517 int ret;
518 518
519 ret = ftrace_update_record(rec, enable); 519 ret = ftrace_update_record(rec, enable);
520 520
521 ftrace_addr = get_ftrace_addr(rec); 521 ftrace_addr = get_ftrace_addr(rec);
522 522
523 switch (ret) { 523 switch (ret) {
524 case FTRACE_UPDATE_IGNORE: 524 case FTRACE_UPDATE_IGNORE:
525 return 0; 525 return 0;
526 526
527 case FTRACE_UPDATE_MODIFY_CALL_REGS: 527 case FTRACE_UPDATE_MODIFY_CALL_REGS:
528 case FTRACE_UPDATE_MODIFY_CALL: 528 case FTRACE_UPDATE_MODIFY_CALL:
529 case FTRACE_UPDATE_MAKE_CALL: 529 case FTRACE_UPDATE_MAKE_CALL:
530 /* converting nop to call */ 530 /* converting nop to call */
531 return finish_update_call(rec, ftrace_addr); 531 return finish_update_call(rec, ftrace_addr);
532 532
533 case FTRACE_UPDATE_MAKE_NOP: 533 case FTRACE_UPDATE_MAKE_NOP:
534 /* converting a call to a nop */ 534 /* converting a call to a nop */
535 return finish_update_nop(rec); 535 return finish_update_nop(rec);
536 } 536 }
537 537
538 return 0; 538 return 0;
539 } 539 }
540 540
541 static void do_sync_core(void *data) 541 static void do_sync_core(void *data)
542 { 542 {
543 sync_core(); 543 sync_core();
544 } 544 }
545 545
546 static void run_sync(void) 546 static void run_sync(void)
547 { 547 {
548 int enable_irqs = irqs_disabled(); 548 int enable_irqs = irqs_disabled();
549 549
550 /* We may be called with interrupts disbled (on bootup). */ 550 /* We may be called with interrupts disbled (on bootup). */
551 if (enable_irqs) 551 if (enable_irqs)
552 local_irq_enable(); 552 local_irq_enable();
553 on_each_cpu(do_sync_core, NULL, 1); 553 on_each_cpu(do_sync_core, NULL, 1);
554 if (enable_irqs) 554 if (enable_irqs)
555 local_irq_disable(); 555 local_irq_disable();
556 } 556 }
557 557
/*
 * Patch every ftrace call site in three phases, with a full CPU sync
 * (run_sync()) after each phase so no CPU ever executes a half-written
 * instruction:
 *   1) add_breakpoints() - arm each site with a breakpoint
 *   2) add_update()      - write the new instruction tail behind it
 *   3) finish_update()   - replace the breakpoint with the final byte
 *
 * If any phase fails, report which phase broke and sweep all records
 * to remove any breakpoints that were already placed.
 */
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	/* rec points at the record that failed (may be NULL if none). */
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		remove_breakpoint(rec);
	}
}
611 611
/*
 * Safely replace the instruction at @ip, expected to currently hold
 * @old_code, with @new_code, using the same breakpoint scheme as
 * ftrace_replace_code(): break, update tail, write final byte, with a
 * CPU sync between each step.
 *
 * Returns 0 on success, -EPERM if the final write fails, or the error
 * from the earlier steps.
 *
 * NOTE(review): on the fail_update path the original first byte is
 * restored via probe_kernel_write() without a subsequent run_sync();
 * verify whether a sync is required there as well.
 */
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	/* Write the first byte of the new instruction over the breakpoint. */
	ret = ftrace_write(ip, new_code, 1);
	if (ret) {
		ret = -EPERM;
		goto out;
	}
	run_sync();
 out:
	return ret;

 fail_update:
	/* Put the original first byte back so the site is executable again. */
	probe_kernel_write((void *)ip, &old_code[0], 1);
	goto out;
}
643 643
/*
 * Arch entry point for ftrace code modification: run the generic
 * update with modifying_ftrace_code held elevated for its duration.
 */
void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}
653 653
/*
 * Arch-specific dynamic ftrace init: x86 needs no extra setup, so just
 * report success.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
661 #endif 661 #endif
662 662
663 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 663 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
664 664
665 #ifdef CONFIG_DYNAMIC_FTRACE 665 #ifdef CONFIG_DYNAMIC_FTRACE
666 extern void ftrace_graph_call(void); 666 extern void ftrace_graph_call(void);
667 667
/*
 * Rewrite the rel32 offset of the near jmp at @ip.
 *
 * The site is patched only if it currently holds a jmp (opcode 0xe9)
 * whose offset equals @old_offset; otherwise nothing is modified.
 *
 * Returns 0 on success, -EFAULT if the site cannot be read, -EINVAL if
 * the current instruction does not match, -EPERM if the write fails.
 */
static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* 0xe9 = jmp rel32; make sure we are redirecting the expected target */
	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}
686 686
687 int ftrace_enable_ftrace_graph_caller(void) 687 int ftrace_enable_ftrace_graph_caller(void)
688 { 688 {
689 unsigned long ip = (unsigned long)(&ftrace_graph_call); 689 unsigned long ip = (unsigned long)(&ftrace_graph_call);
690 int old_offset, new_offset; 690 int old_offset, new_offset;
691 691
692 old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); 692 old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
693 new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); 693 new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
694 694
695 return ftrace_mod_jmp(ip, old_offset, new_offset); 695 return ftrace_mod_jmp(ip, old_offset, new_offset);
696 } 696 }
697 697
698 int ftrace_disable_ftrace_graph_caller(void) 698 int ftrace_disable_ftrace_graph_caller(void)
699 { 699 {
700 unsigned long ip = (unsigned long)(&ftrace_graph_call); 700 unsigned long ip = (unsigned long)(&ftrace_graph_call);
701 int old_offset, new_offset; 701 int old_offset, new_offset;
702 702
703 old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); 703 old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
704 new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); 704 new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
705 705
706 return ftrace_mod_jmp(ip, old_offset, new_offset); 706 return ftrace_mod_jmp(ip, old_offset, new_offset);
707 } 707 }
708 708
709 #endif /* !CONFIG_DYNAMIC_FTRACE */ 709 #endif /* !CONFIG_DYNAMIC_FTRACE */
710 710
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:        location on the stack holding the caller's return address
 * @self_addr:     address of the traced function
 * @frame_pointer: frame pointer, used to validate the return stack
 *
 * On success, *parent is redirected to return_to_handler and the real
 * return address is saved via ftrace_push_return_trace(); on any
 * failure *parent is restored to the original address.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	/*
	 * Reads the original return address from *parent into 'old' and
	 * writes return_hooker in its place; any fault on either access
	 * jumps to the fixup section, which sets 'faulted' instead of
	 * oopsing.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		/* A fault here means the stack slot is bad; stop tracing. */
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY) {
		/* Return stack full: undo the hook. */
		*parent = old;
		return;
	}
}
772 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 772 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
773 773