arch/x86/kernel/ftrace.c 10.9 KB
  /*
   * Code for replacing ftrace calls with jumps.
   *
   * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   *
   * Thanks go to Ingo Molnar for suggesting the idea,
   * to Mathieu Desnoyers for suggesting postponing the modifications,
   * and to Arjan van de Ven for keeping me straight and explaining
   * the dangers of modifying code on the run.
   */
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/spinlock.h>
  #include <linux/hardirq.h>
  #include <linux/uaccess.h>
  #include <linux/ftrace.h>
  #include <linux/percpu.h>
  #include <linux/sched.h>
  #include <linux/init.h>
  #include <linux/list.h>
  #include <linux/module.h>

  #include <trace/syscall.h>
  #include <asm/cacheflush.h>
  #include <asm/ftrace.h>
  #include <asm/nops.h>
  #include <asm/nmi.h>


  #ifdef CONFIG_DYNAMIC_FTRACE

  /*
   * modifying_code is set to notify NMIs that they need to use
   * memory barriers when entering or exiting. But we don't want
   * to burden NMIs with unnecessary memory barriers when code
   * modification is not being done (which is most of the time).
   *
   * A mutex is already held when ftrace_arch_code_modify_prepare
   * and post_process are called. No locks need to be taken here.
   *
   * Stop machine will make sure currently running NMIs are done
   * and new NMIs will see the updated variable before we need
   * to worry about NMIs doing memory barriers.
   */
  static int modifying_code __read_mostly;
  static DEFINE_PER_CPU(int, save_modifying_code);
  int ftrace_arch_code_modify_prepare(void)
  {
  	set_kernel_text_rw();
  	set_all_modules_text_rw();
  	modifying_code = 1;
  	return 0;
  }
  
  int ftrace_arch_code_modify_post_process(void)
  {
  	modifying_code = 0;
  	set_all_modules_text_ro();
  	set_kernel_text_ro();
  	return 0;
  }
  union ftrace_code_union {
  	char code[MCOUNT_INSN_SIZE];
  	struct {
  		char e8;
  		int offset;
  	} __attribute__((packed));
  };
  static int ftrace_calc_offset(long ip, long addr)
  {
  	return (int)(addr - ip);
  }

  static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  {
  	static union ftrace_code_union calc;

  	calc.e8		= 0xe8;
  	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
  
  	/*
  	 * No locking is needed; this must be called via kstop_machine,
  	 * which in essence is like running on a uniprocessor machine.
  	 */
  	return calc.code;
  }
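
  /*
   * A worked example (addresses hypothetical): the five bytes built
   * above encode an x86 near call, "e8 <rel32>", whose 32-bit
   * displacement is relative to the instruction *following* the call.
   * With ip = 0xffffffff81000000 and addr = 0xffffffff81000010:
   *
   *	offset = addr - (ip + MCOUNT_INSN_SIZE)
   *	       = 0xffffffff81000010 - 0xffffffff81000005 = 0x0b
   *
   * so calc.code holds { 0xe8, 0x0b, 0x00, 0x00, 0x00 }, with the
   * offset stored little-endian.
   */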
  /*
   * Modifying code must take extra care. On an SMP machine, if
   * the code being modified is also being executed on another CPU
   * that CPU will have undefined results and possibly take a GPF.
   * We use kstop_machine to stop other CPUs from executing code.
   * But this does not stop NMIs from happening. We still need
   * to protect against that. We separate out the modification of
   * the code to take care of this.
   *
   * Two buffers are added: An IP buffer and a "code" buffer.
   *
   * 1) Put the instruction pointer into the IP buffer
   *    and the new code into the "code" buffer.
   * 2) Wait for any running NMIs to finish, then set a flag that says
   *    we are modifying code; this is done as an atomic operation.
   * 3) Write the code.
   * 4) Clear the flag.
   * 5) Wait for any running NMIs to finish.
   *
   * If an NMI is executed, the first thing it does is to call
   * "ftrace_nmi_enter". This will check if the flag is set to write
   * and if it is, it will write what is in the IP and "code" buffers.
   *
   * The trick is, it does not matter if everyone is writing the same
   * content to the code location. Also, if a CPU is executing code
   * it is OK to write to that code location if the contents being written
   * are the same as what exists.
   */
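
  /*
   * An illustrative sketch of the resulting interplay (memory barriers
   * omitted; helpers are defined below): nmi_running packs both pieces
   * of state into one atomic_t. The low bits count CPUs currently in
   * an NMI handler, and bit 31 (MOD_CODE_WRITE_FLAG) asks newly
   * arriving NMIs to perform the write themselves:
   *
   *	modifying CPU:				NMI on another CPU:
   *	  wait_for_nmi_and_set_mod_flag()	  atomic_inc(&nmi_running)
   *	  ftrace_mod_code()			  if flag set: ftrace_mod_code()
   *	  clear_mod_flag()			  ... handler body ...
   *	  wait_for_nmi()			  atomic_dec(&nmi_running)
   */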
  #define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
  static atomic_t nmi_running = ATOMIC_INIT(0);
  static int mod_code_status;		/* holds return value of text write */
  static void *mod_code_ip;		/* holds the IP to write to */
  static const void *mod_code_newcode;	/* holds the text to write to the IP */

  static unsigned nmi_wait_count;
  static atomic_t nmi_update_count = ATOMIC_INIT(0);
  
  int ftrace_arch_read_dyn_info(char *buf, int size)
  {
  	int r;
  
  	r = snprintf(buf, size, "%u %u",
  		     nmi_wait_count,
  		     atomic_read(&nmi_update_count));
  	return r;
  }
  static void clear_mod_flag(void)
  {
  	int old = atomic_read(&nmi_running);
  
  	for (;;) {
  		int new = old & ~MOD_CODE_WRITE_FLAG;
  
  		if (old == new)
  			break;
  
  		old = atomic_cmpxchg(&nmi_running, old, new);
  	}
  }
  static void ftrace_mod_code(void)
  {
  	/*
  	 * Yes, more than one CPU can be writing to mod_code_status
  	 * (and to the code itself).
  	 * But if one were to fail, then they all should, and if one were
  	 * to succeed, then they all should.
  	 */
  	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
  					     MCOUNT_INSN_SIZE);
  
  	/* if we fail, then kill any new writers */
  	if (mod_code_status)
  		clear_mod_flag();
  }
  void ftrace_nmi_enter(void)
  {
  	__this_cpu_write(save_modifying_code, modifying_code);

  	if (!__this_cpu_read(save_modifying_code))
  		return;
  	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
  		smp_rmb();
  		ftrace_mod_code();
  		atomic_inc(&nmi_update_count);
  	}
  	/* Must have previous changes seen before executions */
  	smp_mb();
  }
  void ftrace_nmi_exit(void)
  {
  	if (!__this_cpu_read(save_modifying_code))
  		return;
  	/* Finish all executions before clearing nmi_running */
  	smp_mb();
  	atomic_dec(&nmi_running);
  }
  static void wait_for_nmi_and_set_mod_flag(void)
  {
  	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
  		return;
  
  	do {
  		cpu_relax();
  	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
  
  	nmi_wait_count++;
  }
  static void wait_for_nmi(void)
  {
  	if (!atomic_read(&nmi_running))
  		return;

  	do {
  		cpu_relax();
  	} while (atomic_read(&nmi_running));

  	nmi_wait_count++;
  }
  static inline int
  within(unsigned long addr, unsigned long start, unsigned long end)
  {
  	return addr >= start && addr < end;
  }
  static int
  do_ftrace_mod_code(unsigned long ip, const void *new_code)
  {
  	/*
  	 * On x86_64, kernel text mappings are mapped read-only with
  	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
  	 * of the kernel text mapping to modify the kernel text.
  	 *
  	 * For 32-bit kernels, these mappings are the same, and we can
  	 * use the kernel identity mapping to modify code.
  	 */
  	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
  		ip = (unsigned long)__va(__pa(ip));
  	mod_code_ip = (void *)ip;
  	mod_code_newcode = new_code;
  
  	/* The buffers need to be visible before we let NMIs write them */
  	smp_mb();
  	wait_for_nmi_and_set_mod_flag();
  
  	/* Make sure all running NMIs have finished before we write the code */
  	smp_mb();
  
  	ftrace_mod_code();
  
  	/* Make sure the write happens before clearing the bit */
  	smp_mb();
  	clear_mod_flag();
  	wait_for_nmi();
  
  	return mod_code_status;
  }
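
  /*
   * A worked example (addresses hypothetical): on x86_64 with
   * CONFIG_DEBUG_RODATA, ip = 0xffffffff81002af0 lies in the read-only
   * text mapping, while __va(__pa(ip)) yields the writable identity-map
   * alias of the same physical bytes, e.g. 0xffff880001002af0.
   * do_ftrace_mod_code() writes through that alias, and the CPU then
   * executes the patched bytes via the original text mapping.
   */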
  static const unsigned char *ftrace_nop_replace(void)
  {
  	return ideal_nops[NOP_ATOMIC5];
  }
  static int
  ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
  		   unsigned const char *new_code)
  {
  	unsigned char replaced[MCOUNT_INSN_SIZE];
  
  	/*
  	 * Note: Due to modules and __init, code can
  	 * disappear and change; we need to protect against faulting
  	 * as well as code changing. We do this by using the
  	 * probe_kernel_* functions.
  	 *
  	 * No real locking is needed; this code is run through
  	 * kstop_machine, or before SMP starts.
  	 */
  
  	/* read the text we want to modify */
  	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  		return -EFAULT;

  	/* Make sure it is what we expect it to be */
  	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
  		return -EINVAL;

  	/* replace the text with the new text */
  	if (do_ftrace_mod_code(ip, new_code))
  		return -EPERM;
  
  	sync_core();

  	return 0;
  }
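
  /*
   * A concrete (hypothetical) instance of the above: ftrace_make_nop()
   * below passes old = "e8 <rel32 to mcount>" and new = the 5-byte
   * atomic NOP, so a call site changes from, say, e8 1b 00 00 00 to
   * 0f 1f 44 00 00 (the P6 NOP_ATOMIC5 pattern), and only if the bytes
   * currently at rec->ip still match the expected old sequence.
   */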
  int ftrace_make_nop(struct module *mod,
  		    struct dyn_ftrace *rec, unsigned long addr)
  {
  	unsigned const char *new, *old;
  	unsigned long ip = rec->ip;
  
  	old = ftrace_call_replace(ip, addr);
  	new = ftrace_nop_replace();
  
  	return ftrace_modify_code(rec->ip, old, new);
  }
  
  int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  {
  	unsigned const char *new, *old;
  	unsigned long ip = rec->ip;
  
  	old = ftrace_nop_replace();
  	new = ftrace_call_replace(ip, addr);
  
  	return ftrace_modify_code(rec->ip, old, new);
  }
  int ftrace_update_ftrace_func(ftrace_func_t func)
  {
  	unsigned long ip = (unsigned long)(&ftrace_call);
  	unsigned char old[MCOUNT_INSN_SIZE], *new;
  	int ret;
  	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
  	new = ftrace_call_replace(ip, (unsigned long)func);
  	ret = ftrace_modify_code(ip, old, new);
  
  	return ret;
  }
  int __init ftrace_dyn_arch_init(void *data)
  {
  	/* The return code is returned via data */
  	*(unsigned long *)data = 0;

  	return 0;
  }
  #endif

  #ifdef CONFIG_FUNCTION_GRAPH_TRACER

  #ifdef CONFIG_DYNAMIC_FTRACE
  extern void ftrace_graph_call(void);
  
  static int ftrace_mod_jmp(unsigned long ip,
  			  int old_offset, int new_offset)
  {
  	unsigned char code[MCOUNT_INSN_SIZE];
  
  	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
  		return -EFAULT;
  
  	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
  		return -EINVAL;
  
  	*(int *)(&code[1]) = new_offset;
  
  	if (do_ftrace_mod_code(ip, &code))
  		return -EPERM;
  
  	return 0;
  }
  
  int ftrace_enable_ftrace_graph_caller(void)
  {
  	unsigned long ip = (unsigned long)(&ftrace_graph_call);
  	int old_offset, new_offset;
  
  	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
  	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
  
  	return ftrace_mod_jmp(ip, old_offset, new_offset);
  }
  
  int ftrace_disable_ftrace_graph_caller(void)
  {
  	unsigned long ip = (unsigned long)(&ftrace_graph_call);
  	int old_offset, new_offset;
  
  	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
  	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
  
  	return ftrace_mod_jmp(ip, old_offset, new_offset);
  }
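
  /*
   * A worked example (addresses hypothetical): if ftrace_graph_call is
   * a 5-byte "e9 <rel32>" jump at ip = 0xffffffff81002000 and
   * ftrace_stub sits at 0xffffffff81002100, the old displacement is
   * 0x000000fb, i.e. 0xffffffff81002100 - (ip + MCOUNT_INSN_SIZE).
   * Enabling the graph caller rewrites only those four displacement
   * bytes to point at ftrace_graph_caller; ftrace_mod_jmp() verifies
   * the 0xe9 opcode and the old offset before patching.
   */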
  #endif /* CONFIG_DYNAMIC_FTRACE */
  /*
   * Hook the return address and push it onto the stack of return
   * addresses in the current thread_info.
   */
  void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
  			   unsigned long frame_pointer)
  {
  	unsigned long old;
  	int faulted;
  	struct ftrace_graph_ent trace;
  	unsigned long return_hooker = (unsigned long)
  				&return_to_handler;
  	if (unlikely(atomic_read(&current->tracing_graph_pause)))
  		return;
  
  	/*
  	 * Protect against a fault, even if it shouldn't
  	 * happen. This tool is too intrusive to forgo
  	 * such protection.
  	 */
  	asm volatile(
  		"1: " _ASM_MOV " (%[parent]), %[old]
  "
  		"2: " _ASM_MOV " %[return_hooker], (%[parent])
  "
  		"   movl $0, %[faulted]
  "
  		"3:
  "
  
  		".section .fixup, \"ax\"
  "
  		"4: movl $1, %[faulted]
  "
  		"   jmp 3b
  "
  		".previous
  "
  		_ASM_EXTABLE(1b, 4b)
  		_ASM_EXTABLE(2b, 4b)

  		: [old] "=&r" (old), [faulted] "=r" (faulted)
  		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
  		: "memory"
  	);
  	if (unlikely(faulted)) {
  		ftrace_graph_stop();
  		WARN_ON(1);
  		return;
  	}
  	trace.func = self_addr;
  	trace.depth = current->curr_ret_stack + 1;

  	/* Only trace if the calling function expects to */
  	if (!ftrace_graph_entry(&trace)) {
  		*parent = old;
  		return;
  	}
  
  	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
  		    frame_pointer) == -EBUSY) {
  		*parent = old;
  		return;
  	}
  }
  #endif /* CONFIG_FUNCTION_GRAPH_TRACER */