kernel/trace/trace_clock.c
  /*
   * tracing clocks
   *
   *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Implements 3 trace clock variants, with differing scalability/precision
   * tradeoffs:
   *
   *  -   local: CPU-local trace clock
   *  -  medium: scalable global clock with some jitter
   *  -  global: globally monotonic, serialized clock
   *
 * Tracer plugins will choose a default from these clocks.
   */
  #include <linux/spinlock.h>
  #include <linux/irqflags.h>
  #include <linux/hardirq.h>
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/sched.h>
  #include <linux/ktime.h>
  #include <linux/trace_clock.h>

  #include "trace.h"
  /*
   * trace_clock_local(): the simplest and least coherent tracing clock.
   *
   * Useful for tracing that does not cross to other CPUs nor
   * does it go through idle events.
   */
  u64 notrace trace_clock_local(void)
  {
  	u64 clock;
  	/*
  	 * sched_clock() is an architecture implemented, fast, scalable,
  	 * lockless clock. It is not guaranteed to be coherent across
  	 * CPUs, nor across CPU idle events.
  	 */
  	preempt_disable_notrace();
  	clock = sched_clock();
  	preempt_enable_notrace();
  
  	return clock;
  }
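
/*
 * Usage sketch (illustrative only, not part of the original file):
 * timing a short code region with trace_clock_local(). Since the
 * clock is CPU-local, both samples must be taken on the same CPU,
 * hence the preempt_disable()/preempt_enable() pair. The workload
 * function below is hypothetical.
 */
#if 0
static void example_time_region(void)
{
	u64 t0, t1;

	preempt_disable();
	t0 = trace_clock_local();
	do_traced_work();	/* hypothetical workload */
	t1 = trace_clock_local();
	preempt_enable();

	/* sched_clock() counts nanoseconds on most architectures */
	pr_info("region took %llu ns\n", (unsigned long long)(t1 - t0));
}
#endif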
  
  /*
   * trace_clock(): 'between' trace clock. Not completely serialized,
   * but not completely incorrect when crossing CPUs either.
   *
   * This is based on cpu_clock(), which will allow at most ~1 jiffy of
   * jitter between CPUs. So it's a pretty scalable clock, but there
   * can be offsets in the trace data.
   */
  u64 notrace trace_clock(void)
  {
  	return local_clock();
  }
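
/*
 * Usage sketch (illustrative only, not part of the original file):
 * comparing trace_clock() timestamps taken on different CPUs. The
 * delta is approximate and may even appear negative, since this
 * clock allows up to roughly one jiffy of inter-CPU jitter.
 */
#if 0
static void example_cross_cpu_delta(u64 ts_cpu0, u64 ts_cpu1)
{
	s64 delta = (s64)(ts_cpu1 - ts_cpu0);

	if (delta < 0)
		pr_info("apparent reordering of %lld ns across CPUs\n",
			-delta);
}
#endif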
  
  
  /*
   * trace_clock_global(): special globally coherent trace clock
   *
   * It has higher overhead than the other trace clocks but is still
   * an order of magnitude faster than GTOD derived hardware clocks.
   *
   * Used by plugins that need globally coherent timestamps.
   */
  /* keep prev_time and lock in the same cacheline. */
  static struct {
  	u64 prev_time;
  	arch_spinlock_t lock;
  } trace_clock_struct ____cacheline_aligned_in_smp =
  	{
  		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
  	};
  
  u64 notrace trace_clock_global(void)
  {
  	unsigned long flags;
  	int this_cpu;
	u64 now;

  	local_irq_save(flags);
  
  	this_cpu = raw_smp_processor_id();
  	now = cpu_clock(this_cpu);
  	/*
	 * If in an NMI context then don't risk lockups and return the
  	 * cpu_clock() time:
  	 */
  	if (unlikely(in_nmi()))
  		goto out;
  	arch_spin_lock(&trace_clock_struct.lock);
  
  	/*
  	 * TODO: if this happens often then maybe we should reset
  	 * my_scd->clock to prev_time+1, to make sure
  	 * we start ticking with the local clock from now on?
  	 */
  	if ((s64)(now - trace_clock_struct.prev_time) < 0)
  		now = trace_clock_struct.prev_time + 1;

  	trace_clock_struct.prev_time = now;

  	arch_spin_unlock(&trace_clock_struct.lock);
  
   out:
  	local_irq_restore(flags);
  
  	return now;
  }
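
/*
 * Usage sketch (illustrative only, not part of the original file):
 * two back-to-back reads of trace_clock_global() never go backwards,
 * regardless of CPU migration in between, because every reader is
 * serialized against trace_clock_struct.prev_time (the NMI fast path
 * above being the one exception).
 */
#if 0
static void example_global_monotonic(void)
{
	u64 a = trace_clock_global();
	u64 b = trace_clock_global();

	WARN_ON_ONCE((s64)(b - a) < 0);	/* not expected to fire */
}
#endif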
  
  static atomic64_t trace_counter;
  
  /*
   * trace_clock_counter(): simply an atomic counter.
   * Use the trace_counter "counter" for cases where you do not care
   * about timings, but are interested in strict ordering.
   */
  u64 notrace trace_clock_counter(void)
  {
  	return atomic64_add_return(1, &trace_counter);
  }
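
/*
 * Usage sketch (illustrative only, not part of the original file):
 * tagging events with trace_clock_counter() when only their relative
 * order matters. atomic64_add_return() is fully ordered, so the tags
 * are unique and strictly increasing across all CPUs.
 */
#if 0
struct example_event {		/* hypothetical event record */
	u64 seq;
};

static void example_tag_event(struct example_event *ev)
{
	ev->seq = trace_clock_counter();
}
#endif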