kernel/trace/trace_clock.c
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>
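
/*
 * Note (editor's addition, not in the original file): tracers normally
 * pick one of these clocks at run time through the tracefs
 * "trace_clock" file rather than by calling the helpers below
 * directly, e.g.:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime ...
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */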

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
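
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file; the function name and the config guard are hypothetical): a
 * minimal caller of the CPU-local clock. A delta like this is only
 * meaningful if both samples are taken on the same CPU, since the
 * clock is not coherent across CPUs or idle events.
 */
#ifdef TRACE_CLOCK_LOCAL_EXAMPLE
static void trace_clock_local_example(void)
{
	u64 t0, t1;

	t0 = trace_clock_local();
	/* ... the work being timed ... */
	t1 = trace_clock_local();
	pr_info("local clock delta: %llu ns\n",
		(unsigned long long)(t1 - t0));
}
#endif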

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
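
/*
 * Note (editor's addition): local_clock() is the current-CPU form of
 * the cpu_clock() mentioned in the comment above, so the ~1 jiffy
 * cross-CPU jitter bound described there is what this clock provides.
 */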
|
/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
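
/*
 * Worked example (editor's addition): jiffies_64_to_clock_t() converts
 * HZ-rate jiffies into USER_HZ-rate clock_t ticks. With HZ=250 and
 * USER_HZ=100, a sample taken 500 jiffies after boot is 2 seconds of
 * uptime and is reported as 200 ticks. Subtracting INITIAL_JIFFIES
 * makes the count start at zero at boot instead of at the offset the
 * kernel uses to catch jiffies-wrap bugs early.
 */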

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */
/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;
	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
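
/*
 * Worked example (editor's addition): the (s64) cast in the clamp
 * above is a wraparound-safe way of asking "did this CPU's clock read
 * earlier than the last globally issued timestamp?". If prev_time is
 * 100 and a lagging CPU samples now == 90, then (s64)(90 - 100) < 0,
 * so the event is stamped prev_time + 1 == 101 and the clock stays
 * globally monotonic.
 */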

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
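
/*
 * Usage note (editor's addition): atomic64_add_return() hands every
 * caller a unique, strictly increasing value, so concurrent events can
 * be ordered even though the "timestamp" carries no time information.
 * The first call returns 1:
 *
 *	u64 seq = trace_clock_counter();	(1, then 2, 3, ...)
 */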