kernel/time/sched_clock.c

/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>
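
/*
 * State shared between the epoch update path and sched_clock() readers:
 * epoch_cyc/epoch_ns snapshot the raw counter and the derived ns value at
 * the last update, and seq lets readers detect a concurrent update.
 */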
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;
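
/*
 * Default "counter": the jiffy tick. It is used until (and unless) a
 * platform registers a real counter via sched_clock_register().
 */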
static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
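
/*
 * Convert counter ticks to nanoseconds with fixed-point arithmetic:
 * ns = (cyc * mult) >> shift, where mult and shift are precomputed by
 * clocks_calc_mult_shift() at registration time.
 */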
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
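
/*
 * Lockless read path: snapshot the epoch under the seqcount, then extend
 * it by the counter delta. Masking the delta with sched_clock_mask keeps
 * the result correct even after the raw counter has wrapped once.
 */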
unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	do {
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}
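
/*
 * hrtimer callback, re-armed every cd.wrap_kt: refresh the epoch before
 * the raw counter can wrap, so the masked delta computed in sched_clock()
 * never spans more than one counter period.
 */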
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}
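
/*
 * Switch sched_clock() over to a new raw counter. The current time is
 * computed from the old counter with the old mult/shift before the new
 * parameters are published, so the clock stays monotonic across the
 * handover.
 */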
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	ktime_t new_wrap_kt;
	unsigned long r;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);

	/* calculate how many ns until we wrap */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_write_seqcount_begin(&cd.seq);
	read_sched_clock = read;
	sched_clock_mask = new_mask;
	cd.rate = rate;
	cd.wrap_kt = new_wrap_kt;
	cd.mult = new_mult;
	cd.shift = new_shift;
	cd.epoch_cyc = new_epoch;
	cd.epoch_ns = ns;
	raw_write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy counter the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
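
/*
 * The raw counter may stop or reset across suspend, so take a final
 * epoch update and freeze sched_clock() at epoch_ns; resume re-reads
 * the counter into the epoch before unfreezing.
 */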
static int sched_clock_suspend(void)
{
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);