Commit d6ad418763888f617ac5b4849823e4cd670df1dd
1 parent
47c8c91b2d
Exists in
master
and in
20 other branches
time: Kill xtime_lock, replacing it with jiffies_lock
Now that timekeeping is protected by its own locks, rename the xtime_lock to jiffies_lock to better describe what it protects. CC: Thomas Gleixner <tglx@linutronix.de> CC: Eric Dumazet <eric.dumazet@gmail.com> CC: Richard Cochran <richardcochran@gmail.com> Signed-off-by: John Stultz <john.stultz@linaro.org>
Showing 7 changed files with 25 additions and 31 deletions Side-by-side Diff
drivers/clocksource/i8253.c
... | ... | @@ -35,7 +35,7 @@ |
35 | 35 | |
36 | 36 | raw_spin_lock_irqsave(&i8253_lock, flags); |
37 | 37 | /* |
38 | - * Although our caller may have the read side of xtime_lock, | |
38 | + * Although our caller may have the read side of jiffies_lock, | |
39 | 39 | * this is now a seqlock, and we are cheating in this routine |
40 | 40 | * by having side effects on state that we cannot undo if |
41 | 41 | * there is a collision on the seqlock and our caller has to |
include/linux/jiffies.h
... | ... | @@ -70,11 +70,12 @@ |
70 | 70 | |
71 | 71 | /* |
72 | 72 | * The 64-bit value is not atomic - you MUST NOT read it |
73 | - * without sampling the sequence number in xtime_lock. | |
73 | + * without sampling the sequence number in jiffies_lock. | |
74 | 74 | * get_jiffies_64() will do this for you as appropriate. |
75 | 75 | */ |
76 | 76 | extern u64 __jiffy_data jiffies_64; |
77 | 77 | extern unsigned long volatile __jiffy_data jiffies; |
78 | +extern seqlock_t jiffies_lock; | |
78 | 79 | |
79 | 80 | #if (BITS_PER_LONG < 64) |
80 | 81 | u64 get_jiffies_64(void); |
kernel/time/jiffies.c
... | ... | @@ -67,6 +67,8 @@ |
67 | 67 | .shift = JIFFIES_SHIFT, |
68 | 68 | }; |
69 | 69 | |
70 | +__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); | |
71 | + | |
70 | 72 | #if (BITS_PER_LONG < 64) |
71 | 73 | u64 get_jiffies_64(void) |
72 | 74 | { |
73 | 75 | |
... | ... | @@ -74,9 +76,9 @@ |
74 | 76 | u64 ret; |
75 | 77 | |
76 | 78 | do { |
77 | - seq = read_seqbegin(&xtime_lock); | |
79 | + seq = read_seqbegin(&jiffies_lock); | |
78 | 80 | ret = jiffies_64; |
79 | - } while (read_seqretry(&xtime_lock, seq)); | |
81 | + } while (read_seqretry(&jiffies_lock, seq)); | |
80 | 82 | return ret; |
81 | 83 | } |
82 | 84 | EXPORT_SYMBOL(get_jiffies_64); |
kernel/time/tick-common.c
... | ... | @@ -63,13 +63,13 @@ |
63 | 63 | static void tick_periodic(int cpu) |
64 | 64 | { |
65 | 65 | if (tick_do_timer_cpu == cpu) { |
66 | - write_seqlock(&xtime_lock); | |
66 | + write_seqlock(&jiffies_lock); | |
67 | 67 | |
68 | 68 | /* Keep track of the next tick event */ |
69 | 69 | tick_next_period = ktime_add(tick_next_period, tick_period); |
70 | 70 | |
71 | 71 | do_timer(1); |
72 | - write_sequnlock(&xtime_lock); | |
72 | + write_sequnlock(&jiffies_lock); | |
73 | 73 | } |
74 | 74 | |
75 | 75 | update_process_times(user_mode(get_irq_regs())); |
76 | 76 | |
... | ... | @@ -130,9 +130,9 @@ |
130 | 130 | ktime_t next; |
131 | 131 | |
132 | 132 | do { |
133 | - seq = read_seqbegin(&xtime_lock); | |
133 | + seq = read_seqbegin(&jiffies_lock); | |
134 | 134 | next = tick_next_period; |
135 | - } while (read_seqretry(&xtime_lock, seq)); | |
135 | + } while (read_seqretry(&jiffies_lock, seq)); | |
136 | 136 | |
137 | 137 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
138 | 138 |
kernel/time/tick-internal.h
kernel/time/tick-sched.c
... | ... | @@ -31,7 +31,7 @@ |
31 | 31 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); |
32 | 32 | |
33 | 33 | /* |
34 | - * The time, when the last jiffy update happened. Protected by xtime_lock. | |
34 | + * The time, when the last jiffy update happened. Protected by jiffies_lock. | |
35 | 35 | */ |
36 | 36 | static ktime_t last_jiffies_update; |
37 | 37 | |
38 | 38 | |
... | ... | @@ -49,14 +49,14 @@ |
49 | 49 | ktime_t delta; |
50 | 50 | |
51 | 51 | /* |
52 | - * Do a quick check without holding xtime_lock: | |
52 | + * Do a quick check without holding jiffies_lock: | |
53 | 53 | */ |
54 | 54 | delta = ktime_sub(now, last_jiffies_update); |
55 | 55 | if (delta.tv64 < tick_period.tv64) |
56 | 56 | return; |
57 | 57 | |
58 | - /* Reevalute with xtime_lock held */ | |
59 | - write_seqlock(&xtime_lock); | |
58 | + /* Reevaluate with jiffies_lock held */ |
59 | + write_seqlock(&jiffies_lock); | |
60 | 60 | |
61 | 61 | delta = ktime_sub(now, last_jiffies_update); |
62 | 62 | if (delta.tv64 >= tick_period.tv64) { |
... | ... | @@ -79,7 +79,7 @@ |
79 | 79 | /* Keep the tick_next_period variable up to date */ |
80 | 80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); |
81 | 81 | } |
82 | - write_sequnlock(&xtime_lock); | |
82 | + write_sequnlock(&jiffies_lock); | |
83 | 83 | } |
84 | 84 | |
85 | 85 | /* |
86 | 86 | |
... | ... | @@ -89,12 +89,12 @@ |
89 | 89 | { |
90 | 90 | ktime_t period; |
91 | 91 | |
92 | - write_seqlock(&xtime_lock); | |
92 | + write_seqlock(&jiffies_lock); | |
93 | 93 | /* Did we start the jiffies update yet ? */ |
94 | 94 | if (last_jiffies_update.tv64 == 0) |
95 | 95 | last_jiffies_update = tick_next_period; |
96 | 96 | period = last_jiffies_update; |
97 | - write_sequnlock(&xtime_lock); | |
97 | + write_sequnlock(&jiffies_lock); | |
98 | 98 | return period; |
99 | 99 | } |
100 | 100 | |
101 | 101 | |
... | ... | @@ -282,11 +282,11 @@ |
282 | 282 | |
283 | 283 | /* Read jiffies and the time when jiffies were updated last */ |
284 | 284 | do { |
285 | - seq = read_seqbegin(&xtime_lock); | |
285 | + seq = read_seqbegin(&jiffies_lock); | |
286 | 286 | last_update = last_jiffies_update; |
287 | 287 | last_jiffies = jiffies; |
288 | 288 | time_delta = timekeeping_max_deferment(); |
289 | - } while (read_seqretry(&xtime_lock, seq)); | |
289 | + } while (read_seqretry(&jiffies_lock, seq)); | |
290 | 290 | |
291 | 291 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
292 | 292 | arch_needs_cpu(cpu)) { |
... | ... | @@ -658,7 +658,7 @@ |
658 | 658 | * concurrency: This happens only when the cpu in charge went |
659 | 659 | * into a long sleep. If two cpus happen to assign themself to |
660 | 660 | * this duty, then the jiffies update is still serialized by |
661 | - * xtime_lock. | |
661 | + * jiffies_lock. | |
662 | 662 | */ |
663 | 663 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
664 | 664 | tick_do_timer_cpu = cpu; |
... | ... | @@ -810,7 +810,7 @@ |
810 | 810 | * concurrency: This happens only when the cpu in charge went |
811 | 811 | * into a long sleep. If two cpus happen to assign themself to |
812 | 812 | * this duty, then the jiffies update is still serialized by |
813 | - * xtime_lock. | |
813 | + * jiffies_lock. | |
814 | 814 | */ |
815 | 815 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
816 | 816 | tick_do_timer_cpu = cpu; |
kernel/time/timekeeping.c
... | ... | @@ -25,12 +25,6 @@ |
25 | 25 | |
26 | 26 | static struct timekeeper timekeeper; |
27 | 27 | |
28 | -/* | |
29 | - * This read-write spinlock protects us from races in SMP while | |
30 | - * playing with xtime. | |
31 | - */ | |
32 | -__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); | |
33 | - | |
34 | 28 | /* flag for if timekeeping is suspended */ |
35 | 29 | int __read_mostly timekeeping_suspended; |
36 | 30 | |
... | ... | @@ -1299,9 +1293,7 @@ |
1299 | 1293 | } |
1300 | 1294 | |
1301 | 1295 | /* |
1302 | - * The 64-bit jiffies value is not atomic - you MUST NOT read it | |
1303 | - * without sampling the sequence number in xtime_lock. | |
1304 | - * jiffies is defined in the linker script... | |
1296 | + * Must hold jiffies_lock | |
1305 | 1297 | */ |
1306 | 1298 | void do_timer(unsigned long ticks) |
1307 | 1299 | { |
1308 | 1300 | |
... | ... | @@ -1389,8 +1381,8 @@ |
1389 | 1381 | */ |
1390 | 1382 | void xtime_update(unsigned long ticks) |
1391 | 1383 | { |
1392 | - write_seqlock(&xtime_lock); | |
1384 | + write_seqlock(&jiffies_lock); | |
1393 | 1385 | do_timer(ticks); |
1394 | - write_sequnlock(&xtime_lock); | |
1386 | + write_sequnlock(&jiffies_lock); | |
1395 | 1387 | } |