Commit 0a45d4491d0f172e02126370f312405c5d473363

Authored by Paul Mackerras
1 parent 82dfdcae0d

powerpc: Fix problem with time going backwards

The recent changes to keep gettimeofday in sync with xtime had the side
effect that it was occasionally possible for the time reported by
gettimeofday to go back by a microsecond.  There were two reasons:
(1) when we recalculated the offsets used by gettimeofday every 2^31
timebase ticks, we lost an accumulated fractional microsecond, and
(2) because the update is done some time after the notional start of the
jiffy, if ntp is slowing the clock, it is possible to see time go backwards
when the timebase factor gets reduced.

This fixes it by (a) slowing the gettimeofday clock by about 1us in
2^31 timebase ticks (a factor of less than 1 in 3.7 million), and (b)
adjusting the timebase offsets in the rare case that the gettimeofday
result could possibly go backwards (i.e. when ntp is slowing the clock
and the timer interrupt is late).  In this case the adjustment will
reduce to zero eventually because of (a).
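
As a rough sanity check on (a), with illustrative numbers not taken from the
patch: assuming a 512 MHz timebase, 2^31 ticks take about 4.2 seconds, and
losing about 1us over 4.2s is roughly 1 part in 4.2 million.  The ratio scales
linearly with the timebase frequency, consistent with the quoted bound of
less than 1 in 3.7 million.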

Signed-off-by: Paul Mackerras <paulus@samba.org>

Showing 1 changed file with 34 additions and 14 deletions

arch/powerpc/kernel/time.c
@@ -283,9 +283,9 @@
 	 * the two values of tb_update_count match and are even then the
 	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
 	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
 	 */
-	++(vdso_data->tb_update_count);
-	smp_wmb();
 	vdso_data->tb_orig_stamp = new_tb_stamp;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
 
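For readers unfamiliar with the protocol the comment above refers to, the
scheme is seqlock-like: the kernel makes tb_update_count odd before touching
the vdso fields (now via the caller's first increment) and even again
afterwards, and userspace retries until it sees a matching even count around
its reads.  The real reader is the powerpc vDSO assembly; the C below is only
an illustrative sketch with a made-up vdso_gtod_view struct and
read_gtod_vars() helper, and it omits the memory barriers a real PowerPC
implementation would need.

/*
 * Illustrative sketch only -- not the kernel's code.  Field names mirror
 * the vdso_data fields updated above; the struct, the helper name and the
 * absence of explicit read barriers are simplifications.
 */
struct vdso_gtod_view {
	volatile unsigned int tb_update_count;
	volatile unsigned long long tb_orig_stamp;
	volatile unsigned long long stamp_xsec;
	volatile unsigned long long tb_to_xs;
};

static void read_gtod_vars(const struct vdso_gtod_view *vd,
			   unsigned long long *tb_orig_stamp,
			   unsigned long long *stamp_xsec,
			   unsigned long long *tb_to_xs)
{
	unsigned int count;

	do {
		/* An odd count means an update is in progress. */
		count = vd->tb_update_count;
		*tb_orig_stamp = vd->tb_orig_stamp;
		*stamp_xsec = vd->stamp_xsec;
		*tb_to_xs = vd->tb_to_xs;
		/* Retry until the count is even and unchanged across the reads. */
	} while ((count & 1) || count != vd->tb_update_count);
}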
@@ -310,20 +310,15 @@
 	unsigned long offset;
 	u64 new_stamp_xsec;
 	u64 tlen, t2x;
+	u64 tb, xsec_old, xsec_new;
+	struct gettimeofday_vars *varp;
 
 	if (__USE_RTC())
 		return;
 	tlen = current_tick_length();
 	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
-	if (tlen == last_tick_len && offset < 0x80000000u) {
-		/* check that we're still in sync; if not, resync */
-		struct timeval tv;
-		__do_gettimeofday(&tv, cur_tb);
-		if (tv.tv_sec <= xtime.tv_sec &&
-		    (tv.tv_sec < xtime.tv_sec ||
-		     tv.tv_usec * 1000 <= xtime.tv_nsec))
-			return;
-	}
+	if (tlen == last_tick_len && offset < 0x80000000u)
+		return;
 	if (tlen != last_tick_len) {
 		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
 		last_tick_len = tlen;
@@ -332,6 +327,21 @@
 	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
 	do_div(new_stamp_xsec, 1000000000);
 	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+	++vdso_data->tb_update_count;
+	smp_mb();
+
+	/*
+	 * Make sure time doesn't go backwards for userspace gettimeofday.
+	 */
+	tb = get_tb();
+	varp = do_gtod.varp;
+	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
+		+ varp->stamp_xsec;
+	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
+	if (xsec_new < xsec_old)
+		new_stamp_xsec += xsec_old - xsec_new;
+
 	update_gtod(cur_tb, new_stamp_xsec, t2x);
 }
 
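To see what the new xsec_old/xsec_new comparison buys, a made-up example:
suppose that at the moment of the update the old variables convert the
current timebase value to xsec_old = 1000050 xsec, while the new stamp and
the smaller ntp-adjusted factor t2x would give xsec_new = 1000047 xsec.  The
code then adds the 3 xsec difference to new_stamp_xsec, so the first
gettimeofday after the switch can never read earlier than the last one before
it; the deliberate slowdown from change (a) then works those 3 xsec off over
the following seconds instead of letting time step backwards.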
@@ -564,6 +574,10 @@
 	}
 #endif
 
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_mb();
+
 	/*
 	 * Subtract off the number of nanoseconds since the
 	 * beginning of the last tick.
 
@@ -724,10 +738,16 @@
 	 * It is computed as:
 	 *	ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
 	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * so as to give the result as a 0.64 fixed-point fraction.
+	 * which turns out to be N = 51 - SHIFT_HZ.
+	 * This gives the result as a 0.64 fixed-point fraction.
+	 * That value is reduced by an offset amounting to 1 xsec per
+	 * 2^31 timebase ticks to avoid problems with time going backwards
+	 * by 1 xsec when we do timer_recalc_offset due to losing the
+	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
+	 * since there are 2^20 xsec in a second.
 	 */
-	div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
-		     tb_ticks_per_jiffy, &res);
+	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
+		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
 	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
 	ticklen_to_xs = res.result_low;
 
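A quick check of the arithmetic in the new comment (my derivation, not part
of the patch): subtracting ppc_tb_freq from the 2^51 numerator shrinks
ticklen_to_xs, and hence the resulting tb_to_xs, by the fraction
ppc_tb_freq/2^51.  Over 2^31 timebase ticks gettimeofday accumulates

	2^31 ticks * (1/ppc_tb_freq) s/tick * 2^20 xsec/s = 2^51/ppc_tb_freq xsec,

so the shortfall caused by the smaller factor is

	(2^51/ppc_tb_freq) * (ppc_tb_freq/2^51) = 1 xsec per 2^31 ticks,

which is exactly the fractional xsec that timer_recalc_offset could otherwise
lose each time it rebases, and matches the "about 1us in 2^31 timebase ticks"
figure in the commit message (1 xsec = 2^-20 s, roughly 0.95us).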