@@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
 	tk->wall_to_monotonic = wtm;
 	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 	tk->offs_real = timespec_to_ktime(tmp);
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 }
 
 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
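Note on the hunk above: CLOCK_TAI is built from CLOCK_MONOTONIC via offs_tai, and TAI runs ahead of UTC by tai_offset seconds, so the monotonic-to-TAI offset has to be offs_real plus tai_offset; the old ktime_sub() gave it the wrong sign. A minimal userspace check of the invariant the fix restores — a sketch, assuming a kernel that exposes CLOCK_TAI (clockid 11 in the uapi headers):

	#include <stdio.h>
	#include <time.h>

	#ifndef CLOCK_TAI
	#define CLOCK_TAI 11	/* uapi clockid; older libcs lack the define */
	#endif

	int main(void)
	{
		struct timespec real, tai;

		clock_gettime(CLOCK_REALTIME, &real);
		clock_gettime(CLOCK_TAI, &tai);

		/* With the fix, CLOCK_TAI = CLOCK_REALTIME + tai_offset,
		 * so this prints the positive UTC-to-TAI offset. */
		printf("tai - realtime = %ld s\n",
		       (long)(tai.tv_sec - real.tv_sec));
		return 0;
	}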
@@ -90,8 +90,9 @@ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
 }
 
 /**
- * timekeeper_setup_internals - Set up internals to use clocksource clock.
+ * tk_setup_internals - Set up internals to use clocksource clock.
  *
+ * @tk:		The target timekeeper to setup.
  * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
@@ -595,7 +596,7 @@ s32 timekeeping_get_tai_offset(void)
 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 {
 	tk->tai_offset = tai_offset;
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
 }
 
 /**
@@ -610,6 +611,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&timekeeper_seq);
 	__timekeeping_set_tai_offset(tk, tai_offset);
+	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 	clock_was_set();
@@ -1023,6 +1025,8 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
+
+	timekeeping_update(tk, TK_MIRROR);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
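The timekeeping_update() calls added in the last two hunks pass update flags. For reference, a hedged reconstruction of the flag set as defined near the top of 3.13-era kernel/time/timekeeping.c (verify against your tree):

	#define TK_CLEAR_NTP		(1 << 0)	/* also reset NTP state */
	#define TK_MIRROR		(1 << 1)	/* copy live tk into shadow_timekeeper */
	#define TK_CLOCK_WAS_SET	(1 << 2)	/* notify listeners the clock jumped */

The suspend-path TK_MIRROR matters because update_wall_time() now accumulates into shadow_timekeeper and copies it back over the live struct; leaving the shadow stale across suspend would let the next accumulation resurrect pre-suspend state.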
@@ -1130,16 +1134,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	 * we can adjust by 1.
 	 */
 	error >>= 2;
-	/*
-	 * XXX - In update_wall_time, we round up to the next
-	 * nanosecond, and store the amount rounded up into
-	 * the error. This causes the likely below to be unlikely.
-	 *
-	 * The proper fix is to avoid rounding up by using
-	 * the high precision tk->xtime_nsec instead of
-	 * xtime.tv_nsec everywhere. Fixing this will take some
-	 * time.
-	 */
 	if (likely(error <= interval))
 		adj = 1;
 	else
@@ -1255,7 +1249,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
-	unsigned int action = 0;
+	unsigned int clock_set = 0;
 
 	while (tk->xtime_nsec >= nsecps) {
 		int leap;
@@ -1277,11 +1271,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 
 			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
 
-			clock_was_set_delayed();
-			action = TK_CLOCK_WAS_SET;
+			clock_set = TK_CLOCK_WAS_SET;
 		}
 	}
-	return action;
+	return clock_set;
 }
 
 /**
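The accumulate_nsecs_to_secs() rework above is the core pattern of this series: leap-second handling runs deep under timekeeper_lock, so rather than calling clock_was_set_delayed() in place, the function now just reports TK_CLOCK_WAS_SET and the outermost caller notifies once the lock is dropped. A standalone sketch of that shape, with a pthread mutex standing in for the raw spinlock and hypothetical helper names:

	#include <pthread.h>
	#include <stdio.h>

	#define TK_CLOCK_WAS_SET	(1 << 2)

	static pthread_mutex_t tk_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for accumulate_nsecs_to_secs(): reports, never notifies. */
	static unsigned int accumulate(int leap_pending)
	{
		return leap_pending ? TK_CLOCK_WAS_SET : 0;
	}

	static void clock_was_set(void)
	{
		/* In the kernel this can take sleeping locks, hence the deferral. */
		puts("clock_was_set()");
	}

	int main(void)
	{
		unsigned int clock_set = 0;

		pthread_mutex_lock(&tk_lock);
		clock_set |= accumulate(1);	/* only record the event under the lock */
		pthread_mutex_unlock(&tk_lock);

		if (clock_set)			/* notify with the lock dropped */
			clock_was_set();
		return 0;
	}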
@@ -1294,7 +1287,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
  * Returns the unconsumed cycles.
  */
 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
-						u32 shift)
+						u32 shift,
+						unsigned int *clock_set)
 {
 	cycle_t interval = tk->cycle_interval << shift;
 	u64 raw_nsecs;
@@ -1308,7 +1302,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 	tk->cycle_last += interval;
 
 	tk->xtime_nsec += tk->xtime_interval << shift;
-	accumulate_nsecs_to_secs(tk);
+	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
 	raw_nsecs = (u64)tk->raw_interval << shift;
@@ -1359,14 +1353,14 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
 	struct clocksource *clock;
 	struct timekeeper *real_tk = &timekeeper;
 	struct timekeeper *tk = &shadow_timekeeper;
 	cycle_t offset;
 	int shift = 0, maxshift;
-	unsigned int action;
+	unsigned int clock_set = 0;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1401,7 +1395,8 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= tk->cycle_interval) {
-		offset = logarithmic_accumulation(tk, offset, shift);
+		offset = logarithmic_accumulation(tk, offset, shift,
+							&clock_set);
 		if (offset < tk->cycle_interval<<shift)
 			shift--;
 	}
@@ -1419,7 +1414,7 @@ static void update_wall_time(void)
 	 * Finally, make sure that after the rounding
 	 * xtime_nsec isn't larger than NSEC_PER_SEC
 	 */
-	action = accumulate_nsecs_to_secs(tk);
+	clock_set |= accumulate_nsecs_to_secs(tk);
 
 	write_seqcount_begin(&timekeeper_seq);
 	/* Update clock->cycle_last with the new value */
@@ -1435,10 +1430,12 @@ static void update_wall_time(void)
 	 * updating.
 	 */
 	memcpy(real_tk, tk, sizeof(*tk));
-	timekeeping_update(real_tk, action);
+	timekeeping_update(real_tk, clock_set);
 	write_seqcount_end(&timekeeper_seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	if (clock_set)
+		clock_was_set();
 }
 
 /**
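Note the ordering in the hunk above: the shadow timekeeper is memcpy()'d over the live one inside the seqcount write section, and clock_was_set() fires only after raw_spin_unlock_irqrestore(). Readers never observe a half-updated timekeeper because the getters retry on a torn sequence; a kernel-style sketch of the reader side (not standalone, mirroring the pattern this file's getters already use with timekeeper_seq):

	struct timespec snap;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		snap.tv_sec = timekeeper.xtime_sec;
		snap.tv_nsec = (long)(timekeeper.xtime_nsec >> timekeeper.shift);
	} while (read_seqcount_retry(&timekeeper_seq, seq));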
@@ -1583,7 +1580,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_wall_time();
 	calc_global_load(ticks);
 }
 
@@ -1698,12 +1694,14 @@ int do_adjtimex(struct timex *txc)
 
 	if (tai != orig_tai) {
 		__timekeeping_set_tai_offset(tk, tai);
-		update_pvclock_gtod(tk, true);
-		clock_was_set_delayed();
+		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	}
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	if (tai != orig_tai)
+		clock_was_set();
+
 	ntp_notify_cmos_timer();
 
 	return ret;
@@ -1739,4 +1737,5 @@ void xtime_update(unsigned long ticks)
 	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
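The last two hunks move the wall-time update out of do_timer() and into the tail of xtime_update(), after write_sequnlock(). As far as the diff shows, the point is lock scoping: update_wall_time() takes timekeeper_lock and timekeeper_seq itself, and with the earlier change may call clock_was_set(), so it no longer runs nested inside the jiffies seqlock. That presumably also explains update_wall_time() dropping its static qualifier earlier in the diff, letting callers outside this translation unit drive the update. A before/after sketch using the identifiers from this diff:

	/* Before: timekeeping locks taken while holding jiffies_lock */
	write_seqlock(&jiffies_lock);
	do_timer(ticks);	/* -> update_wall_time() -> timekeeper_lock */
	write_sequnlock(&jiffies_lock);

	/* After: the two lock domains are taken back to back, never nested */
	write_seqlock(&jiffies_lock);
	do_timer(ticks);	/* jiffies_64 bump + calc_global_load() only */
	write_sequnlock(&jiffies_lock);
	update_wall_time();	/* timekeeper_lock taken with jiffies_lock dropped */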