@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
	cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think elapsed.
+ */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
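The new comment documents that a paravirt guest can have accumulated more steal time than the caller expected, since missed ticks are never redelivered. As a rough illustration of the `maxtime` contract, here is a standalone sketch with made-up numbers and helper names (not the kernel's CONFIG_PARAVIRT implementation):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for the paravirt steal clock and the per-CPU
 * bookkeeping; in the kernel these roles are played by the steal clock
 * and the runqueue's previously accounted steal time.
 */
static uint64_t steal_clock_ns = 7000000;	/* total steal reported by the host */
static uint64_t prev_steal_ns  = 1000000;	/* steal already accounted */

/*
 * Account up to 'maxtime' ns of pending steal time and return how much was
 * actually accounted; anything above 'maxtime' stays pending for later calls.
 */
static uint64_t steal_account(uint64_t maxtime)
{
	uint64_t pending = steal_clock_ns - prev_steal_ns;
	uint64_t steal = pending < maxtime ? pending : maxtime;

	prev_steal_ns += steal;
	return steal;
}

int main(void)
{
	/* A 4 ms tick: only 4 ms of the 6 ms pending steal is taken. */
	printf("capped:   %llu\n", (unsigned long long)steal_account(4000000));
	/* UINT64_MAX drains everything that is still pending (2 ms). */
	printf("uncapped: %llu\n", (unsigned long long)steal_account(UINT64_MAX));
	return 0;
}
```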
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
-	other = account_other_time(cputime);
+	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;
	cputime -= other;
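Passing ULONG_MAX here is safe because the caller already bails out when `other >= cputime`; the uncapped call simply lets any irq/steal backlog be accounted in full rather than one tick at a time. A simplified user-space model of that call-site pattern (hypothetical names, not the kernel functions):

```c
#include <limits.h>
#include <stdio.h>

/* Hypothetical backlog of irq/steal time still waiting to be accounted. */
static unsigned long pending_other = 12;	/* in ticks */

/* Model of account_other_time(): account at most 'max' of the backlog. */
static unsigned long account_other(unsigned long max)
{
	unsigned long other = pending_other < max ? pending_other : max;

	pending_other -= other;
	return other;
}

/* Model of the tick handler: one tick of cputime to hand out. */
static void process_tick(unsigned long cputime)
{
	/* Uncapped: the whole backlog is accounted in one go. */
	unsigned long other = account_other(ULONG_MAX);

	if (other >= cputime)
		return;			/* the tick was entirely irq/steal time */
	cputime -= other;
	printf("charge %lu tick(s) to the task\n", cputime);
}

int main(void)
{
	process_tick(1);	/* swallowed by the 12-tick backlog */
	process_tick(1);	/* backlog drained, the task gets this tick */
	return 0;
}
```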
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
	}

	cputime = cputime_one_jiffy;
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;
@@ -516,7 +521,7 @@ void account_idle_ticks(unsigned long ticks)
	}

	cputime = jiffies_to_cputime(ticks);
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;
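account_process_tick() and account_idle_ticks() get the same treatment. The idle case is the interesting one: a nohz CPU may account a whole batch of ticks at once, and the steal portion is subtracted from the entire period. A toy version of that batch path, using assumed values and the simplified helpers from above:

```c
#include <limits.h>
#include <stdio.h>

static unsigned long pending_steal = 3;		/* ticks stolen by the host */

/* Simplified steal_account_process_time(): uncapped drain of the backlog. */
static unsigned long steal_account(unsigned long max)
{
	unsigned long steal = pending_steal < max ? pending_steal : max;

	pending_steal -= steal;
	return steal;
}

/* Model of account_idle_ticks(): a nohz CPU wakes up after 'ticks' jiffies. */
static void idle_ticks(unsigned long ticks)
{
	unsigned long cputime = ticks;
	unsigned long steal = steal_account(ULONG_MAX);

	if (steal >= cputime)
		return;				/* the whole period was steal time */
	cputime -= steal;
	printf("idle: %lu tick(s)\n", cputime);	/* the remainder is real idle */
}

int main(void)
{
	idle_ticks(10);		/* 3 of 10 ticks were steal: prints "idle: 7 tick(s)" */
	return 0;
}
```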
@@ -614,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
	stime = curr->stime;
	utime = curr->utime;

-	if (utime == 0) {
-		stime = rtime;
+	/*
+	 * If either stime or both stime and utime are 0, assume all runtime is
+	 * userspace. Once a task gets some ticks, the monotonicity code at
+	 * 'update' will ensure things converge to the observed ratio.
+	 */
+	if (stime == 0) {
+		utime = rtime;
		goto update;
	}

-	if (stime == 0) {
-		utime = rtime;
+	if (utime == 0) {
+		stime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

+update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
@@ -649,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
		stime = rtime - utime;
	}

-update:
	prev->stime = stime;
	prev->utime = utime;
out:
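These two hunks reorder the zero checks so that the `stime == 0` case wins when both counters are zero (everything is treated as user time) and move the `update:` label so the early-exit paths also pass through the monotonicity clamps. A self-contained sketch of the split-and-clamp logic, using simplified types and plain 64-bit math instead of the kernel's overflow-safe scale_stime():

```c
#include <stdint.h>
#include <stdio.h>

struct prev_cputime {
	uint64_t utime;
	uint64_t stime;
};

/*
 * Split the precise runtime 'rtime' in the ratio suggested by the sampled
 * tick counts, then clamp against the previously reported values so that
 * neither utime nor stime ever goes backwards.
 */
static void adjust(uint64_t utime, uint64_t stime, uint64_t rtime,
		   struct prev_cputime *prev)
{
	if (stime == 0) {		/* no system ticks (or none at all): all user time */
		utime = rtime;
		goto update;
	}
	if (utime == 0) {		/* no user ticks: all system time */
		stime = rtime;
		goto update;
	}

	/* Proportional split; the kernel uses the overflow-safe scale_stime(). */
	stime = rtime * stime / (stime + utime);

update:
	if (stime < prev->stime)	/* keep stime monotonic... */
		stime = prev->stime;
	utime = rtime - stime;		/* ...utime follows because rtime is monotonic */
	if (utime < prev->utime) {	/* and keep utime monotonic as well */
		utime = prev->utime;
		stime = rtime - utime;
	}
	prev->stime = stime;
	prev->utime = utime;
	printf("utime=%llu stime=%llu\n",
	       (unsigned long long)utime, (unsigned long long)stime);
}

int main(void)
{
	struct prev_cputime prev = { 0, 0 };

	adjust(0, 0, 100, &prev);	/* both counters zero: all 100 is user time */
	adjust(30, 10, 200, &prev);	/* 3:1 user/system split of 200 */
	return 0;
}
```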
@@ -694,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
	unsigned long now = READ_ONCE(jiffies);
	cputime_t delta, other;

+	/*
+	 * Unlike tick based timing, vtime based timing never has lost
+	 * ticks, and no need for steal time accounting to make up for
+	 * lost ticks. Vtime accounts a rounded version of actual
+	 * elapsed time. Limit account_other_time to prevent rounding
+	 * errors from causing elapsed vtime to go negative.
+	 */
	delta = jiffies_to_cputime(now - tsk->vtime_snap);
	other = account_other_time(delta);
	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
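The vtime path deliberately keeps passing the measured delta instead of ULONG_MAX: there are no lost ticks to make up for, and accounting more than delta would drive the remaining vtime negative. A minimal model of that invariant (hypothetical helpers, not the kernel code):

```c
#include <stdio.h>

static unsigned long pending_other = 5;	/* irq/steal backlog, in ticks */

/* Model of account_other_time(): never account more than 'max'. */
static unsigned long account_other(unsigned long max)
{
	unsigned long other = pending_other < max ? pending_other : max;

	pending_other -= other;
	return other;
}

/*
 * Model of get_vtime_delta(): the remainder must stay non-negative, because
 * it is what gets charged to the task as user/system/guest time.
 */
static unsigned long vtime_delta(unsigned long now, unsigned long snap)
{
	unsigned long delta = now - snap;
	unsigned long other = account_other(delta);	/* capped at delta */

	return delta - other;		/* never wraps below zero */
}

int main(void)
{
	/* 3 jiffies elapsed, 5 ticks pending: only 3 are taken, 0 remains. */
	printf("task time: %lu\n", vtime_delta(1003, 1000));
	return 0;
}
```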