@@ -537,40 +537,55 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 }
 
 /**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs:         Pointer to clocksource
- *
+ * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
+ * @mult:       cycle to nanosecond multiplier
+ * @shift:      cycle to nanosecond divisor (power of two)
+ * @maxadj:     maximum adjustment value to mult (~11%)
+ * @mask:       bitmask for two's complement subtraction of non 64 bit counters
  */
-static u64 clocksource_max_deferment(struct clocksource *cs)
+u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 {
         u64 max_nsecs, max_cycles;
 
         /*
          * Calculate the maximum number of cycles that we can pass to the
          * cyc2ns function without overflowing a 64-bit signed result. The
-         * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+         * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
          * which is equivalent to the below.
-         * max_cycles < (2^63)/(cs->mult + cs->maxadj)
-         * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
-         * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
-         * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
-         * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
+         * max_cycles < (2^63)/(mult + maxadj)
+         * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
+         * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
+         * max_cycles < 2^(63 - log2(mult + maxadj))
+         * max_cycles < 1 << (63 - log2(mult + maxadj))
          * Please note that we add 1 to the result of the log2 to account for
          * any rounding errors, ensure the above inequality is satisfied and
          * no overflow will occur.
          */
-        max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
+        max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));
 
         /*
          * The actual maximum number of cycles we can defer the clocksource is
-         * determined by the minimum of max_cycles and cs->mask.
+         * determined by the minimum of max_cycles and mask.
          * Note: Here we subtract the maxadj to make sure we don't sleep for
          * too long if there's a large negative adjustment.
          */
-        max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-        max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
-                                        cs->shift);
+        max_cycles = min(max_cycles, mask);
+        max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
+
+        return max_nsecs;
+}
+
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs:         Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+        u64 max_nsecs;
 
+        max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
+                                          cs->mask);
         /*
          * To ensure that the clocksource does not wrap whilst we are idle,
          * limit the time the clocksource can be deferred by 12.5%. Please
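The comment block in the hunk caps max_cycles at 1 << (63 - (ilog2(mult + maxadj) + 1)), the extra "+ 1" covering ilog2's round-down. As a quick sanity check of that bound, here is a standalone userspace C sketch (not part of the patch; ilog2() is stood in for by a GCC/Clang builtin, and the 128-bit product type is a compiler extension) that samples the u32 range of mult + maxadj and asserts the 64-bit product can never reach 2^63:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Sample the u32 range of (mult + maxadj) with a coarse stride. */
        for (uint64_t x = 1; x <= UINT32_MAX; x += 4095) {
                unsigned int lg = 63 - __builtin_clzll(x);   /* ilog2(x) */
                uint64_t max_cycles = 1ULL << (63 - (lg + 1));

                /* 128-bit product so the check itself cannot overflow. */
                assert((unsigned __int128)max_cycles * x <
                       ((unsigned __int128)1 << 63));
        }
        return 0;
}

The assertion holds because ilog2(x) rounds down, so x < 2^(ilog2(x)+1), giving max_cycles * x < 2^(62-ilog2(x)) * 2^(ilog2(x)+1) = 2^63.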
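For experimenting with concrete numbers outside the kernel, the following is a minimal userspace mirror of the new clocks_calc_max_nsecs() helper with a sample call. clocksource_cyc2ns() is reimplemented inline as (cycles * mult) >> shift, and the mult/shift/maxadj/mask values are illustrative stand-ins, not taken from any real clocksource:

#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u64(uint64_t v)   /* floor(log2(v)), v > 0 */
{
        return 63 - __builtin_clzll(v);
}

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;    /* mirrors clocksource_cyc2ns() */
}

static uint64_t calc_max_nsecs(uint32_t mult, uint32_t shift,
                               uint32_t maxadj, uint64_t mask)
{
        /* Largest cycle count whose 64-bit product cannot reach 2^63. */
        uint64_t max_cycles = 1ULL << (63 - (ilog2_u64(mult + maxadj) + 1));

        if (max_cycles > mask)
                max_cycles = mask;
        /* Convert with the smallest possible mult so the result is safe. */
        return cyc2ns(max_cycles, mult - maxadj, shift);
}

int main(void)
{
        /* Hypothetical ~1 MHz counter: mult/shift chosen so that
         * (cycles * mult) >> shift is about cycles * 1000 ns. */
        uint32_t mult = 1000 << 20, shift = 20, maxadj = mult / 9; /* ~11% */
        uint64_t mask = (1ULL << 32) - 1;   /* 32-bit counter */

        printf("max deferment: %llu ns\n",
               (unsigned long long)calc_max_nsecs(mult, shift, maxadj, mask));
        return 0;
}

For this hypothetical 32-bit counter the helper reports roughly an hour of headroom before the 64-bit conversion could overflow; clocksource_max_deferment() then trims a further 12.5% off that figure, per the trailing context of the hunk, so idle time stays safely inside the counter's wrap interval.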