@@ -231,10 +231,8 @@ struct global_params {
  * @prev_cummulative_iowait:	IO Wait time difference from last and
  *				current sample
  * @sample:		Storage for storing last Sample data
- * @min_perf:		Minimum capacity limit as a fraction of the maximum
- *			turbo P-state capacity.
- * @max_perf:		Maximum capacity limit as a fraction of the maximum
- *			turbo P-state capacity.
+ * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
+ * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
  * @epp_powersave:	Last saved HWP energy performance preference
@@ -266,8 +264,8 @@ struct cpudata {
 	u64	prev_tsc;
 	u64	prev_cummulative_iowait;
 	struct sample sample;
-	int32_t	min_perf;
-	int32_t	max_perf;
+	int32_t	min_perf_ratio;
+	int32_t	max_perf_ratio;
 #ifdef CONFIG_ACPI
 	struct acpi_processor_performance acpi_perf_data;
 	bool valid_pss_table;
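Note on the rename: the old min_perf/max_perf fields held extended fixed-point fractions of the maximum turbo P-state, so every consumer had to scale and truncate with fp_ext_toint(); the new min_perf_ratio/max_perf_ratio fields hold ready-to-use performance ratios. A minimal standalone model of the arithmetic change (the 14-bit fraction width is an assumption based on the driver's FRAC_BITS/EXT_BITS definitions, which are outside this diff; turbo_max = 36 is a made-up example ratio):

    #include <stdio.h>
    #include <stdint.h>

    #define EXT_FRAC_BITS 14	/* assumed: FRAC_BITS (8) + EXT_BITS (6) */
    #define percent_ext_fp(p)  ((int32_t)(((int64_t)(p) << EXT_FRAC_BITS) / 100))
    #define fp_ext_toint(x)    ((int)((x) >> EXT_FRAC_BITS))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int turbo_max = 36;	/* example max turbo ratio */

            /* Before: percent -> fixed-point fraction -> scale -> truncate. */
            int32_t frac = percent_ext_fp(97);
            printf("old: %d\n", fp_ext_toint((int64_t)turbo_max * frac));	/* 34 */

            /* After: one integer expression that rounds up. */
            printf("new: %d\n", DIV_ROUND_UP(turbo_max * 97, 100));		/* 35 */
            return 0;
    }

As the example shows, the truncating old path and the rounding-up new path can differ by one ratio step for percentages that do not divide evenly.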
@@ -794,25 +792,32 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 	NULL,
 };
 
-static void intel_pstate_hwp_set(unsigned int cpu)
+static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+				     int *current_max)
 {
-	struct cpudata *cpu_data = all_cpu_data[cpu];
-	int min, hw_min, max, hw_max;
-	u64 value, cap;
-	s16 epp;
+	u64 cap;
 
 	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
-	hw_min = HWP_LOWEST_PERF(cap);
 	if (global.no_turbo)
-		hw_max = HWP_GUARANTEED_PERF(cap);
+		*current_max = HWP_GUARANTEED_PERF(cap);
 	else
-		hw_max = HWP_HIGHEST_PERF(cap);
+		*current_max = HWP_HIGHEST_PERF(cap);
+
+	*phy_max = HWP_HIGHEST_PERF(cap);
+}
+
+static void intel_pstate_hwp_set(unsigned int cpu)
+{
+	struct cpudata *cpu_data = all_cpu_data[cpu];
+	int max, min;
+	u64 value;
+	s16 epp;
+
+	max = cpu_data->max_perf_ratio;
+	min = cpu_data->min_perf_ratio;
 
-	max = fp_ext_toint(hw_max * cpu_data->max_perf);
 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
 		min = max;
-	else
-		min = fp_ext_toint(hw_max * cpu_data->min_perf);
 
 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
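The new helper only decodes two byte fields of MSR_HWP_CAPABILITIES. A standalone sketch of the same decoding, using the bit layout of the HWP_*_PERF() macros from asm/msr-index.h (highest performance in bits 7:0, guaranteed performance in bits 15:8); the capability value here is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define HWP_HIGHEST_PERF(cap)    (((cap) >>  0) & 0xff)
    #define HWP_GUARANTEED_PERF(cap) (((cap) >>  8) & 0xff)

    /* Mirrors intel_pstate_get_hwp_max(): report the physical maximum
     * and the currently usable maximum (guaranteed perf if turbo is off).
     */
    static void get_hwp_max(uint64_t cap, bool no_turbo,
                            int *phy_max, int *current_max)
    {
            *current_max = no_turbo ? HWP_GUARANTEED_PERF(cap)
                                    : HWP_HIGHEST_PERF(cap);
            *phy_max = HWP_HIGHEST_PERF(cap);
    }

    int main(void)
    {
            uint64_t cap = 0x0b10191e;	/* example: highest 0x1e, guaranteed 0x19 */
            int phy, cur;

            get_hwp_max(cap, true, &phy, &cur);
            printf("phy_max %d current_max %d\n", phy, cur);	/* 30 25 */
            return 0;
    }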
@@ -1528,8 +1533,7 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
 
 	update_turbo_state();
 	pstate = intel_pstate_get_base_pstate(cpu);
-	pstate = max(cpu->pstate.min_pstate,
-		     fp_ext_toint(pstate * cpu->max_perf));
+	pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 	intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1695,9 +1699,8 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 	int max_pstate = intel_pstate_get_base_pstate(cpu);
 	int min_pstate;
 
-	min_pstate = max(cpu->pstate.min_pstate,
-			 fp_ext_toint(max_pstate * cpu->min_perf));
-	max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+	min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+	max_pstate = max(min_pstate, cpu->max_perf_ratio);
 
 	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
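With the limits precomputed as ratios, the request path reduces to plain integer clamping. A standalone model with illustrative values (none of these numbers come from the diff):

    #include <stdio.h>

    #define max(a, b)        ((a) > (b) ? (a) : (b))
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
            int hw_min_pstate = 8;	/* cpu->pstate.min_pstate */
            int min_perf_ratio = 12;	/* cpu->min_perf_ratio */
            int max_perf_ratio = 28;	/* cpu->max_perf_ratio */

            int min_pstate = max(hw_min_pstate, min_perf_ratio);	/* 12 */
            int max_pstate = max(min_pstate, max_perf_ratio);	/* 28 */

            /* A governor request of 34 is clipped to 28; 10 is raised to 12. */
            printf("%d %d\n", clamp(34, min_pstate, max_pstate),
                              clamp(10, min_pstate, max_pstate));
            return 0;
    }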
@@ -1967,52 +1970,61 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 {
 	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
+	int max_state, turbo_max;
 
-	max_policy_perf = div_ext_fp(policy->max, max_freq);
-	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
+	/*
+	 * HWP needs some special consideration, because on BDX the
+	 * HWP_REQUEST uses abstract value to represent performance
+	 * rather than pure ratios.
+	 */
+	if (hwp_active) {
+		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+	} else {
+		max_state = intel_pstate_get_base_pstate(cpu);
+		turbo_max = cpu->pstate.turbo_pstate;
+	}
+
+	max_policy_perf = max_state * policy->max / max_freq;
 	if (policy->max == policy->min) {
 		min_policy_perf = max_policy_perf;
 	} else {
-		min_policy_perf = div_ext_fp(policy->min, max_freq);
+		min_policy_perf = max_state * policy->min / max_freq;
 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
 					  0, max_policy_perf);
 	}
 
+	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
+		 policy->cpu, max_state,
+		 min_policy_perf, max_policy_perf);
+
 	/* Normalize user input to [min_perf, max_perf] */
 	if (per_cpu_limits) {
-		cpu->min_perf = min_policy_perf;
-		cpu->max_perf = max_policy_perf;
+		cpu->min_perf_ratio = min_policy_perf;
+		cpu->max_perf_ratio = max_policy_perf;
 	} else {
 		int32_t global_min, global_max;
 
 		/* Global limits are in percent of the maximum turbo P-state. */
-		global_max = percent_ext_fp(global.max_perf_pct);
-		global_min = percent_ext_fp(global.min_perf_pct);
-		if (max_freq != cpu->pstate.turbo_freq) {
-			int32_t turbo_factor;
-
-			turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
-						  cpu->pstate.max_pstate);
-			global_min = mul_ext_fp(global_min, turbo_factor);
-			global_max = mul_ext_fp(global_max, turbo_factor);
-		}
+		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
 		global_min = clamp_t(int32_t, global_min, 0, global_max);
 
-		cpu->min_perf = max(min_policy_perf, global_min);
-		cpu->min_perf = min(cpu->min_perf, max_policy_perf);
-		cpu->max_perf = min(max_policy_perf, global_max);
-		cpu->max_perf = max(min_policy_perf, cpu->max_perf);
+		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+			 global_min, global_max);
 
-		/* Make sure min_perf <= max_perf */
-		cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
-	}
+		cpu->min_perf_ratio = max(min_policy_perf, global_min);
+		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
+		cpu->max_perf_ratio = min(max_policy_perf, global_max);
+		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
 
-	cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
-	cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
+		/* Make sure min_perf <= max_perf */
+		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
+					  cpu->max_perf_ratio);
 
-	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
-		 fp_ext_toint(cpu->max_perf * 100),
-		 fp_ext_toint(cpu->min_perf * 100));
+	}
+	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+		 cpu->max_perf_ratio,
+		 cpu->min_perf_ratio);
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
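To see how the cpufreq policy bounds and the global sysfs percentages combine into the two ratio fields, here is a standalone walk-through of the non-per_cpu_limits branch above; all input values are made-up examples (ratios in P-state units, frequencies in kHz):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define min(a, b) ((a) < (b) ? (a) : (b))
    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            int turbo_max = 36, max_state = 36;	/* e.g. a 3.6 GHz peak */
            int max_freq = 3600000;			/* from the policy, kHz */
            int policy_max = 3100000, policy_min = 1200000;
            int max_perf_pct = 90, min_perf_pct = 20;	/* global sysfs limits */

            int max_policy_perf = max_state * policy_max / max_freq;	/* 31 */
            int min_policy_perf = max_state * policy_min / max_freq;	/* 12 */

            int global_max = DIV_ROUND_UP(turbo_max * max_perf_pct, 100);	/* 33 */
            int global_min = DIV_ROUND_UP(turbo_max * min_perf_pct, 100);	/*  8 */

            int min_ratio = min(max(min_policy_perf, global_min), max_policy_perf);
            int max_ratio = max(min_policy_perf, min(max_policy_perf, global_max));
            min_ratio = min(min_ratio, max_ratio);	/* keep min <= max */

            printf("min %d max %d\n", min_ratio, max_ratio);	/* 12 31 */
            return 0;
    }

Here the policy maximum (31) is tighter than the global 90% limit (33), so the policy wins; the global 20% floor (8) is looser than the policy minimum (12), so the policy wins there too.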
@@ -2115,8 +2127,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	cpu->max_perf = int_ext_tofp(1);
-	cpu->min_perf = 0;
+	cpu->max_perf_ratio = 0xFF;
+	cpu->min_perf_ratio = 0;
 
 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
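The 0xFF default is the ratio-world equivalent of the old int_ext_tofp(1), i.e. "no limit": P-state and HWP performance ratios are 8-bit values, so no valid request can exceed it, and the clamp in intel_pstate_prepare_request() stays a no-op until the first policy update computes real bounds. A one-line illustration with an example hardware minimum of 8:

    #include <stdio.h>

    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
            int request = 36;	/* any valid 8-bit performance ratio */
            printf("%d\n", clamp(request, 8, 0xFF));	/* stays 36: unlimited */
            return 0;
    }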