@@ -37,6 +37,8 @@
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
 
+#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+
 #define ATOM_RATIOS		0x66a
 #define ATOM_VIDS		0x66b
 #define ATOM_TURBO_RATIOS	0x66c
@@ -122,6 +124,8 @@ struct sample {
  * @scaling:		Scaling factor to convert frequency to cpufreq
  *			frequency units
  * @turbo_pstate:	Max Turbo P state possible for this platform
+ * @max_freq:		@max_pstate frequency in cpufreq units
+ * @turbo_freq:		@turbo_pstate frequency in cpufreq units
  *
  * Stores the per cpu model P state limits and current P state.
  */
@@ -132,6 +136,8 @@ struct pstate_data {
 	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
+	unsigned int max_freq;
+	unsigned int turbo_freq;
 };
 
 /**
@@ -470,7 +476,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
 }
 
-static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 {
 }
 #endif
@@ -1225,6 +1231,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
+	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
 
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
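
[Editor's note] The two added lines above cache the max and turbo frequencies in cpufreq units (kHz), so the new intel_cpufreq callbacks can compare them against policy limits without recomputing the product. A minimal standalone sketch of the conversion, not part of the patch; the values are hypothetical, and the 100000 kHz-per-step scaling factor is an assumption matching the 100 MHz bus clock of Core processors:

```c
#include <stdio.h>

int main(void)
{
        /* Hypothetical P-state ratios and scaling factor (kHz per step). */
        int max_pstate = 24, turbo_pstate = 32, scaling = 100000;

        /* Same arithmetic as the patch: frequency = ratio * scaling. */
        unsigned int max_freq = max_pstate * scaling;     /* 2400000 kHz */
        unsigned int turbo_freq = turbo_pstate * scaling; /* 3200000 kHz */

        printf("max:   %u kHz (%.1f GHz)\n", max_freq, max_freq / 1e6);
        printf("turbo: %u kHz (%.1f GHz)\n", turbo_freq, turbo_freq / 1e6);
        return 0;
}
```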
@@ -1363,15 +1371,19 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
 
-static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
 	int max_perf, min_perf;
 
-	update_turbo_state();
-
 	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 	pstate = clamp_t(int, pstate, min_perf, max_perf);
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+	return pstate;
+}
+
+static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+	pstate = intel_pstate_prepare_request(cpu, pstate);
 	if (pstate == cpu->pstate.current_pstate)
 		return;
 
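
[Editor's note] This hunk splits the old update helper in two: intel_pstate_prepare_request() only clamps the requested P-state into the allowed [min, max] range (and traces it), while intel_pstate_update_pstate() decides whether the clamped value actually differs before writing it. That lets the new intel_cpufreq_target() below reuse the clamping with its own transition bookkeeping. A standalone sketch of the split, assuming illustrative names and values (none of this is kernel API):

```c
#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static int current_pstate = 16;

/* Clamp only; no side effects on the current state. */
static int prepare_request(int pstate, int min_perf, int max_perf)
{
        return CLAMP(pstate, min_perf, max_perf);
}

static void update_pstate(int pstate, int min_perf, int max_perf)
{
        pstate = prepare_request(pstate, min_perf, max_perf);
        if (pstate == current_pstate)
                return;         /* unchanged: skip the costly write */
        current_pstate = pstate;
        printf("write P-state %d\n", pstate);
}

int main(void)
{
        update_pstate(40, 8, 32);       /* clamped to 32, written */
        update_pstate(35, 8, 32);       /* clamps to 32 again, skipped */
        return 0;
}
```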
@@ -1389,6 +1401,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
 		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
 
+	update_turbo_state();
+
 	intel_pstate_update_pstate(cpu, target_pstate);
 
 	sample = &cpu->sample;
@@ -1670,22 +1684,30 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+{
+	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+}
+
 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 {
-	int cpu_num = policy->cpu;
-	struct cpudata *cpu = all_cpu_data[cpu_num];
+	pr_debug("CPU %d exiting\n", policy->cpu);
 
-	pr_debug("CPU %d exiting\n", cpu_num);
+	intel_pstate_clear_update_util_hook(policy->cpu);
+	if (!hwp_active)
+		intel_cpufreq_stop_cpu(policy);
+}
 
-	intel_pstate_clear_update_util_hook(cpu_num);
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	intel_pstate_exit_perf_limits(policy);
 
-	if (hwp_active)
-		return;
+	policy->fast_switch_possible = false;
 
-	intel_pstate_set_min_pstate(cpu);
+	return 0;
 }
 
-static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
 	int rc;
@@ -1696,11 +1718,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
-		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
-	else
-		policy->policy = CPUFREQ_POLICY_POWERSAVE;
-
 	/*
 	 * We need sane value in the cpu->perf_limits, so inherit from global
 	 * perf_limits limits, which are seeded with values based on the
@@ -1720,20 +1737,30 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
 	intel_pstate_init_acpi_perf_limits(policy);
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
+	policy->fast_switch_possible = true;
+
 	return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-	intel_pstate_exit_perf_limits(policy);
+	int ret = __intel_pstate_cpu_init(policy);
+
+	if (ret)
+		return ret;
+
+	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+	else
+		policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
 	return 0;
 }
 
-static struct cpufreq_driver intel_pstate_driver = {
+static struct cpufreq_driver intel_pstate = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
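
[Editor's note] The hunk above moves the shared policy setup into __intel_pstate_cpu_init() so both driver variants can call it, with each variant's own init adding only its specific settings (CPUFREQ_ETERNAL and the performance/powersave choice for the active driver, the 20 us transition latency for the passive one below). A standalone sketch of that wrapper pattern, under the assumptions that cpufreq transition latency is in nanoseconds and CPUFREQ_ETERNAL is the all-ones value; all names here are illustrative:

```c
#include <stdio.h>

struct policy {
        unsigned int transition_latency;        /* nanoseconds */
        int fast_switch_possible;
};

/* Shared setup used by both variants, like __intel_pstate_cpu_init(). */
static int common_init(struct policy *p)
{
        p->fast_switch_possible = 1;
        return 0;
}

static int active_init(struct policy *p)
{
        int ret = common_init(p);

        if (ret)
                return ret;
        p->transition_latency = ~0u;    /* "eternal": no governor timing */
        return 0;
}

static int passive_init(struct policy *p)
{
        int ret = common_init(p);

        if (ret)
                return ret;
        p->transition_latency = 20000;  /* mirrors the new 20000 ns constant */
        return 0;
}

int main(void)
{
        struct policy p = { 0 };

        passive_init(&p);
        printf("latency=%u ns, fast_switch=%d\n",
               p.transition_latency, p.fast_switch_possible);
        return 0;
}
```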
@@ -1745,6 +1772,118 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.name		= "intel_pstate",
 };
 
+static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	struct perf_limits *perf_limits = limits;
+
+	update_turbo_state();
+	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+
+	cpufreq_verify_within_cpu_limits(policy);
+
+	if (per_cpu_limits)
+		perf_limits = cpu->perf_limits;
+
+	intel_pstate_update_perf_limits(policy, perf_limits);
+
+	return 0;
+}
+
+static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
+					       struct cpufreq_policy *policy,
+					       unsigned int target_freq)
+{
+	unsigned int max_freq;
+
+	update_turbo_state();
+
+	max_freq = limits->no_turbo || limits->turbo_disabled ?
+			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+	policy->cpuinfo.max_freq = max_freq;
+	if (policy->max > max_freq)
+		policy->max = max_freq;
+
+	if (target_freq > max_freq)
+		target_freq = max_freq;
+
+	return target_freq;
+}
+
+static int intel_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	struct cpufreq_freqs freqs;
+	int target_pstate;
+
+	freqs.old = policy->cur;
+	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+
+	cpufreq_freq_transition_begin(policy, &freqs);
+	switch (relation) {
+	case CPUFREQ_RELATION_L:
+		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+		break;
+	case CPUFREQ_RELATION_H:
+		target_pstate = freqs.new / cpu->pstate.scaling;
+		break;
+	default:
+		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+		break;
+	}
+	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+	if (target_pstate != cpu->pstate.current_pstate) {
+		cpu->pstate.current_pstate = target_pstate;
+		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
+			      pstate_funcs.get_val(cpu, target_pstate));
+	}
+	cpufreq_freq_transition_end(policy, &freqs, false);
+
+	return 0;
+}
+
+static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+					      unsigned int target_freq)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	int target_pstate;
+
+	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+	intel_pstate_update_pstate(cpu, target_pstate);
+	return target_freq;
+}
+
+static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	int ret = __intel_pstate_cpu_init(policy);
+
+	if (ret)
+		return ret;
+
+	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
+	policy->cur = policy->cpuinfo.min_freq;
+
+	return 0;
+}
+
+static struct cpufreq_driver intel_cpufreq = {
+	.flags		= CPUFREQ_CONST_LOOPS,
+	.verify		= intel_cpufreq_verify_policy,
+	.target		= intel_cpufreq_target,
+	.fast_switch	= intel_cpufreq_fast_switch,
+	.init		= intel_cpufreq_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
+	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.name		= "intel_cpufreq",
+};
+
+static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+
 static int no_load __initdata;
 static int no_hwp __initdata;
 static int hwp_only __initdata;
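
[Editor's note] In intel_cpufreq_target() above, the cpufreq relation decides how the requested frequency is rounded to a P-state: CPUFREQ_RELATION_L asks for the lowest frequency at or above the target, so the ratio is rounded up; CPUFREQ_RELATION_H asks for the highest frequency at or below it, so the division truncates; anything else rounds to the nearest step. A runnable userspace demo of the three cases, with DIV_ROUND_UP/DIV_ROUND_CLOSEST re-implemented locally and a hypothetical 100000 kHz scaling factor:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
        unsigned int scaling = 100000;  /* kHz per P-state step (assumed) */
        unsigned int target = 2550000;  /* a 2.55 GHz request */

        /* CPUFREQ_RELATION_L: lowest frequency >= target -> round up */
        printf("RELATION_L -> P-state %u\n", DIV_ROUND_UP(target, scaling));      /* 26 */
        /* CPUFREQ_RELATION_H: highest frequency <= target -> truncate */
        printf("RELATION_H -> P-state %u\n", target / scaling);                   /* 25 */
        /* default: nearest step */
        printf("default    -> P-state %u\n", DIV_ROUND_CLOSEST(target, scaling)); /* 26 */
        return 0;
}
```

Note that the fast-switch path always uses DIV_ROUND_UP, i.e. it behaves like CPUFREQ_RELATION_L.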
@@ -1976,7 +2115,7 @@ static int __init intel_pstate_init(void)
 
 	intel_pstate_request_control_from_smm();
 
-	rc = cpufreq_register_driver(&intel_pstate_driver);
+	rc = cpufreq_register_driver(intel_pstate_driver);
 	if (rc)
 		goto out;
 
@@ -1991,7 +2130,9 @@ static int __init intel_pstate_init(void)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu]) {
-			intel_pstate_clear_update_util_hook(cpu);
+			if (intel_pstate_driver == &intel_pstate)
+				intel_pstate_clear_update_util_hook(cpu);
+
 			kfree(all_cpu_data[cpu]);
 		}
 	}
@@ -2007,8 +2148,13 @@ static int __init intel_pstate_setup(char *str)
 	if (!str)
 		return -EINVAL;
 
-	if (!strcmp(str, "disable"))
+	if (!strcmp(str, "disable")) {
 		no_load = 1;
+	} else if (!strcmp(str, "passive")) {
+		pr_info("Passive mode enabled\n");
+		intel_pstate_driver = &intel_cpufreq;
+		no_hwp = 1;
+	}
 	if (!strcmp(str, "no_hwp")) {
 		pr_info("HWP disabled\n");
 		no_hwp = 1;
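
[Editor's note] The last two hunks turn intel_pstate_driver into a pointer that defaults to the active driver and is retargeted to &intel_cpufreq when the kernel boots with intel_pstate=passive (which also forces HWP off). A standalone sketch of that select-then-register pattern, with illustrative names that are not the kernel's API:

```c
#include <stdio.h>
#include <string.h>

struct driver {
        const char *name;
};

static struct driver active_driver  = { .name = "intel_pstate" };
static struct driver passive_driver = { .name = "intel_cpufreq" };

/* Like intel_pstate_driver in the patch: points at the default variant
 * until the early "passive" parameter retargets it. */
static struct driver *selected = &active_driver;

static void setup(const char *str)
{
        if (!strcmp(str, "passive"))
                selected = &passive_driver;     /* the patch also sets no_hwp */
}

int main(void)
{
        setup("passive");       /* models booting with intel_pstate=passive */
        printf("registering cpufreq driver: %s\n", selected->name);
        return 0;
}
```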