@@ -187,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	int ret;
 
@@ -206,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	/* OPPs might be populated at runtime, don't check for error here */
 	of_init_opp_table(cpu_dev);
 
-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
-	if (ret) {
-		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-		goto out_put_node;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
-		goto out_free_table;
+		goto out_put_node;
 	}
 
 	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -224,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	transition_latency = CPUFREQ_ETERNAL;
 
 	if (!IS_ERR(cpu_reg)) {
-		struct dev_pm_opp *opp;
-		unsigned long min_uV, max_uV;
-		int i;
+		unsigned long opp_freq = 0;
 
 		/*
-		 * OPP is maintained in order of increasing frequency, and
-		 * freq_table initialised from OPP is therefore sorted in the
-		 * same order.
+		 * Disable any OPPs where the connected regulator isn't able to
+		 * provide the specified voltage and record minimum and maximum
+		 * voltage levels.
 		 */
-		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
-			;
-		rcu_read_lock();
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[0].frequency * 1000, true);
-		min_uV = dev_pm_opp_get_voltage(opp);
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[i - 1].frequency * 1000, true);
-		max_uV = dev_pm_opp_get_voltage(opp);
-		rcu_read_unlock();
+		while (1) {
+			struct dev_pm_opp *opp;
+			unsigned long opp_uV, tol_uV;
+
+			rcu_read_lock();
+			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
+			if (IS_ERR(opp)) {
+				rcu_read_unlock();
+				break;
+			}
+			opp_uV = dev_pm_opp_get_voltage(opp);
+			rcu_read_unlock();
+
+			tol_uV = opp_uV * priv->voltage_tolerance / 100;
+			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+							   opp_uV + tol_uV)) {
+				if (opp_uV < min_uV)
+					min_uV = opp_uV;
+				if (opp_uV > max_uV)
+					max_uV = opp_uV;
+			} else {
+				dev_pm_opp_disable(cpu_dev, opp_freq);
+			}
+
+			opp_freq++;
+		}
+
 		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
 		if (ret > 0)
 			transition_latency += ret * 1000;
 	}
 
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table: %d\n", ret);
+		goto out_free_priv;
+	}
+
 	/*
 	 * For now, just loading the cooling device;
 	 * thermal DT code takes care of matching them.
@@ -286,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
 out_cooling_unregister:
 	cpufreq_cooling_unregister(priv->cdev);
-	kfree(priv);
-out_free_table:
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+	kfree(priv);
 out_put_node:
 	of_node_put(np);
 out_put_reg_clk:
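
Not part of the patch: below is a minimal standalone sketch of the OPP-pruning pattern the new loop introduces, using only the calls the diff itself relies on (dev_pm_opp_find_freq_ceil() and dev_pm_opp_get_voltage() under rcu_read_lock(), regulator_is_supported_voltage(), dev_pm_opp_disable()). The helper name and the explicit tolerance_pct parameter are invented for illustration; in the driver the tolerance comes from the "voltage-tolerance" DT property stored in priv->voltage_tolerance.

/* Illustration only: disable every OPP the supply cannot provide. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/regulator/consumer.h>

static void example_prune_unsupported_opps(struct device *cpu_dev,
					   struct regulator *cpu_reg,
					   unsigned int tolerance_pct)
{
	unsigned long freq = 0;

	while (1) {
		struct dev_pm_opp *opp;
		unsigned long uV, tol_uV;

		/* OPP lookups are RCU-protected in this kernel version. */
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			break;		/* walked past the highest OPP */
		}
		uV = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();

		/* Allow the same percentage of headroom the driver tolerates. */
		tol_uV = uV * tolerance_pct / 100;
		if (!regulator_is_supported_voltage(cpu_reg, uV, uV + tol_uV))
			dev_pm_opp_disable(cpu_dev, freq);

		/* find_freq_ceil() rounded freq up to this OPP; step past it. */
		freq++;
	}
}

This is also why the patch moves dev_pm_opp_init_cpufreq_table() below the regulator block: the cpufreq table is then built only from the OPPs that remained enabled after pruning, and the error path is reordered (out_free_priv) to match the new allocation order.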