@@ -285,6 +285,44 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
285
285
return 0 ;
286
286
}
287
287
288
/*
 * devfreq_set_target() - Change the device frequency to @new_freq and keep
 *			  the transition bookkeeping consistent.
 * @devfreq:	the devfreq instance
 * @new_freq:	target frequency requested from the device profile
 * @flags:	DEVFREQ_FLAG_* hints forwarded to the profile's ->target()
 *
 * Determines the current frequency (via the profile's ->get_cur_freq() when
 * available, otherwise falling back to the cached previous_freq), emits the
 * DEVFREQ_PRECHANGE notification, asks the driver to switch, then emits
 * DEVFREQ_POSTCHANGE with the frequency that is actually in effect.
 *
 * Returns 0 on success or the error from the profile's ->target() callback.
 */
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/*
		 * The switch failed: still send POSTCHANGE, but report that
		 * the frequency is unchanged so listeners see a no-op.
		 */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/*
	 * When a suspend frequency is configured, remember where we came
	 * from so devfreq_resume_device() can restore it later.
	 */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
288
326
/* Load monitoring helper functions for governors use */
289
327
290
328
/**
@@ -296,8 +334,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
296
334
*/
297
335
int update_devfreq (struct devfreq * devfreq )
298
336
{
299
- struct devfreq_freqs freqs ;
300
- unsigned long freq , cur_freq , min_freq , max_freq ;
337
+ unsigned long freq , min_freq , max_freq ;
301
338
int err = 0 ;
302
339
u32 flags = 0 ;
303
340
@@ -333,31 +370,8 @@ int update_devfreq(struct devfreq *devfreq)
333
370
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND ; /* Use LUB */
334
371
}
335
372
336
- if (devfreq -> profile -> get_cur_freq )
337
- devfreq -> profile -> get_cur_freq (devfreq -> dev .parent , & cur_freq );
338
- else
339
- cur_freq = devfreq -> previous_freq ;
373
+ return devfreq_set_target (devfreq , freq , flags );
340
374
341
- freqs .old = cur_freq ;
342
- freqs .new = freq ;
343
- devfreq_notify_transition (devfreq , & freqs , DEVFREQ_PRECHANGE );
344
-
345
- err = devfreq -> profile -> target (devfreq -> dev .parent , & freq , flags );
346
- if (err ) {
347
- freqs .new = cur_freq ;
348
- devfreq_notify_transition (devfreq , & freqs , DEVFREQ_POSTCHANGE );
349
- return err ;
350
- }
351
-
352
- freqs .new = freq ;
353
- devfreq_notify_transition (devfreq , & freqs , DEVFREQ_POSTCHANGE );
354
-
355
- if (devfreq_update_status (devfreq , freq ))
356
- dev_err (& devfreq -> dev ,
357
- "Couldn't update frequency transition information.\n" );
358
-
359
- devfreq -> previous_freq = freq ;
360
- return err ;
361
375
}
362
376
EXPORT_SYMBOL (update_devfreq );
363
377
@@ -657,6 +671,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
657
671
}
658
672
devfreq -> max_freq = devfreq -> scaling_max_freq ;
659
673
674
+ devfreq -> suspend_freq = dev_pm_opp_get_suspend_opp_freq (dev );
675
+ atomic_set (& devfreq -> suspend_count , 0 );
676
+
660
677
dev_set_name (& devfreq -> dev , "devfreq%d" ,
661
678
atomic_inc_return (& devfreq_no ));
662
679
err = device_register (& devfreq -> dev );
@@ -857,14 +874,28 @@ EXPORT_SYMBOL(devm_devfreq_remove_device);
857
874
*/
858
875
int devfreq_suspend_device (struct devfreq * devfreq )
859
876
{
877
+ int ret ;
878
+
860
879
if (!devfreq )
861
880
return - EINVAL ;
862
881
863
- if (! devfreq -> governor )
882
+ if (atomic_inc_return ( & devfreq -> suspend_count ) > 1 )
864
883
return 0 ;
865
884
866
- return devfreq -> governor -> event_handler (devfreq ,
867
- DEVFREQ_GOV_SUSPEND , NULL );
885
+ if (devfreq -> governor ) {
886
+ ret = devfreq -> governor -> event_handler (devfreq ,
887
+ DEVFREQ_GOV_SUSPEND , NULL );
888
+ if (ret )
889
+ return ret ;
890
+ }
891
+
892
+ if (devfreq -> suspend_freq ) {
893
+ ret = devfreq_set_target (devfreq , devfreq -> suspend_freq , 0 );
894
+ if (ret )
895
+ return ret ;
896
+ }
897
+
898
+ return 0 ;
868
899
}
869
900
EXPORT_SYMBOL (devfreq_suspend_device );
870
901
@@ -878,17 +909,75 @@ EXPORT_SYMBOL(devfreq_suspend_device);
878
909
*/
879
910
int devfreq_resume_device (struct devfreq * devfreq )
880
911
{
912
+ int ret ;
913
+
881
914
if (!devfreq )
882
915
return - EINVAL ;
883
916
884
- if (! devfreq -> governor )
917
+ if (atomic_dec_return ( & devfreq -> suspend_count ) >= 1 )
885
918
return 0 ;
886
919
887
- return devfreq -> governor -> event_handler (devfreq ,
888
- DEVFREQ_GOV_RESUME , NULL );
920
+ if (devfreq -> resume_freq ) {
921
+ ret = devfreq_set_target (devfreq , devfreq -> resume_freq , 0 );
922
+ if (ret )
923
+ return ret ;
924
+ }
925
+
926
+ if (devfreq -> governor ) {
927
+ ret = devfreq -> governor -> event_handler (devfreq ,
928
+ DEVFREQ_GOV_RESUME , NULL );
929
+ if (ret )
930
+ return ret ;
931
+ }
932
+
933
+ return 0 ;
889
934
}
890
935
EXPORT_SYMBOL (devfreq_resume_device );
891
936
937
+ /**
938
+ * devfreq_suspend() - Suspend devfreq governors and devices
939
+ *
940
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
941
+ * and devices preserving the state for resume. On some platforms the devfreq
942
+ * device must have precise state (frequency) after resume in order to provide
943
+ * fully operating setup.
944
+ */
945
+ void devfreq_suspend (void )
946
+ {
947
+ struct devfreq * devfreq ;
948
+ int ret ;
949
+
950
+ mutex_lock (& devfreq_list_lock );
951
+ list_for_each_entry (devfreq , & devfreq_list , node ) {
952
+ ret = devfreq_suspend_device (devfreq );
953
+ if (ret )
954
+ dev_err (& devfreq -> dev ,
955
+ "failed to suspend devfreq device\n" );
956
+ }
957
+ mutex_unlock (& devfreq_list_lock );
958
+ }
959
+
960
+ /**
961
+ * devfreq_resume() - Resume devfreq governors and devices
962
+ *
963
+ * Called during system wide Suspend/Hibernate cycle for resuming governors and
964
+ * devices that are suspended with devfreq_suspend().
965
+ */
966
+ void devfreq_resume (void )
967
+ {
968
+ struct devfreq * devfreq ;
969
+ int ret ;
970
+
971
+ mutex_lock (& devfreq_list_lock );
972
+ list_for_each_entry (devfreq , & devfreq_list , node ) {
973
+ ret = devfreq_resume_device (devfreq );
974
+ if (ret )
975
+ dev_warn (& devfreq -> dev ,
976
+ "failed to resume devfreq device\n" );
977
+ }
978
+ mutex_unlock (& devfreq_list_lock );
979
+ }
980
+
892
981
/**
893
982
* devfreq_add_governor() - Add devfreq governor
894
983
* @governor: the devfreq governor to be added
0 commit comments