@@ -22,6 +22,7 @@
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>

@@ -106,6 +107,7 @@ struct rk_iommu {
 };

 struct rk_iommudata {
+        struct device_link *link; /* runtime PM link from IOMMU to master */
         struct rk_iommu *iommu;
 };

@@ -520,7 +522,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
         irqreturn_t ret = IRQ_NONE;
         int i;

-        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+        if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
+                return 0;
+
+        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+                goto out;

         for (i = 0; i < iommu->num_mmu; i++) {
                 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
@@ -570,6 +576,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)

         clk_bulk_disable(iommu->num_clocks, iommu->clocks);

+out:
+        pm_runtime_put(iommu->dev);
         return ret;
 }

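The two hunks above rework the fault handler so it never touches a powered-down IOMMU: pm_runtime_get_if_in_use() takes a runtime-PM reference only when the device is already active, returns 0 when it is suspended, and never powers it up, so a spurious shared interrupt cannot read registers of gated hardware. A minimal sketch of the same guard in a hypothetical handler; all foo_* names are made up:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct foo_dev {
        struct device *dev;
        void __iomem *base;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        /*
         * Positive return: the device was RPM_ACTIVE and a reference
         * was taken. 0 (or -EINVAL when runtime PM is disabled): leave
         * the hardware alone, it is not powered.
         */
        if (pm_runtime_get_if_in_use(foo->dev) <= 0)
                return IRQ_NONE;

        /* ... read and clear the interrupt status registers ... */

        pm_runtime_put(foo->dev);       /* drop the reference taken above */
        return IRQ_HANDLED;
}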
@@ -611,10 +619,17 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
         spin_lock_irqsave(&rk_domain->iommus_lock, flags);
         list_for_each(pos, &rk_domain->iommus) {
                 struct rk_iommu *iommu;
+
                 iommu = list_entry(pos, struct rk_iommu, node);
-                WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-                rk_iommu_zap_lines(iommu, iova, size);
-                clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+                /* Only zap TLBs of IOMMUs that are powered on. */
+                if (pm_runtime_get_if_in_use(iommu->dev)) {
+                        WARN_ON(clk_bulk_enable(iommu->num_clocks,
+                                                iommu->clocks));
+                        rk_iommu_zap_lines(iommu, iova, size);
+                        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+                        pm_runtime_put(iommu->dev);
+                }
         }
         spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
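Skipping suspended IOMMUs in the zap path is safe rather than merely fast: a powered-down TLB holds nothing to invalidate, and the resume callback added later in this patch reprograms the page-table base before the unit can translate again. The guard generalizes to any "touch the hardware only if it is already up" operation; a sketch with hypothetical names:

#include <linux/pm_runtime.h>

/*
 * Run op() against the hardware only while it is already powered.
 * Returns false when the device was suspended and op() was skipped;
 * the caller relies on the resume path to rebuild the state instead.
 */
static bool foo_if_powered(struct device *dev, void (*op)(void *), void *arg)
{
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return false;

        op(arg);
        pm_runtime_put(dev);
        return true;
}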
@@ -817,22 +832,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
         return data ? data->iommu : NULL;
 }

-static int rk_iommu_attach_device(struct iommu_domain *domain,
-                                  struct device *dev)
+/* Must be called with iommu powered on and attached */
+static void rk_iommu_disable(struct rk_iommu *iommu)
 {
-        struct rk_iommu *iommu;
+        int i;
+
+        /* Ignore error while disabling, just keep going */
+        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+        rk_iommu_enable_stall(iommu);
+        rk_iommu_disable_paging(iommu);
+        for (i = 0; i < iommu->num_mmu; i++) {
+                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+        }
+        rk_iommu_disable_stall(iommu);
+        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+}
+
+/* Must be called with iommu powered on and attached */
+static int rk_iommu_enable(struct rk_iommu *iommu)
+{
+        struct iommu_domain *domain = iommu->domain;
         struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-        unsigned long flags;
         int ret, i;

-        /*
-         * Allow 'virtual devices' (e.g., drm) to attach to domain.
-         * Such a device does not belong to an iommu group.
-         */
-        iommu = rk_iommu_from_dev(dev);
-        if (!iommu)
-                return 0;
-
         ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
         if (ret)
                 return ret;
@@ -845,8 +868,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
         if (ret)
                 goto out_disable_stall;

-        iommu->domain = domain;
-
         for (i = 0; i < iommu->num_mmu; i++) {
                 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                                rk_domain->dt_dma);
@@ -855,14 +876,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
         }

         ret = rk_iommu_enable_paging(iommu);
-        if (ret)
-                goto out_disable_stall;
-
-        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
-        list_add_tail(&iommu->node, &rk_domain->iommus);
-        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
-        dev_dbg(dev, "Attached to iommu domain\n");

 out_disable_stall:
         rk_iommu_disable_stall(iommu);
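The three hunks above pull the hardware sequences out of attach/detach into rk_iommu_disable() and rk_iommu_enable(), so the runtime-PM callbacks added at the end of the patch can share them. Both helpers bracket every register access identically: clocks on, stall the MMU, program registers, unstall, clocks off. A sketch of that bracket under hypothetical foo_* names:

#include <linux/clk.h>

struct foo_dev {
        int num_clocks;
        struct clk_bulk_data *clocks;
};

static void foo_stall(struct foo_dev *foo) { /* assert the stall bit */ }
static void foo_unstall(struct foo_dev *foo) { /* deassert the stall bit */ }

/* Illustrative only: run one register-programming op inside the bracket. */
static int foo_hw_op(struct foo_dev *foo, void (*op)(struct foo_dev *))
{
        int ret;

        ret = clk_bulk_enable(foo->num_clocks, foo->clocks);
        if (ret)
                return ret;

        foo_stall(foo);         /* park the MMU so registers are stable */
        op(foo);
        foo_unstall(foo);

        clk_bulk_disable(foo->num_clocks, foo->clocks);
        return 0;
}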
@@ -877,31 +890,71 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
         struct rk_iommu *iommu;
         struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
         unsigned long flags;
-        int i;

         /* Allow 'virtual devices' (eg drm) to detach from domain */
         iommu = rk_iommu_from_dev(dev);
         if (!iommu)
                 return;

+        dev_dbg(dev, "Detaching from iommu domain\n");
+
+        /* iommu already detached */
+        if (iommu->domain != domain)
+                return;
+
+        iommu->domain = NULL;
+
         spin_lock_irqsave(&rk_domain->iommus_lock, flags);
         list_del_init(&iommu->node);
         spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

-        /* Ignore error while disabling, just keep going */
-        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-        rk_iommu_enable_stall(iommu);
-        rk_iommu_disable_paging(iommu);
-        for (i = 0; i < iommu->num_mmu; i++) {
-                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
-                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+        if (pm_runtime_get_if_in_use(iommu->dev)) {
+                rk_iommu_disable(iommu);
+                pm_runtime_put(iommu->dev);
         }
-        rk_iommu_disable_stall(iommu);
-        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+}

-        iommu->domain = NULL;
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+                struct device *dev)
+{
+        struct rk_iommu *iommu;
+        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+        unsigned long flags;
+        int ret;

-        dev_dbg(dev, "Detached from iommu domain\n");
+        /*
+         * Allow 'virtual devices' (e.g., drm) to attach to domain.
+         * Such a device does not belong to an iommu group.
+         */
+        iommu = rk_iommu_from_dev(dev);
+        if (!iommu)
+                return 0;
+
+        dev_dbg(dev, "Attaching to iommu domain\n");
+
+        /* iommu already attached */
+        if (iommu->domain == domain)
+                return 0;
+
+        if (iommu->domain)
+                rk_iommu_detach_device(iommu->domain, dev);
+
+        iommu->domain = domain;
+
+        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+        list_add_tail(&iommu->node, &rk_domain->iommus);
+        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+        if (!pm_runtime_get_if_in_use(iommu->dev))
+                return 0;
+
+        ret = rk_iommu_enable(iommu);
+        if (ret)
+                rk_iommu_detach_device(iommu->domain, dev);
+
+        pm_runtime_put(iommu->dev);
+
+        return ret;
 }

 static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
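After this hunk, attach is split into bookkeeping that always runs (record iommu->domain, join the domain's iommu list) and hardware programming that runs only if the IOMMU happens to be powered; if it is not, rk_iommu_resume() performs the programming the next time a master powers the IOMMU up through the device link. The idiom, reduced to a sketch with stand-in types and a stubbed foo_program_hw():

#include <linux/pm_runtime.h>

struct foo_domain { /* page-table root, iommu list, ... */ };

struct foo_dev {
        struct device *dev;
        struct foo_domain *domain;
};

static int foo_program_hw(struct foo_dev *foo) { return 0; /* stub */ }

static int foo_attach(struct foo_dev *foo, struct foo_domain *dom)
{
        int ret;

        foo->domain = dom;      /* bookkeeping happens unconditionally */

        if (pm_runtime_get_if_in_use(foo->dev) <= 0)
                return 0;       /* powered down: resume programs it later */

        ret = foo_program_hw(foo);      /* powered up: mirror state now */
        pm_runtime_put(foo->dev);
        return ret;
}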
@@ -989,27 +1042,33 @@ static int rk_iommu_add_device(struct device *dev)
 {
         struct iommu_group *group;
         struct rk_iommu *iommu;
+        struct rk_iommudata *data;

-        iommu = rk_iommu_from_dev(dev);
-        if (!iommu)
+        data = dev->archdata.iommu;
+        if (!data)
                 return -ENODEV;

+        iommu = rk_iommu_from_dev(dev);
+
         group = iommu_group_get_for_dev(dev);
         if (IS_ERR(group))
                 return PTR_ERR(group);
         iommu_group_put(group);

         iommu_device_link(&iommu->iommu, dev);
+        data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);

         return 0;
 }

 static void rk_iommu_remove_device(struct device *dev)
 {
         struct rk_iommu *iommu;
+        struct rk_iommudata *data = dev->archdata.iommu;

         iommu = rk_iommu_from_dev(dev);

+        device_link_del(data->link);
         iommu_device_unlink(&iommu->iommu, dev);
         iommu_group_remove_device(dev);
 }
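device_link_add() with DL_FLAG_PM_RUNTIME is what ties the two halves of the patch together: the master becomes a runtime-PM consumer of the IOMMU, so the PM core resumes the IOMMU (invoking rk_iommu_resume()) before the master and keeps it active for as long as the master is; the matching device_link_del() in remove_device drops that guarantee. A minimal usage sketch; master and iommu_dev are placeholders:

#include <linux/device.h>

static struct device_link *foo_link_master(struct device *master,
                                           struct device *iommu_dev)
{
        /*
         * consumer = master, supplier = iommu_dev. The supplier is
         * runtime-resumed before the consumer and may not suspend
         * while the consumer is active. Returns NULL on failure.
         */
        return device_link_add(master, iommu_dev, DL_FLAG_PM_RUNTIME);
}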
@@ -1135,6 +1194,8 @@ static int rk_iommu_probe(struct platform_device *pdev)

         bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

+        pm_runtime_enable(dev);
+
         return 0;
 err_remove_sysfs:
         iommu_device_sysfs_remove(&iommu->iommu);
@@ -1145,21 +1206,36 @@ static int rk_iommu_probe(struct platform_device *pdev)

 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
-        struct rk_iommu *iommu = platform_get_drvdata(pdev);
+        pm_runtime_force_suspend(&pdev->dev);
+}

-        /*
-         * Be careful not to try to shutdown an otherwise unused
-         * IOMMU, as it is likely not to be clocked, and accessing it
-         * would just block. An IOMMU without a domain is likely to be
-         * unused, so let's use this as a (weak) guard.
-         */
-        if (iommu && iommu->domain) {
-                rk_iommu_enable_stall(iommu);
-                rk_iommu_disable_paging(iommu);
-                rk_iommu_force_reset(iommu);
-        }
+static int __maybe_unused rk_iommu_suspend(struct device *dev)
+{
+        struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+        if (!iommu->domain)
+                return 0;
+
+        rk_iommu_disable(iommu);
+        return 0;
+}
+
+static int __maybe_unused rk_iommu_resume(struct device *dev)
+{
+        struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+        if (!iommu->domain)
+                return 0;
+
+        return rk_iommu_enable(iommu);
 }

+static const struct dev_pm_ops rk_iommu_pm_ops = {
+        SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
+        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                pm_runtime_force_resume)
+};
+
 static const struct of_device_id rk_iommu_dt_ids[] = {
         { .compatible = "rockchip,iommu" },
         { /* sentinel */ }
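One pair of callbacks now covers runtime PM, system sleep, and shutdown: SET_RUNTIME_PM_OPS() wires rk_iommu_suspend/resume into the runtime path, while pm_runtime_force_suspend()/pm_runtime_force_resume() reuse those same callbacks for system sleep and for rk_iommu_shutdown() above, invoking them directly even when the device is not runtime-suspended. A compressed sketch of the usual probe/shutdown wiring, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
        /* ... map registers, get clocks, request the IRQ ... */

        /*
         * After this call the PM core may invoke the runtime callbacks
         * at any time, so everything they touch must already be set up.
         */
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static void foo_shutdown(struct platform_device *pdev)
{
        /* Runs the runtime-suspend callback even if the device is active. */
        pm_runtime_force_suspend(&pdev->dev);
}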
@@ -1172,6 +1248,7 @@ static struct platform_driver rk_iommu_driver = {
         .driver = {
                    .name = "rk_iommu",
                    .of_match_table = rk_iommu_dt_ids,
+                   .pm = &rk_iommu_pm_ops,
                    .suppress_bind_attrs = true,
         },
 };