@@ -813,34 +813,26 @@ static const struct dma_map_ops iommu_dma_ops = {
	.mapping_error = iommu_dma_mapping_error,
};

-/*
- * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
- * everything it needs to - the device is only partially created and the
- * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
- * need this delayed attachment dance. Once IOMMU probe ordering is sorted
- * to move the arch_setup_dma_ops() call later, all the notifier bits below
- * become unnecessary, and will go away.
- */
-struct iommu_dma_notifier_data {
-	struct list_head list;
-	struct device *dev;
-	const struct iommu_ops *ops;
-	u64 dma_base;
-	u64 size;
-};
-static LIST_HEAD(iommu_dma_masters);
-static DEFINE_MUTEX(iommu_dma_notifier_lock);
+static int __init __iommu_dma_init(void)
+{
+	return iommu_dma_init();
+}
+arch_initcall(__iommu_dma_init);

-static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
-			    u64 dma_base, u64 size)
+static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+				  const struct iommu_ops *ops)
{
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_domain *domain;
+
+	if (!ops)
+		return;

	/*
-	 * If the IOMMU driver has the DMA domain support that we require,
-	 * then the IOMMU core will have already configured a group for this
-	 * device, and allocated the default domain for that group.
+	 * The IOMMU core code allocates the default DMA domain, which the
+	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
+	domain = iommu_get_domain_for_dev(dev);
+
	if (!domain)
		goto out_err;

@@ -851,109 +843,11 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
		dev->dma_ops = &iommu_dma_ops;
	}

-	return true;
+	return;
+
out_err:
-	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
-	return false;
-}
-
-static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
-			       u64 dma_base, u64 size)
-{
-	struct iommu_dma_notifier_data *iommudata;
-
-	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
-	if (!iommudata)
-		return;
-
-	iommudata->dev = dev;
-	iommudata->ops = ops;
-	iommudata->dma_base = dma_base;
-	iommudata->size = size;
-
-	mutex_lock(&iommu_dma_notifier_lock);
-	list_add(&iommudata->list, &iommu_dma_masters);
-	mutex_unlock(&iommu_dma_notifier_lock);
-}
-
-static int __iommu_attach_notifier(struct notifier_block *nb,
-				   unsigned long action, void *data)
-{
-	struct iommu_dma_notifier_data *master, *tmp;
-
-	if (action != BUS_NOTIFY_BIND_DRIVER)
-		return 0;
-
-	mutex_lock(&iommu_dma_notifier_lock);
-	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
-		if (data == master->dev && do_iommu_attach(master->dev,
-				master->ops, master->dma_base, master->size)) {
-			list_del(&master->list);
-			kfree(master);
-			break;
-		}
-	}
-	mutex_unlock(&iommu_dma_notifier_lock);
-	return 0;
-}
-
-static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
-{
-	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
-	int ret;
-
-	if (!nb)
-		return -ENOMEM;
-
-	nb->notifier_call = __iommu_attach_notifier;
-
-	ret = bus_register_notifier(bus, nb);
-	if (ret) {
-		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
-			bus->name);
-		kfree(nb);
-	}
-	return ret;
-}
-
-static int __init __iommu_dma_init(void)
-{
-	int ret;
-
-	ret = iommu_dma_init();
-	if (!ret)
-		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
-	if (!ret)
-		ret = register_iommu_dma_ops_notifier(&amba_bustype);
-#ifdef CONFIG_PCI
-	if (!ret)
-		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
-#endif
-	return ret;
-}
-arch_initcall(__iommu_dma_init);
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				  const struct iommu_ops *ops)
-{
-	struct iommu_group *group;
-
-	if (!ops)
-		return;
-	/*
-	 * TODO: As a concession to the future, we're ready to handle being
-	 * called both early and late (i.e. after bus_add_device). Once all
-	 * the platform bus code is reworked to call us late and the notifier
-	 * junk above goes away, move the body of do_iommu_attach here.
-	 */
-	group = iommu_group_get(dev);
-	if (group) {
-		do_iommu_attach(dev, ops, dma_base, size);
-		iommu_group_put(group);
-	} else {
-		queue_iommu_attach(dev, ops, dma_base, size);
-	}
}

void arch_teardown_dma_ops(struct device *dev)
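Read as a whole, the patch deletes the deferred bus-notifier machinery and installs the IOMMU DMA ops synchronously, since by the time __iommu_setup_dma_ops() now runs the IOMMU core has already grouped the device and allocated its default domain. For convenience, here is the new function reassembled from the `+` and context lines above. The lines between the two hunks are unchanged by the patch and are only summarised in a comment here, so treat this as a sketch of the post-patch state rather than the verbatim source:

```c
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	/* Device has no IOMMU master interface: leave the platform ops. */
	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * ... context elided between the two hunks: the default domain is
	 * set up for DMA API use and, on success, the IOMMU-backed ops are
	 * installed (the closing lines of that block appear as context in
	 * the second hunk) ...
	 */
		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
```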