@@ -24,6 +24,22 @@ struct devx_obj {
24
24
u32 dinbox [MLX5_MAX_DESTROY_INBOX_SIZE_DW ];
25
25
};
26
26
27
/*
 * Driver-private state for a user memory region registered through the
 * DEVX interface.  Stored as the ib_uobject payload so it can be torn
 * down later by devx_umem_cleanup().
 */
struct devx_umem {
	struct mlx5_core_dev	*mdev;		/* device the firmware UMEM object lives on */
	struct ib_umem		*umem;		/* pinned user pages backing the region */
	u32			page_offset;	/* start address offset within the first page */
	int			page_shift;	/* log2 page size used for the MTT entries */
	int			ncont;		/* number of pages at page_shift granularity */
	u32			dinlen;		/* byte length of the prebuilt destroy command */
	u32			dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];	/* prebuilt destroy mailbox */
};
36
+
37
/*
 * Scratch state for building the firmware UMEM create command: a
 * heap-allocated, variable-length input mailbox (it carries one MTT
 * entry per page) plus a fixed-size output header.
 */
struct devx_umem_reg_cmd {
	void	*in;	/* kvzalloc()ed input mailbox; freed by devx_umem_reg_cmd_free() */
	u32	inlen;	/* size in bytes of @in */
	u32	out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];	/* command output header */
};
42
+
27
43
static struct mlx5_ib_ucontext * devx_ufile2uctx (struct ib_uverbs_file * file )
28
44
{
29
45
return to_mucontext (ib_uverbs_get_ucontext (file ));
@@ -788,6 +804,181 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(struct ib_device *ib_de
788
804
return err ;
789
805
}
790
806
807
/*
 * Pin the user buffer described by the UMEM_REG method attributes and
 * derive the page layout (page_shift, ncont, page_offset) that the
 * firmware UMEM object will be built from.
 *
 * Returns 0 on success with obj->umem held; on any failure no umem
 * reference is retained.
 */
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	int access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN) ||
	    uverbs_copy_from(&access, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS))
		return -EFAULT;

	/* Reject invalid/contradictory IB access flags before pinning pages. */
	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	/*
	 * Compute page count, page shift and contiguous-chunk count for the
	 * pinned region, capped by MLX5_MKEY_PAGE_SHIFT_MASK.
	 */
	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		/* Zero usable pages: drop the pin and fail. */
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	/* Offset of the start address inside its (page_shift-sized) page. */
	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}
845
+
846
+ static int devx_umem_reg_cmd_alloc (struct devx_umem * obj ,
847
+ struct devx_umem_reg_cmd * cmd )
848
+ {
849
+ cmd -> inlen = MLX5_ST_SZ_BYTES (create_umem_in ) +
850
+ (MLX5_ST_SZ_BYTES (mtt ) * obj -> ncont );
851
+ cmd -> in = kvzalloc (cmd -> inlen , GFP_KERNEL );
852
+ return cmd -> in ? 0 : - ENOMEM ;
853
+ }
854
+
855
/* Release the input mailbox allocated by devx_umem_reg_cmd_alloc(). */
static void devx_umem_reg_cmd_free(struct devx_umem_reg_cmd *cmd)
{
	kvfree(cmd->in);
}
859
+
860
/*
 * Fill the UMEM create mailbox: general-object command header, umem
 * geometry (MTT count, log page size, in-page offset) and the MTT
 * translation entries for every pinned page.
 */
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	/* log_page_size is expressed relative to MLX5_ADAPTER_PAGE_SHIFT. */
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	/* Grant MTT write permission only when the umem itself is writable. */
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
880
+
881
+ static int UVERBS_HANDLER (MLX5_IB_METHOD_DEVX_UMEM_REG )(struct ib_device * ib_dev ,
882
+ struct ib_uverbs_file * file ,
883
+ struct uverbs_attr_bundle * attrs )
884
+ {
885
+ struct mlx5_ib_ucontext * c = devx_ufile2uctx (file );
886
+ struct mlx5_ib_dev * dev = to_mdev (ib_dev );
887
+ struct devx_umem_reg_cmd cmd ;
888
+ struct devx_umem * obj ;
889
+ struct ib_uobject * uobj ;
890
+ u32 obj_id ;
891
+ int err ;
892
+
893
+ if (!c -> devx_uid )
894
+ return - EPERM ;
895
+
896
+ uobj = uverbs_attr_get_uobject (attrs , MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE );
897
+ obj = kzalloc (sizeof (struct devx_umem ), GFP_KERNEL );
898
+ if (!obj )
899
+ return - ENOMEM ;
900
+
901
+ err = devx_umem_get (dev , & c -> ibucontext , attrs , obj );
902
+ if (err )
903
+ goto err_obj_free ;
904
+
905
+ err = devx_umem_reg_cmd_alloc (obj , & cmd );
906
+ if (err )
907
+ goto err_umem_release ;
908
+
909
+ devx_umem_reg_cmd_build (dev , obj , & cmd );
910
+
911
+ MLX5_SET (general_obj_in_cmd_hdr , cmd .in , uid , c -> devx_uid );
912
+ err = mlx5_cmd_exec (dev -> mdev , cmd .in , cmd .inlen , cmd .out ,
913
+ sizeof (cmd .out ));
914
+ if (err )
915
+ goto err_umem_reg_cmd_free ;
916
+
917
+ obj -> mdev = dev -> mdev ;
918
+ uobj -> object = obj ;
919
+ devx_obj_build_destroy_cmd (cmd .in , cmd .out , obj -> dinbox , & obj -> dinlen , & obj_id );
920
+ err = uverbs_copy_to (attrs , MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID , & obj_id , sizeof (obj_id ));
921
+ if (err )
922
+ goto err_umem_destroy ;
923
+
924
+ devx_umem_reg_cmd_free (& cmd );
925
+
926
+ return 0 ;
927
+
928
+ err_umem_destroy :
929
+ mlx5_cmd_exec (obj -> mdev , obj -> dinbox , obj -> dinlen , cmd .out , sizeof (cmd .out ));
930
+ err_umem_reg_cmd_free :
931
+ devx_umem_reg_cmd_free (& cmd );
932
+ err_umem_release :
933
+ ib_umem_release (obj -> umem );
934
+ err_obj_free :
935
+ kfree (obj );
936
+ return err ;
937
+ }
938
+
939
/*
 * MLX5_IB_METHOD_DEVX_UMEM_DEREG handler: intentionally empty.  The
 * uverbs core destroys the handle, and the actual teardown (firmware
 * destroy command + unpinning) runs in devx_umem_cleanup(), which is
 * registered as this object type's cleanup callback.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_DEREG)(struct ib_device *ib_dev,
							  struct ib_uverbs_file *file,
							  struct uverbs_attr_bundle *attrs)
{
	return 0;
}
945
+
946
/*
 * uobject cleanup callback for MLX5_IB_OBJECT_DEVX_UMEM: execute the
 * destroy mailbox prebuilt at registration time, then unpin the user
 * pages and free the object.
 *
 * A firmware failure is propagated (and the object kept) only for an
 * explicit destroy (RDMA_REMOVE_DESTROY); forced-removal reasons
 * release the resources regardless of the command result.
 */
static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err && why == RDMA_REMOVE_DESTROY)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
961
+
962
/*
 * UMEM_REG attribute spec: a new IDR handle plus mandatory addr/len/access
 * inputs, and the created firmware object id as output.
 */
static DECLARE_UVERBS_NAMED_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG,
	&UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			 MLX5_IB_OBJECT_DEVX_UMEM,
			 UVERBS_ACCESS_NEW,
			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, UVERBS_ATTR_TYPE(u64),
			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, UVERBS_ATTR_TYPE(u64),
			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, UVERBS_ATTR_TYPE(u32),
			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, UVERBS_ATTR_TYPE(u32),
			     UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
975
+
976
/* UMEM_DEREG attribute spec: only the handle to destroy is required. */
static DECLARE_UVERBS_NAMED_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	&UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			 MLX5_IB_OBJECT_DEVX_UMEM,
			 UVERBS_ACCESS_DESTROY,
			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
981
+
791
982
static DECLARE_UVERBS_NAMED_METHOD (MLX5_IB_METHOD_DEVX_QUERY_UAR ,
792
983
& UVERBS_ATTR_PTR_IN (MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX , UVERBS_ATTR_TYPE (u32 ),
793
984
UA_FLAGS (UVERBS_ATTR_SPEC_F_MANDATORY )),
@@ -868,6 +1059,12 @@ static DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
868
1059
& UVERBS_METHOD (MLX5_IB_METHOD_DEVX_OBJ_MODIFY ),
869
1060
& UVERBS_METHOD (MLX5_IB_METHOD_DEVX_OBJ_QUERY ));
870
1061
1062
/*
 * DEVX UMEM uobject type: IDR-allocated handles, torn down through
 * devx_umem_cleanup(), exposing the REG and DEREG methods above.
 */
static DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
	&UVERBS_TYPE_ALLOC_IDR(0, devx_umem_cleanup),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
1066
+
871
1067
/* Root of the DEVX uverbs object tree exported by this file. */
static DECLARE_UVERBS_OBJECT_TREE(devx_objects,
	&UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
	&UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
	&UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
0 commit comments