@@ -850,52 +850,65 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj)
 
 /* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
 static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
-                       struct drm_i915_gem_object *src_obj)
+                       struct drm_i915_gem_object *src_obj,
+                       u32 batch_start_offset,
+                       u32 batch_len)
 {
         int ret = 0;
         int needs_clflush = 0;
-        u32 *src_addr, *dest_addr = NULL;
+        u32 *src_base, *dest_base = NULL;
+        u32 *src_addr, *dest_addr;
+        u32 offset = batch_start_offset / sizeof(*dest_addr);
+        u32 end = batch_start_offset + batch_len;
+
+        if (end > dest_obj->base.size || end > src_obj->base.size)
+                return ERR_PTR(-E2BIG);
 
         ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
         if (ret) {
                 DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
                 return ERR_PTR(ret);
         }
 
-        src_addr = vmap_batch(src_obj);
-        if (!src_addr) {
+        src_base = vmap_batch(src_obj);
+        if (!src_base) {
                 DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
                 ret = -ENOMEM;
                 goto unpin_src;
         }
 
+        src_addr = src_base + offset;
+
         if (needs_clflush)
-                drm_clflush_virt_range((char *)src_addr, src_obj->base.size);
+                drm_clflush_virt_range((char *)src_addr, batch_len);
 
         ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
         if (ret) {
                 DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
                 goto unmap_src;
         }
 
-        dest_addr = vmap_batch(dest_obj);
-        if (!dest_addr) {
+        dest_base = vmap_batch(dest_obj);
+        if (!dest_base) {
                 DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
                 ret = -ENOMEM;
                 goto unmap_src;
         }
 
-        memcpy(dest_addr, src_addr, src_obj->base.size);
-        if (dest_obj->base.size > src_obj->base.size)
-                memset((u8 *)dest_addr + src_obj->base.size, 0,
-                       dest_obj->base.size - src_obj->base.size);
+        dest_addr = dest_base + offset;
+
+        if (batch_start_offset != 0)
+                memset((u8 *)dest_base, 0, batch_start_offset);
+
+        memcpy(dest_addr, src_addr, batch_len);
+        memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
 
 unmap_src:
-        vunmap(src_addr);
+        vunmap(src_base);
 unpin_src:
         i915_gem_object_unpin_pages(src_obj);
 
-        return ret ? ERR_PTR(ret) : dest_addr;
+        return ret ? ERR_PTR(ret) : dest_base;
 }
 
 /**
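
The reworked copy_batch() no longer mirrors the whole source object: it reproduces only the submitted command range at its original offset and zero-fills everything around it (a dword of 0 is MI_NOOP in the GPU command stream, which is what the comment later in the file means by "MI_NOPs"). Below is a minimal userspace sketch of the resulting shadow layout; the function name, parameters, and the userspace types are illustrative only and not part of the commit.

#include <stdint.h>
#include <string.h>
#include <errno.h>

/* Illustrative sketch only: head zeroed, command range copied, tail zeroed. */
static int shadow_layout(uint8_t *dest, size_t dest_size,
                         const uint8_t *src, size_t src_size,
                         uint32_t batch_start_offset, uint32_t batch_len)
{
        uint32_t end = batch_start_offset + batch_len;

        /* Same bounds check as the new -E2BIG test in copy_batch(). */
        if (end > dest_size || end > src_size)
                return -E2BIG;

        /* Bytes before the batch start become zeros (i.e. MI_NOOP). */
        memset(dest, 0, batch_start_offset);
        /* Only the user-supplied command range is copied across. */
        memcpy(dest + batch_start_offset, src + batch_start_offset, batch_len);
        /* The remainder of the shadow object is zero-filled as well. */
        memset(dest + end, 0, dest_size - end);

        return 0;
}
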
@@ -1016,6 +1029,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * @batch_obj: the batch buffer in question
  * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
  * @is_master: is the submitting process the drm master?
  *
  * Parses the specified batch buffer looking for privilege violations as
@@ -1028,14 +1042,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
                     struct drm_i915_gem_object *batch_obj,
                     struct drm_i915_gem_object *shadow_batch_obj,
                     u32 batch_start_offset,
+                    u32 batch_len,
                     bool is_master)
 {
         int ret = 0;
         u32 *cmd, *batch_base, *batch_end;
         struct drm_i915_cmd_descriptor default_desc = { 0 };
         bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
-        batch_base = copy_batch(shadow_batch_obj, batch_obj);
+        batch_base = copy_batch(shadow_batch_obj, batch_obj,
+                                batch_start_offset, batch_len);
         if (IS_ERR(batch_base)) {
                 DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
                 return PTR_ERR(batch_base);
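
The call site is not part of this excerpt; for orientation, here is a hedged sketch of how the execbuffer path might supply both values once they are threaded through. It assumes the batch_start_offset and batch_len fields of the execbuffer2 ioctl arguments and local variables (ring, batch_obj, shadow_batch_obj, is_master) in the surrounding code; none of these names are taken from this diff.

        /*
         * Hedged caller sketch, not part of this commit: the execbuffer2
         * ioctl already carries an offset and a length, so both can be
         * handed to the parser as-is.
         */
        ret = i915_parse_cmds(ring,
                              batch_obj,
                              shadow_batch_obj,
                              args->batch_start_offset,
                              args->batch_len,
                              is_master);
        if (ret)
                return ret;
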
@@ -1044,11 +1060,11 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
         cmd = batch_base + (batch_start_offset / sizeof(*cmd));
 
         /*
-         * We use the source object's size because the shadow object is as
+         * We use the batch length as size because the shadow object is as
          * large or larger and copy_batch() will write MI_NOPs to the extra
          * space. Parsing should be faster in some cases this way.
          */
-        batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+        batch_end = cmd + (batch_len / sizeof(*batch_end));
 
         while (cmd < batch_end) {
                 const struct drm_i915_cmd_descriptor *desc;
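
The last hunk changes how far the parser walks: the loop bound now comes from the submitted length rather than from the size of the backing object. A small worked example with illustrative numbers (not from the commit) shows the difference in the parse window.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int obj_size = 64 * 1024;        /* batch object size (illustrative)      */
        unsigned int batch_start_offset = 0x100;  /* offset a client might pass            */
        unsigned int batch_len = 0x40;            /* length of the commands actually queued */

        /* Dword index where parsing starts: cmd = batch_base + offset / 4. */
        unsigned int first_dword = batch_start_offset / sizeof(uint32_t);
        /* Old loop bound: the whole object size, measured from cmd.        */
        unsigned int old_window = obj_size / sizeof(uint32_t);
        /* New loop bound: only the dwords actually submitted.              */
        unsigned int new_window = batch_len / sizeof(uint32_t);

        printf("start dword %u, old window %u dwords, new window %u dwords\n",
               first_dword, old_window, new_window);   /* prints 64, 16384, 16 */
        return 0;
}
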