@@ -871,84 +871,59 @@ int get_compat_sigevent(struct sigevent *event,
871
871
/*
 * Copy a bitmap from a 32-bit (compat) userspace layout into the kernel's
 * native unsigned-long layout.
 *
 * @mask:        kernel destination buffer (native unsigned long words)
 * @umask:       userspace source, an array of compat_ulong_t words
 * @bitmap_size: number of bits in the bitmap
 *
 * Returns 0 on success, -EFAULT if the userspace range is inaccessible or
 * a fault occurs mid-copy.  On a 64-bit kernel each native word is built
 * from two consecutive compat words (low word first); a trailing odd compat
 * word, if any, fills only the low half of the final native word.
 */
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	/* Validate the whole user range once, up front. */
	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	user_access_begin();
	/* Consume compat words in pairs: low half first, then high half. */
	while (nr_compat_longs > 1) {
		compat_ulong_t lo, hi;
		unsafe_get_user(lo, umask++, Efault);
		unsafe_get_user(hi, umask++, Efault);
		*mask++ = ((unsigned long)hi << BITS_PER_COMPAT_LONG) | lo;
		nr_compat_longs -= 2;
	}
	/* Odd trailing compat word: store it alone into the last native word. */
	if (nr_compat_longs)
		unsafe_get_user(*mask, umask++, Efault);
	user_access_end();
	return 0;

Efault:
	/* unsafe_get_user() branched here on a fault; close the access window. */
	user_access_end();
	return -EFAULT;
}
912
900
913
901
/*
 * Copy a bitmap from the kernel's native unsigned-long layout out to a
 * 32-bit (compat) userspace layout.
 *
 * @umask:       userspace destination, an array of compat_ulong_t words
 * @mask:        kernel source buffer (native unsigned long words)
 * @bitmap_size: number of bits in the bitmap
 *
 * Returns 0 on success, -EFAULT if the userspace range is inaccessible or
 * a fault occurs mid-copy.  Each native word is split into two consecutive
 * compat words (low word written first); an odd trailing compat word gets
 * only the low half of the final native word.
 */
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	/* Validate the whole user range once, up front. */
	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	user_access_begin();
	/* Emit compat words in pairs: low half first, then high half. */
	while (nr_compat_longs > 1) {
		unsigned long word = *mask++;
		unsafe_put_user((compat_ulong_t)word, umask++, Efault);
		unsafe_put_user(word >> BITS_PER_COMPAT_LONG, umask++, Efault);
		nr_compat_longs -= 2;
	}
	/* Odd trailing compat word: write only the low half of the last word. */
	if (nr_compat_longs)
		unsafe_put_user((compat_ulong_t)*mask, umask++, Efault);
	user_access_end();
	return 0;

Efault:
	/* unsafe_put_user() branched here on a fault; close the access window. */
	user_access_end();
	return -EFAULT;
}
953
928
954
929
void
0 commit comments