
Commit 3170d8d

Al Viro authored and committed

kill {__,}{get,put}_user_unaligned()

no users left

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

1 parent 468138d · commit 3170d8d

File tree

12 files changed: +0 -369 lines changed
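For context: get_user_unaligned()/put_user_unaligned() and their double-underscore variants were uaccess helpers for user addresses that are not naturally aligned. Architectures without efficient unaligned loads and stores split such an access into smaller, naturally aligned pieces. The following is a minimal userspace analogue of that splitting, an illustration only (little-endian byte order assumed, function names hypothetical); the kernel versions went through __get_user/__put_user and exception-table fixups instead.

#include <stdint.h>
#include <stdio.h>

/* Emulate an unaligned 32-bit access one byte at a time: every byte is
 * naturally aligned, so no unaligned load or store ever happens. */
static void put_u32_unaligned(uint8_t *p, uint32_t x)
{
	for (int i = 0; i < 4; i++)
		p[i] = (uint8_t)(x >> (8 * i));	/* little-endian order */
}

static uint32_t get_u32_unaligned(const uint8_t *p)
{
	uint32_t x = 0;

	for (int i = 0; i < 4; i++)
		x |= (uint32_t)p[i] << (8 * i);
	return x;
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	put_u32_unaligned(buf + 1, 0xdeadbeef);		/* deliberately misaligned */
	printf("0x%x\n", get_u32_unaligned(buf + 1));	/* prints 0xdeadbeef */
	return 0;
}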

arch/arm/include/asm/uaccess.h

Lines changed: 0 additions & 7 deletions
@@ -17,13 +17,6 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-#endif
-
 #include <asm/extable.h>
 
 /*

arch/arm64/include/asm/uaccess.h

Lines changed: 0 additions & 4 deletions
@@ -254,8 +254,6 @@ do { \
 	(void)0; \
 })
 
-#define __get_user_unaligned __get_user
-
 #define get_user(x, ptr) \
 ({ \
 	__typeof__(*(ptr)) __user *__p = (ptr); \
@@ -320,8 +318,6 @@ do { \
 	(void)0; \
 })
 
-#define __put_user_unaligned __put_user
-
 #define put_user(x, ptr) \
 ({ \
 	__typeof__(*(ptr)) __user *__p = (ptr); \

arch/ia64/include/asm/uaccess.h

Lines changed: 0 additions & 36 deletions
@@ -87,42 +87,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
 #define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
 #define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr) \
-({ \
-	long __ret; \
-	switch (sizeof(*(ptr))) { \
-		case 1: __ret = __put_user((x), (ptr)); break; \
-		case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
-			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
-		case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
-			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
-		case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
-			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
-		default: __ret = __put_user_unaligned_unknown(); \
-	} \
-	__ret; \
-})
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr) \
-({ \
-	long __ret; \
-	switch (sizeof(*(ptr))) { \
-		case 1: __ret = __get_user((x), (ptr)); break; \
-		case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
-			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
-		case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
-			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
-		case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
-			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
-		default: __ret = __get_user_unaligned_unknown(); \
-	} \
-	__ret; \
-})
-
 #ifdef ASM_SUPPORTED
   struct __large_struct { unsigned long buf[100]; };
 # define __m(x) (*(struct __large_struct __user *)(x))
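A note on the ia64 block above: each case splits the access into two half-width __get_user/__put_user calls and combines their return codes with |, so if either half faults the whole macro yields a nonzero (-EFAULT) result. Below is a userspace sketch of that error-combining idea only, with a hypothetical try_put_u8() standing in for __put_user(); it is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __put_user(): returns 0 on success, -14 (-EFAULT) on a
 * "fault", modeled here as a NULL destination. Hypothetical helper. */
static int try_put_u8(uint8_t *dst, uint8_t val)
{
	if (!dst)
		return -14;
	*dst = val;
	return 0;
}

/* Store 16 bits as two byte stores and OR the return codes, so a fault
 * in either half propagates -- the same combining the deleted macros used. */
static int put_u16_split(uint8_t *dst, uint16_t x)
{
	return try_put_u8(dst, (uint8_t)x) |
	       try_put_u8(dst ? dst + 1 : NULL, (uint8_t)(x >> 8));
}

int main(void)
{
	uint8_t buf[2];

	printf("%d\n", put_u16_split(buf, 0xbeef));	/* 0   */
	printf("%d\n", put_u16_split(NULL, 0xbeef));	/* -14 */
	return 0;
}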

arch/m68k/include/asm/uaccess.h

Lines changed: 0 additions & 7 deletions
@@ -3,11 +3,4 @@
 #else
 #include <asm/uaccess_mm.h>
 #endif
-
 #include <asm/extable.h>
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned(x, ptr) __get_user((x), (ptr))
-#define __put_user_unaligned(x, ptr) __put_user((x), (ptr))
-#endif

arch/mips/include/asm/uaccess.h

Lines changed: 0 additions & 277 deletions
@@ -496,283 +496,6 @@ do { \
 
 extern void __put_user_unknown(void);
 
-/*
- * ul{b,h,w} are macros and there are no equivalent macros for EVA.
- * EVA unaligned access is handled in the ADE exception handler.
- */
-#ifndef CONFIG_EVA
-/*
- * put_user_unaligned: - Write a simple value into user space.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user_unaligned(x,ptr) \
-	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * get_user_unaligned: - Get a simple variable from user space.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user_unaligned(x,ptr) \
-	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user_unaligned(x,ptr) \
-	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user_unaligned(x,ptr) \
-	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
-	__get_user_unaligned_asm_ll32(val, ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
-	__get_user_unaligned_asm(val, "uld", ptr)
-#endif
-
-extern void __get_user_unaligned_unknown(void);
-
-#define __get_user_unaligned_common(val, size, ptr) \
-do { \
-	switch (size) { \
-	case 1: __get_data_asm(val, "lb", ptr); break; \
-	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
-	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
-	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
-	default: __get_user_unaligned_unknown(); break; \
-	} \
-} while (0)
-
-#define __get_user_unaligned_nocheck(x,ptr,size) \
-({ \
-	int __gu_err; \
-	\
-	__get_user_unaligned_common((x), size, ptr); \
-	__gu_err; \
-})
-
-#define __get_user_unaligned_check(x,ptr,size) \
-({ \
-	int __gu_err = -EFAULT; \
-	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
-	\
-	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
-		__get_user_unaligned_common((x), size, __gu_ptr); \
-	\
-	__gu_err; \
-})
-
-#define __get_data_unaligned_asm(val, insn, addr) \
-{ \
-	long __gu_tmp; \
-	\
-	__asm__ __volatile__( \
-	"1:	" insn "	%1, %3 \n" \
-	"2: \n" \
-	"	.insn \n" \
-	"	.section .fixup,\"ax\" \n" \
-	"3:	li	%0, %4 \n" \
-	"	move	%1, $0 \n" \
-	"	j	2b \n" \
-	"	.previous \n" \
-	"	.section __ex_table,\"a\" \n" \
-	"	"__UA_ADDR "\t1b, 3b \n" \
-	"	"__UA_ADDR "\t1b + 4, 3b \n" \
-	"	.previous \n" \
-	: "=r" (__gu_err), "=r" (__gu_tmp) \
-	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
-	\
-	(val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Get a long long 64 using 32 bit registers.
- */
-#define __get_user_unaligned_asm_ll32(val, addr) \
-{ \
-	unsigned long long __gu_tmp; \
-	\
-	__asm__ __volatile__( \
-	"1:	ulw	%1, (%3) \n" \
-	"2:	ulw	%D1, 4(%3) \n" \
-	"	move	%0, $0 \n" \
-	"3: \n" \
-	"	.insn \n" \
-	"	.section	.fixup,\"ax\" \n" \
-	"4:	li	%0, %4 \n" \
-	"	move	%1, $0 \n" \
-	"	move	%D1, $0 \n" \
-	"	j	3b \n" \
-	"	.previous \n" \
-	"	.section	__ex_table,\"a\" \n" \
-	"	" __UA_ADDR "	1b, 4b \n" \
-	"	" __UA_ADDR "	1b + 4, 4b \n" \
-	"	" __UA_ADDR "	2b, 4b \n" \
-	"	" __UA_ADDR "	2b + 4, 4b \n" \
-	"	.previous \n" \
-	: "=r" (__gu_err), "=&r" (__gu_tmp) \
-	: "0" (0), "r" (addr), "i" (-EFAULT)); \
-	(val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
-#endif
-
-#define __put_user_unaligned_common(ptr, size) \
-do { \
-	switch (size) { \
-	case 1: __put_data_asm("sb", ptr); break; \
-	case 2: __put_user_unaligned_asm("ush", ptr); break; \
-	case 4: __put_user_unaligned_asm("usw", ptr); break; \
-	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
-	default: __put_user_unaligned_unknown(); break; \
-} while (0)
-
-#define __put_user_unaligned_nocheck(x,ptr,size) \
-({ \
-	__typeof__(*(ptr)) __pu_val; \
-	int __pu_err = 0; \
-	\
-	__pu_val = (x); \
-	__put_user_unaligned_common(ptr, size); \
-	__pu_err; \
-})
-
-#define __put_user_unaligned_check(x,ptr,size) \
-({ \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	__typeof__(*(ptr)) __pu_val = (x); \
-	int __pu_err = -EFAULT; \
-	\
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
-		__put_user_unaligned_common(__pu_addr, size); \
-	\
-	__pu_err; \
-})
-
-#define __put_user_unaligned_asm(insn, ptr) \
-{ \
-	__asm__ __volatile__( \
-	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n" \
-	"2: \n" \
-	"	.insn \n" \
-	"	.section	.fixup,\"ax\" \n" \
-	"3:	li	%0, %4 \n" \
-	"	j	2b \n" \
-	"	.previous \n" \
-	"	.section	__ex_table,\"a\" \n" \
-	"	" __UA_ADDR "	1b, 3b \n" \
-	"	.previous \n" \
-	: "=r" (__pu_err) \
-	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
-	  "i" (-EFAULT)); \
-}
-
-#define __put_user_unaligned_asm_ll32(ptr) \
-{ \
-	__asm__ __volatile__( \
-	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
-	"2:	sw	%D2, 4(%3) \n" \
-	"3: \n" \
-	"	.insn \n" \
-	"	.section	.fixup,\"ax\" \n" \
-	"4:	li	%0, %4 \n" \
-	"	j	3b \n" \
-	"	.previous \n" \
-	"	.section	__ex_table,\"a\" \n" \
-	"	" __UA_ADDR "	1b, 4b \n" \
-	"	" __UA_ADDR "	1b + 4, 4b \n" \
-	"	" __UA_ADDR "	2b, 4b \n" \
-	"	" __UA_ADDR "	2b + 4, 4b \n" \
-	"	.previous" \
-	: "=r" (__pu_err) \
-	: "0" (0), "r" (__pu_val), "r" (ptr), \
-	  "i" (-EFAULT)); \
-}
-
-extern void __put_user_unaligned_unknown(void);
-#endif
-
 /*
  * We're generating jump to subroutines which will be outside the range of
  * jump instructions
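Unlike ia64, MIPS did not synthesize most unaligned accesses from smaller ones: the deleted macros above wrap the dedicated unaligned load/store instructions (ulh/ulw/uld, ush/usw/usd) in inline asm with exception-table fixups. Only the 64-bit case on 32-bit kernels (__get_user_unaligned_asm_ll32) splits the access into two 32-bit unaligned words. Below is a userspace analogue of that ll32 split, illustrative only: memcpy() stands in for the ulw instructions, and little-endian word order is assumed where the real macro used the %1/%D1 register pair.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 64-bit unaligned load done as two 32-bit unaligned loads, mirroring
 * the shape of __get_user_unaligned_asm_ll32 above. */
static uint64_t get_u64_unaligned_ll32(const uint8_t *p)
{
	uint32_t lo, hi;

	memcpy(&lo, p, sizeof(lo));	/* 1: ulw %1, (%3)   */
	memcpy(&hi, p + 4, sizeof(hi));	/* 2: ulw %D1, 4(%3) */
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint8_t buf[9] = { 0 };
	uint64_t v = 0x0123456789abcdefULL;

	memcpy(buf + 1, &v, sizeof(v));	/* misaligned source */
	printf("0x%llx\n", (unsigned long long)get_u64_unaligned_ll32(buf + 1));
	return 0;
}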

arch/parisc/include/asm/uaccess.h

Lines changed: 0 additions & 1 deletion
@@ -6,7 +6,6 @@
  */
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
 #include <linux/string.h>

arch/powerpc/include/asm/uaccess.h

Lines changed: 0 additions & 3 deletions
@@ -90,9 +90,6 @@
 #define __put_user_inatomic(x, ptr) \
 	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
 extern long __put_user_bad(void);
 
 /*

arch/s390/include/asm/uaccess.h

Lines changed: 0 additions & 3 deletions
@@ -249,9 +249,6 @@ int __put_user_bad(void) __attribute__((noreturn));
 
 int __get_user_bad(void) __attribute__((noreturn));
 
-#define __put_user_unaligned __put_user
-#define __get_user_unaligned __get_user
-
 unsigned long __must_check
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
