
Commit ff47ab4 (1 parent: 6e46645)

Andi Kleen authored and H. Peter Anvin committed
x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic

The 64bit __copy_{from,to}_user_inatomic always called
copy_from_user_generic, but skipped the special optimizations for
1/2/4/8 byte accesses. This especially hurts the futex call, which
accesses the 4 byte futex user value with a complicated fast string
operation in a function call, instead of a single movl.

Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those
functions. So move that into new wrappers and call
__copy_{f,t}_user_nocheck() from *_inatomic directly.

32bit already did this correctly by duplicating the code.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
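Why the constant-size dispatch matters, as a minimal userspace sketch (hypothetical names, not kernel code): when the size argument is a compile-time constant such as 4, __builtin_constant_p() lets the switch collapse to a single 32-bit move after inlining, while a non-constant size falls back to the out-of-line generic copy. This is the fast path the _inatomic variants were missing.

#include <stdint.h>
#include <string.h>

/* Stand-in for copy_user_generic(): an out-of-line call the compiler
 * treats as opaque.  Hypothetical name; returns 0 on success here. */
__attribute__((noinline))
static int copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

/* Size-dispatched copy in the style of uaccess_64.h: when "size" is a
 * compile-time constant, the matching case inlines to one load/store
 * (a single movl for 4 bytes) instead of the function call above. */
static inline int copy_small(void *dst, const void *src, unsigned size)
{
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);
	switch (size) {
	case 1: *(uint8_t  *)dst = *(const uint8_t  *)src; return 0;
	case 2: *(uint16_t *)dst = *(const uint16_t *)src; return 0;
	case 4: *(uint32_t *)dst = *(const uint32_t *)src; return 0;
	case 8: *(uint64_t *)dst = *(const uint64_t *)src; return 0;
	default: return copy_generic(dst, src, size);
	}
}

int main(void)
{
	uint32_t uval = 0, futex_word = 42;
	copy_small(&uval, &futex_word, sizeof(uval)); /* constant size: one movl */
	return uval == 42 ? 0 : 1;
}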

File tree

1 file changed: +18 −6 lines

arch/x86/include/asm/uaccess_64.h

Lines changed: 18 additions & 6 deletions
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -164,6 +169,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 	}
 }
 
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
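
For reference, the call site the commit message singles out looks roughly like this in kernel/futex.c of that era (paraphrased from memory, not part of this diff): the size is the constant sizeof(u32), so with this change the inlined __copy_from_user_inatomic() reduces to a single movl plus the exception-table fixup rather than a call into copy_user_generic().

/* Roughly the futex helper this change speeds up (paraphrased, not in
 * this diff).  sizeof(u32) is a compile-time constant, so the copy now
 * inlines to a single movl instead of calling copy_user_generic(). */
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}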
