Skip to content

Commit d6e867a

Browse files
committed
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fpu updates from Ingo Molnar:

"Misc preparatory changes for an upcoming FPU optimization that will delay the loading of FPU registers to return-to-userspace"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Don't export __kernel_fpu_{begin,end}()
  x86/fpu: Update comment for __raw_xsave_addr()
  x86/fpu: Add might_fault() to user_insn()
  x86/pkeys: Make init_pkru_value static
  x86/thread_info: Remove _TIF_ALLWORK_MASK
  x86/process/32: Remove asm/math_emu.h include
  x86/fpu: Use unsigned long long shift in xfeature_uncompacted_offset()
2 parents db2ab47 + 1220999 commit d6e867a

File tree

8 files changed

+14
-32
lines changed

8 files changed

+14
-32
lines changed

arch/x86/include/asm/efi.h

Lines changed: 2 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -82,8 +82,7 @@ struct efi_scratch {
8282
#define arch_efi_call_virt_setup() \
8383
({ \
8484
efi_sync_low_kernel_mappings(); \
85-
preempt_disable(); \
86-
__kernel_fpu_begin(); \
85+
kernel_fpu_begin(); \
8786
firmware_restrict_branch_speculation_start(); \
8887
\
8988
if (!efi_enabled(EFI_OLD_MEMMAP)) \
@@ -99,8 +98,7 @@ struct efi_scratch {
9998
efi_switch_mm(efi_scratch.prev_mm); \
10099
\
101100
firmware_restrict_branch_speculation_end(); \
102-
__kernel_fpu_end(); \
103-
preempt_enable(); \
101+
kernel_fpu_end(); \
104102
})
105103

106104
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,

arch/x86/include/asm/fpu/api.h

Lines changed: 5 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -12,17 +12,12 @@
1212
#define _ASM_X86_FPU_API_H
1313

1414
/*
15-
* Careful: __kernel_fpu_begin/end() must be called with preempt disabled
16-
* and they don't touch the preempt state on their own.
17-
* If you enable preemption after __kernel_fpu_begin(), preempt notifier
18-
* should call the __kernel_fpu_end() to prevent the kernel/user FPU
19-
* state from getting corrupted. KVM for example uses this model.
20-
*
21-
* All other cases use kernel_fpu_begin/end() which disable preemption
22-
* during kernel FPU usage.
15+
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
16+
* disables preemption so be careful if you intend to use it for long periods
17+
* of time.
18+
* If you intend to use the FPU in softirq you need to check first with
19+
* irq_fpu_usable() if it is possible.
2320
*/
24-
extern void __kernel_fpu_begin(void);
25-
extern void __kernel_fpu_end(void);
2621
extern void kernel_fpu_begin(void);
2722
extern void kernel_fpu_end(void);
2823
extern bool irq_fpu_usable(void);

arch/x86/include/asm/fpu/internal.h

Lines changed: 3 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
106106
#define user_insn(insn, output, input...) \
107107
({ \
108108
int err; \
109+
\
110+
might_fault(); \
111+
\
109112
asm volatile(ASM_STAC "\n" \
110113
"1:" #insn "\n\t" \
111114
"2: " ASM_CLAC "\n" \

arch/x86/include/asm/thread_info.h

Lines changed: 0 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -140,14 +140,6 @@ struct thread_info {
140140
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
141141
_TIF_NOHZ)
142142

143-
/* work to do on any return to user space */
144-
#define _TIF_ALLWORK_MASK \
145-
(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
146-
_TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \
147-
_TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \
148-
_TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT | \
149-
_TIF_FSCHECK)
150-
151143
/* flags to check in __switch_to() */
152144
#define _TIF_WORK_CTXSW_BASE \
153145
(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \

arch/x86/kernel/fpu/core.c

Lines changed: 2 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
9393
}
9494
EXPORT_SYMBOL(irq_fpu_usable);
9595

96-
void __kernel_fpu_begin(void)
96+
static void __kernel_fpu_begin(void)
9797
{
9898
struct fpu *fpu = &current->thread.fpu;
9999

@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
111111
__cpu_invalidate_fpregs_state();
112112
}
113113
}
114-
EXPORT_SYMBOL(__kernel_fpu_begin);
115114

116-
void __kernel_fpu_end(void)
115+
static void __kernel_fpu_end(void)
117116
{
118117
struct fpu *fpu = &current->thread.fpu;
119118

@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
122121

123122
kernel_fpu_enable();
124123
}
125-
EXPORT_SYMBOL(__kernel_fpu_end);
126124

127125
void kernel_fpu_begin(void)
128126
{

arch/x86/kernel/fpu/xstate.c

Lines changed: 1 addition & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -444,7 +444,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
444444
* format. Checking a supervisor state's uncompacted offset is
445445
* an error.
446446
*/
447-
if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
447+
if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
448448
WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
449449
return -1;
450450
}
@@ -808,8 +808,6 @@ void fpu__resume_cpu(void)
808808
* Given an xstate feature mask, calculate where in the xsave
809809
* buffer the state is. Callers should ensure that the buffer
810810
* is valid.
811-
*
812-
* Note: does not work for compacted buffers.
813811
*/
814812
static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
815813
{

arch/x86/kernel/process_32.c

Lines changed: 0 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -44,9 +44,6 @@
4444
#include <asm/processor.h>
4545
#include <asm/fpu/internal.h>
4646
#include <asm/desc.h>
47-
#ifdef CONFIG_MATH_EMULATION
48-
#include <asm/math_emu.h>
49-
#endif
5047

5148
#include <linux/err.h>
5249

arch/x86/mm/pkeys.c

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -131,6 +131,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
131131
* in the process's lifetime will not accidentally get access
132132
* to data which is pkey-protected later on.
133133
*/
134+
static
134135
u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
135136
PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
136137
PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |

0 commit comments

Comments (0)