Skip to content

Commit c9cdd9a

Browse files
committed
Merge branch 'x86/mpx' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpufeature and mpx updates from Peter Anvin:
 "This includes the basic infrastructure for MPX (Memory Protection
  Extensions) support, but does not include MPX support itself. It is,
  however, a prerequisite for KVM support for MPX, which I believe will
  be pushed later this merge window by the KVM team. This includes
  moving the functionality in futex_atomic_cmpxchg_inatomic() into a new
  function in uaccess.h so it can be reused - this will be used by the
  final MPX patches.

  The actual MPX functionality (map management and so on) will be pushed
  in a future merge window, when ready"

* 'x86/mpx' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/intel/mpx: Remove unused LWP structure
  x86, mpx: Add MPX related opcodes to the x86 opcode map
  x86: replace futex_atomic_cmpxchg_inatomic() with user_atomic_cmpxchg_inatomic
  x86: add user_atomic_cmpxchg_inatomic at uaccess.h
  x86, xsave: Support eager-only xsave features, add MPX support
  x86, cpufeature: Define the Intel MPX feature flag
2 parents f4bcd8c + 741e390 commit c9cdd9a

File tree

7 files changed

+133
-26
lines changed

arch/x86/include/asm/cpufeature.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,7 @@
216216
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
217217
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
218218
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
219+
#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
219220
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
220221
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
221222
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */

arch/x86/include/asm/futex.h

Lines changed: 1 addition & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
110110
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
111111
u32 oldval, u32 newval)
112112
{
113-
int ret = 0;
114-
115-
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
116-
return -EFAULT;
117-
118-
asm volatile("\t" ASM_STAC "\n"
119-
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
120-
"2:\t" ASM_CLAC "\n"
121-
"\t.section .fixup, \"ax\"\n"
122-
"3:\tmov %3, %0\n"
123-
"\tjmp 2b\n"
124-
"\t.previous\n"
125-
_ASM_EXTABLE(1b, 3b)
126-
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
127-
: "i" (-EFAULT), "r" (newval), "1" (oldval)
128-
: "memory"
129-
);
130-
131-
*uval = oldval;
132-
return ret;
113+
return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
133114
}
134115

135116
#endif

arch/x86/include/asm/processor.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -370,6 +370,20 @@ struct ymmh_struct {
370370
u32 ymmh_space[64];
371371
};
372372

373+
/* We don't support LWP yet: */
374+
struct lwp_struct {
375+
u8 reserved[128];
376+
};
377+
378+
struct bndregs_struct {
379+
u64 bndregs[8];
380+
} __packed;
381+
382+
struct bndcsr_struct {
383+
u64 cfg_reg_u;
384+
u64 status_reg;
385+
} __packed;
386+
373387
struct xsave_hdr_struct {
374388
u64 xstate_bv;
375389
u64 reserved1[2];
@@ -380,6 +394,9 @@ struct xsave_struct {
380394
struct i387_fxsave_struct i387;
381395
struct xsave_hdr_struct xsave_hdr;
382396
struct ymmh_struct ymmh;
397+
struct lwp_struct lwp;
398+
struct bndregs_struct bndregs;
399+
struct bndcsr_struct bndcsr;
383400
/* new processor state extensions will go here */
384401
} __attribute__ ((packed, aligned (64)));
385402

arch/x86/include/asm/uaccess.h

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n);
533533
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
534534
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
535535

536+
extern void __cmpxchg_wrong_size(void)
537+
__compiletime_error("Bad argument size for cmpxchg");
538+
539+
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
540+
({ \
541+
int __ret = 0; \
542+
__typeof__(ptr) __uval = (uval); \
543+
__typeof__(*(ptr)) __old = (old); \
544+
__typeof__(*(ptr)) __new = (new); \
545+
switch (size) { \
546+
case 1: \
547+
{ \
548+
asm volatile("\t" ASM_STAC "\n" \
549+
"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
550+
"2:\t" ASM_CLAC "\n" \
551+
"\t.section .fixup, \"ax\"\n" \
552+
"3:\tmov %3, %0\n" \
553+
"\tjmp 2b\n" \
554+
"\t.previous\n" \
555+
_ASM_EXTABLE(1b, 3b) \
556+
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
557+
: "i" (-EFAULT), "q" (__new), "1" (__old) \
558+
: "memory" \
559+
); \
560+
break; \
561+
} \
562+
case 2: \
563+
{ \
564+
asm volatile("\t" ASM_STAC "\n" \
565+
"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
566+
"2:\t" ASM_CLAC "\n" \
567+
"\t.section .fixup, \"ax\"\n" \
568+
"3:\tmov %3, %0\n" \
569+
"\tjmp 2b\n" \
570+
"\t.previous\n" \
571+
_ASM_EXTABLE(1b, 3b) \
572+
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
573+
: "i" (-EFAULT), "r" (__new), "1" (__old) \
574+
: "memory" \
575+
); \
576+
break; \
577+
} \
578+
case 4: \
579+
{ \
580+
asm volatile("\t" ASM_STAC "\n" \
581+
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
582+
"2:\t" ASM_CLAC "\n" \
583+
"\t.section .fixup, \"ax\"\n" \
584+
"3:\tmov %3, %0\n" \
585+
"\tjmp 2b\n" \
586+
"\t.previous\n" \
587+
_ASM_EXTABLE(1b, 3b) \
588+
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
589+
: "i" (-EFAULT), "r" (__new), "1" (__old) \
590+
: "memory" \
591+
); \
592+
break; \
593+
} \
594+
case 8: \
595+
{ \
596+
if (!IS_ENABLED(CONFIG_X86_64)) \
597+
__cmpxchg_wrong_size(); \
598+
\
599+
asm volatile("\t" ASM_STAC "\n" \
600+
"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
601+
"2:\t" ASM_CLAC "\n" \
602+
"\t.section .fixup, \"ax\"\n" \
603+
"3:\tmov %3, %0\n" \
604+
"\tjmp 2b\n" \
605+
"\t.previous\n" \
606+
_ASM_EXTABLE(1b, 3b) \
607+
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
608+
: "i" (-EFAULT), "r" (__new), "1" (__old) \
609+
: "memory" \
610+
); \
611+
break; \
612+
} \
613+
default: \
614+
__cmpxchg_wrong_size(); \
615+
} \
616+
*__uval = __old; \
617+
__ret; \
618+
})
619+
620+
#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
621+
({ \
622+
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
623+
__user_atomic_cmpxchg_inatomic((uval), (ptr), \
624+
(old), (new), sizeof(*(ptr))) : \
625+
-EFAULT; \
626+
})
627+
536628
/*
537629
* movsl can be slow when source and dest are not both 8-byte aligned
538630
*/

arch/x86/include/asm/xsave.h

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@
99
#define XSTATE_FP 0x1
1010
#define XSTATE_SSE 0x2
1111
#define XSTATE_YMM 0x4
12+
#define XSTATE_BNDREGS 0x8
13+
#define XSTATE_BNDCSR 0x10
1214

1315
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
1416

@@ -20,10 +22,14 @@
2022
#define XSAVE_YMM_SIZE 256
2123
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
2224

23-
/*
24-
* These are the features that the OS can handle currently.
25-
*/
26-
#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
25+
/* Supported features which support lazy state saving */
26+
#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
27+
28+
/* Supported features which require eager state saving */
29+
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
30+
31+
/* All currently supported features */
32+
#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
2733

2834
#ifdef CONFIG_X86_64
2935
#define REX_PREFIX "0x48, "

arch/x86/kernel/xsave.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void)
562562
if (cpu_has_xsaveopt && eagerfpu != DISABLE)
563563
eagerfpu = ENABLE;
564564

565+
if (pcntxt_mask & XSTATE_EAGER) {
566+
if (eagerfpu == DISABLE) {
567+
pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
568+
pcntxt_mask & XSTATE_EAGER);
569+
pcntxt_mask &= ~XSTATE_EAGER;
570+
} else {
571+
eagerfpu = ENABLE;
572+
}
573+
}
574+
565575
pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
566576
pcntxt_mask, xstate_size);
567577
}

arch/x86/lib/x86-opcode-map.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -346,8 +346,8 @@ AVXcode: 1
346346
17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
347347
18: Grp16 (1A)
348348
19:
349-
1a:
350-
1b:
349+
1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
350+
1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
351351
1c:
352352
1d:
353353
1e:

0 commit comments

Comments (0)