Skip to content

Commit 38452af

Browse files
author
Ingo Molnar
committed
Merge branch 'x86/asm' into x86/mm, to resolve conflicts
Conflicts:
	tools/testing/selftests/x86/Makefile

Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents dcb32d9 + be8a18e commit 38452af

File tree

162 files changed

+3790
-817
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

162 files changed

+3790
-817
lines changed

Documentation/x86/intel_mpx.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
4545
MPX-instrumented.
4646
3) The kernel detects that the CPU has MPX, allows the new prctl() to
4747
succeed, and notes the location of the bounds directory. Userspace is
48-
expected to keep the bounds directory at that locationWe note it
48+
expected to keep the bounds directory at that location. We note it
4949
instead of reading it each time because the 'xsave' operation needed
5050
to access the bounds directory register is an expensive operation.
5151
4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
167167
We need to decode MPX instructions to get violation address and
168168
set this address into extended struct siginfo.
169169

170-
The _sigfault feild of struct siginfo is extended as follow:
170+
The _sigfault field of struct siginfo is extended as follow:
171171

172172
87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
173173
88 struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
240240
This is allowed architecturally. See more information "Intel(R) Architecture
241241
Instruction Set Extensions Programming Reference" (9.3.4).
242242

243-
However, if users did this, the kernel might be fooled in to unmaping an
243+
However, if users did this, the kernel might be fooled in to unmapping an
244244
in-use bounds table since it does not recognize sharing.

Documentation/x86/tlb.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ memory, it has two choices:
55
from areas other than the one we are trying to flush will be
66
destroyed and must be refilled later, at some cost.
77
2. Use the invlpg instruction to invalidate a single page at a
8-
time. This could potentialy cost many more instructions, but
8+
time. This could potentially cost many more instructions, but
99
it is a much more precise operation, causing no collateral
1010
damage to other TLB entries.
1111

@@ -19,7 +19,7 @@ Which method to do depends on a few things:
1919
work.
2020
3. The size of the TLB. The larger the TLB, the more collateral
2121
damage we do with a full flush. So, the larger the TLB, the
22-
more attrative an individual flush looks. Data and
22+
more attractive an individual flush looks. Data and
2323
instructions have separate TLBs, as do different page sizes.
2424
4. The microarchitecture. The TLB has become a multi-level
2525
cache on modern CPUs, and the global flushes have become more

Documentation/x86/x86_64/machinecheck

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ between all CPUs.
3636

3737
check_interval
3838
How often to poll for corrected machine check errors, in seconds
39-
(Note output is hexademical). Default 5 minutes. When the poller
39+
(Note output is hexadecimal). Default 5 minutes. When the poller
4040
finds MCEs it triggers an exponential speedup (poll more often) on
4141
the polling interval. When the poller stops finding MCEs, it
4242
triggers an exponential backoff (poll less often) on the polling

arch/arm64/include/asm/cputype.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,12 +80,14 @@
8080
#define APM_CPU_PART_POTENZA 0x000
8181

8282
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
83+
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
8384

8485
#define BRCM_CPU_PART_VULCAN 0x516
8586

8687
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
8788
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
8889
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
90+
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
8991

9092
#ifndef __ASSEMBLY__
9193

arch/arm64/include/asm/ptrace.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,8 @@ struct pt_regs {
117117
};
118118
u64 orig_x0;
119119
u64 syscallno;
120+
u64 orig_addr_limit;
121+
u64 unused; // maintain 16 byte alignment
120122
};
121123

122124
#define arch_has_single_step() (1)

arch/arm64/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ int main(void)
6060
DEFINE(S_PC, offsetof(struct pt_regs, pc));
6161
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
6262
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
63+
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
6364
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
6465
BLANK();
6566
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));

arch/arm64/kernel/cpu_errata.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
9898
MIDR_RANGE(MIDR_THUNDERX, 0x00,
9999
(1 << MIDR_VARIANT_SHIFT) | 1),
100100
},
101+
{
102+
/* Cavium ThunderX, T81 pass 1.0 */
103+
.desc = "Cavium erratum 27456",
104+
.capability = ARM64_WORKAROUND_CAVIUM_27456,
105+
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
106+
},
101107
#endif
102108
{
103109
}

arch/arm64/kernel/entry.S

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
#include <asm/errno.h>
2929
#include <asm/esr.h>
3030
#include <asm/irq.h>
31+
#include <asm/memory.h>
3132
#include <asm/thread_info.h>
3233
#include <asm/unistd.h>
3334

@@ -97,7 +98,14 @@
9798
mov x29, xzr // fp pointed to user-space
9899
.else
99100
add x21, sp, #S_FRAME_SIZE
100-
.endif
101+
get_thread_info tsk
102+
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
103+
ldr x20, [tsk, #TI_ADDR_LIMIT]
104+
str x20, [sp, #S_ORIG_ADDR_LIMIT]
105+
mov x20, #TASK_SIZE_64
106+
str x20, [tsk, #TI_ADDR_LIMIT]
107+
ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
108+
.endif /* \el == 0 */
101109
mrs x22, elr_el1
102110
mrs x23, spsr_el1
103111
stp lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
128136
.endm
129137

130138
.macro kernel_exit, el
139+
.if \el != 0
140+
/* Restore the task's original addr_limit. */
141+
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
142+
str x20, [tsk, #TI_ADDR_LIMIT]
143+
144+
/* No need to restore UAO, it will be restored from SPSR_EL1 */
145+
.endif
146+
131147
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
132148
.if \el == 0
133149
ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
406422
bl trace_hardirqs_off
407423
#endif
408424

409-
get_thread_info tsk
410425
irq_handler
411426

412427
#ifdef CONFIG_PREEMPT

arch/arm64/mm/fault.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
280280
}
281281

282282
if (permission_fault(esr) && (addr < USER_DS)) {
283-
if (get_fs() == KERNEL_DS)
283+
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
284+
if (regs->orig_addr_limit == KERNEL_DS)
284285
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
285286

286287
if (!search_exception_tables(regs->pc))

arch/x86/Kconfig

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -294,11 +294,6 @@ config X86_32_LAZY_GS
294294
def_bool y
295295
depends on X86_32 && !CC_STACKPROTECTOR
296296

297-
config ARCH_HWEIGHT_CFLAGS
298-
string
299-
default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
300-
default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
301-
302297
config ARCH_SUPPORTS_UPROBES
303298
def_bool y
304299

arch/x86/boot/bitops.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,16 @@
1616
#define BOOT_BITOPS_H
1717
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */
1818

19-
static inline int constant_test_bit(int nr, const void *addr)
19+
#include <linux/types.h>
20+
21+
static inline bool constant_test_bit(int nr, const void *addr)
2022
{
2123
const u32 *p = (const u32 *)addr;
2224
return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
2325
}
24-
static inline int variable_test_bit(int nr, const void *addr)
26+
static inline bool variable_test_bit(int nr, const void *addr)
2527
{
26-
u8 v;
28+
bool v;
2729
const u32 *p = (const u32 *)addr;
2830

2931
asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));

arch/x86/boot/boot.h

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include <linux/types.h>
2525
#include <linux/edd.h>
2626
#include <asm/setup.h>
27+
#include <asm/asm.h>
2728
#include "bitops.h"
2829
#include "ctype.h"
2930
#include "cpuflags.h"
@@ -176,18 +177,18 @@ static inline void wrgs32(u32 v, addr_t addr)
176177
}
177178

178179
/* Note: these only return true/false, not a signed return value! */
179-
static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
180+
static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
180181
{
181-
u8 diff;
182-
asm volatile("fs; repe; cmpsb; setnz %0"
183-
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
182+
bool diff;
183+
asm volatile("fs; repe; cmpsb" CC_SET(nz)
184+
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
184185
return diff;
185186
}
186-
static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
187+
static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
187188
{
188-
u8 diff;
189-
asm volatile("gs; repe; cmpsb; setnz %0"
190-
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
189+
bool diff;
190+
asm volatile("gs; repe; cmpsb" CC_SET(nz)
191+
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
191192
return diff;
192193
}
193194

arch/x86/boot/string.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
int memcmp(const void *s1, const void *s2, size_t len)
1919
{
20-
u8 diff;
20+
bool diff;
2121
asm("repe; cmpsb; setnz %0"
2222
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
2323
return diff;

arch/x86/entry/common.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,10 +40,10 @@ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
4040

4141
#ifdef CONFIG_CONTEXT_TRACKING
4242
/* Called on entry from user mode with IRQs off. */
43-
__visible void enter_from_user_mode(void)
43+
__visible inline void enter_from_user_mode(void)
4444
{
4545
CT_WARN_ON(ct_state() != CONTEXT_USER);
46-
user_exit();
46+
user_exit_irqoff();
4747
}
4848
#else
4949
static inline void enter_from_user_mode(void) {}
@@ -274,7 +274,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
274274
ti->status &= ~TS_COMPAT;
275275
#endif
276276

277-
user_enter();
277+
user_enter_irqoff();
278278
}
279279

280280
#define SYSCALL_EXIT_WORK_FLAGS \

arch/x86/entry/thunk_64.S

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
.endif
3434

3535
call \func
36-
jmp restore
36+
jmp .L_restore
3737
_ASM_NOKPROBE(\name)
3838
.endm
3939

@@ -54,7 +54,7 @@
5454
#if defined(CONFIG_TRACE_IRQFLAGS) \
5555
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
5656
|| defined(CONFIG_PREEMPT)
57-
restore:
57+
.L_restore:
5858
popq %r11
5959
popq %r10
6060
popq %r9
@@ -66,5 +66,5 @@ restore:
6666
popq %rdi
6767
popq %rbp
6868
ret
69-
_ASM_NOKPROBE(restore)
69+
_ASM_NOKPROBE(.L_restore)
7070
#endif

arch/x86/entry/vdso/Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
134134
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
135135

136136
targets += vdso32/vdso32.lds
137-
targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
137+
targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
138138
targets += vdso32/vclock_gettime.o
139139

140140
KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
@@ -156,7 +156,8 @@ $(obj)/vdso32.so.dbg: FORCE \
156156
$(obj)/vdso32/vdso32.lds \
157157
$(obj)/vdso32/vclock_gettime.o \
158158
$(obj)/vdso32/note.o \
159-
$(obj)/vdso32/system_call.o
159+
$(obj)/vdso32/system_call.o \
160+
$(obj)/vdso32/sigreturn.o
160161
$(call if_changed,vdso)
161162

162163
#

arch/x86/entry/vdso/vdso32/sigreturn.S

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,3 @@
1-
/*
2-
* Common code for the sigreturn entry points in vDSO images.
3-
* So far this code is the same for both int80 and sysenter versions.
4-
* This file is #include'd by int80.S et al to define them first thing.
5-
* The kernel assumes that the addresses of these routines are constant
6-
* for all vDSO implementations.
7-
*/
8-
91
#include <linux/linkage.h>
102
#include <asm/unistd_32.h>
113
#include <asm/asm-offsets.h>

arch/x86/entry/vdso/vdso32/system_call.S

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,16 +2,11 @@
22
* AT_SYSINFO entry point
33
*/
44

5+
#include <linux/linkage.h>
56
#include <asm/dwarf2.h>
67
#include <asm/cpufeatures.h>
78
#include <asm/alternative-asm.h>
89

9-
/*
10-
* First get the common code for the sigreturn entry points.
11-
* This must come first.
12-
*/
13-
#include "sigreturn.S"
14-
1510
.text
1611
.globl __kernel_vsyscall
1712
.type __kernel_vsyscall,@function

arch/x86/events/core.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2319,7 +2319,7 @@ void
23192319
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
23202320
{
23212321
struct stack_frame frame;
2322-
const void __user *fp;
2322+
const unsigned long __user *fp;
23232323

23242324
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
23252325
/* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
23322332
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
23332333
return;
23342334

2335-
fp = (void __user *)regs->bp;
2335+
fp = (unsigned long __user *)regs->bp;
23362336

23372337
perf_callchain_store(entry, regs->ip);
23382338

@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
23452345
pagefault_disable();
23462346
while (entry->nr < entry->max_stack) {
23472347
unsigned long bytes;
2348+
23482349
frame.next_frame = NULL;
23492350
frame.return_address = 0;
23502351

2351-
if (!access_ok(VERIFY_READ, fp, 16))
2352+
if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
23522353
break;
23532354

2354-
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
2355+
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
23552356
if (bytes != 0)
23562357
break;
2357-
bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
2358+
bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
23582359
if (bytes != 0)
23592360
break;
23602361

arch/x86/events/intel/Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
22
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
33
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
4-
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
5-
intel-rapl-objs := rapl.o
4+
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
5+
intel-rapl-perf-objs := rapl.o
66
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
77
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
88
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o

0 commit comments

Comments (0)