Skip to content

Commit 9a56827

Browse files
committed
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner: "A set of fixes for x86: - Prevent multiplication result truncation on 32bit. Introduced with the early timestamp rework. - Ensure microcode revision storage to be consistent under all circumstances - Prevent write tearing of PTEs - Prevent confusion of user and kernel registers when dumping fatal signals verbosely - Make an error return value in a failure path of the vector allocation negative. Returning EINVAL might make the caller assume success and causes further wreckage. - A trivial kernel doc warning fix" * 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/mm: Use WRITE_ONCE() when setting PTEs x86/apic/vector: Make error return value negative x86/process: Don't mix user/kernel regs in 64bit __show_regs() x86/tsc: Prevent result truncation on 32bit x86: Fix kernel-doc atomic.h warnings x86/microcode: Update the new microcode revision unconditionally x86/microcode: Make sure boot_cpu_data.microcode is up-to-date
2 parents 3567994 + 9bc4f28 commit 9a56827

File tree

14 files changed

+87
-59
lines changed

14 files changed

+87
-59
lines changed

arch/x86/include/asm/atomic.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -80,37 +80,37 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
8080
* true if the result is zero, or false for all
8181
* other cases.
8282
*/
83-
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
8483
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
8584
{
8685
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
8786
}
87+
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
8888

8989
/**
9090
* arch_atomic_inc - increment atomic variable
9191
* @v: pointer of type atomic_t
9292
*
9393
* Atomically increments @v by 1.
9494
*/
95-
#define arch_atomic_inc arch_atomic_inc
9695
static __always_inline void arch_atomic_inc(atomic_t *v)
9796
{
9897
asm volatile(LOCK_PREFIX "incl %0"
9998
: "+m" (v->counter));
10099
}
100+
#define arch_atomic_inc arch_atomic_inc
101101

102102
/**
103103
* arch_atomic_dec - decrement atomic variable
104104
* @v: pointer of type atomic_t
105105
*
106106
* Atomically decrements @v by 1.
107107
*/
108-
#define arch_atomic_dec arch_atomic_dec
109108
static __always_inline void arch_atomic_dec(atomic_t *v)
110109
{
111110
asm volatile(LOCK_PREFIX "decl %0"
112111
: "+m" (v->counter));
113112
}
113+
#define arch_atomic_dec arch_atomic_dec
114114

115115
/**
116116
* arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
120120
* returns true if the result is 0, or false for all other
121121
* cases.
122122
*/
123-
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
124123
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
125124
{
126125
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
127126
}
127+
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
128128

129129
/**
130130
* arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
134134
* and returns true if the result is zero, or false for all
135135
* other cases.
136136
*/
137-
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
138137
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
139138
{
140139
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
141140
}
141+
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
142142

143143
/**
144144
* arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
149149
* if the result is negative, or false when
150150
* result is greater than or equal to zero.
151151
*/
152-
#define arch_atomic_add_negative arch_atomic_add_negative
153152
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
154153
{
155154
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
156155
}
156+
#define arch_atomic_add_negative arch_atomic_add_negative
157157

158158
/**
159159
* arch_atomic_add_return - add integer and return

arch/x86/include/asm/atomic64_32.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -205,25 +205,25 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
205205
*
206206
* Atomically increments @v by 1.
207207
*/
208-
#define arch_atomic64_inc arch_atomic64_inc
209208
static inline void arch_atomic64_inc(atomic64_t *v)
210209
{
211210
__alternative_atomic64(inc, inc_return, /* no output */,
212211
"S" (v) : "memory", "eax", "ecx", "edx");
213212
}
213+
#define arch_atomic64_inc arch_atomic64_inc
214214

215215
/**
216216
* arch_atomic64_dec - decrement atomic64 variable
217217
* @v: pointer to type atomic64_t
218218
*
219219
* Atomically decrements @v by 1.
220220
*/
221-
#define arch_atomic64_dec arch_atomic64_dec
222221
static inline void arch_atomic64_dec(atomic64_t *v)
223222
{
224223
__alternative_atomic64(dec, dec_return, /* no output */,
225224
"S" (v) : "memory", "eax", "ecx", "edx");
226225
}
226+
#define arch_atomic64_dec arch_atomic64_dec
227227

228228
/**
229229
* arch_atomic64_add_unless - add unless the number is a given value
@@ -245,23 +245,23 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
245245
return (int)a;
246246
}
247247

248-
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
249248
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
250249
{
251250
int r;
252251
alternative_atomic64(inc_not_zero, "=&a" (r),
253252
"S" (v) : "ecx", "edx", "memory");
254253
return r;
255254
}
255+
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
256256

257-
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
258257
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
259258
{
260259
long long r;
261260
alternative_atomic64(dec_if_positive, "=&A" (r),
262261
"S" (v) : "ecx", "memory");
263262
return r;
264263
}
264+
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
265265

266266
#undef alternative_atomic64
267267
#undef __alternative_atomic64

arch/x86/include/asm/atomic64_64.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -71,39 +71,39 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
7171
* true if the result is zero, or false for all
7272
* other cases.
7373
*/
74-
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
7574
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
7675
{
7776
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
7877
}
78+
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
7979

8080
/**
8181
* arch_atomic64_inc - increment atomic64 variable
8282
* @v: pointer to type atomic64_t
8383
*
8484
* Atomically increments @v by 1.
8585
*/
86-
#define arch_atomic64_inc arch_atomic64_inc
8786
static __always_inline void arch_atomic64_inc(atomic64_t *v)
8887
{
8988
asm volatile(LOCK_PREFIX "incq %0"
9089
: "=m" (v->counter)
9190
: "m" (v->counter));
9291
}
92+
#define arch_atomic64_inc arch_atomic64_inc
9393

9494
/**
9595
* arch_atomic64_dec - decrement atomic64 variable
9696
* @v: pointer to type atomic64_t
9797
*
9898
* Atomically decrements @v by 1.
9999
*/
100-
#define arch_atomic64_dec arch_atomic64_dec
101100
static __always_inline void arch_atomic64_dec(atomic64_t *v)
102101
{
103102
asm volatile(LOCK_PREFIX "decq %0"
104103
: "=m" (v->counter)
105104
: "m" (v->counter));
106105
}
106+
#define arch_atomic64_dec arch_atomic64_dec
107107

108108
/**
109109
* arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
113113
* returns true if the result is 0, or false for all other
114114
* cases.
115115
*/
116-
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
117116
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
118117
{
119118
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
120119
}
120+
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
121121

122122
/**
123123
* arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
127127
* and returns true if the result is zero, or false for all
128128
* other cases.
129129
*/
130-
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
131130
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
132131
{
133132
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
134133
}
134+
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
135135

136136
/**
137137
* arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
142142
* if the result is negative, or false when
143143
* result is greater than or equal to zero.
144144
*/
145-
#define arch_atomic64_add_negative arch_atomic64_add_negative
146145
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
147146
{
148147
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
149148
}
149+
#define arch_atomic64_add_negative arch_atomic64_add_negative
150150

151151
/**
152152
* arch_atomic64_add_return - add and return

arch/x86/include/asm/kdebug.h

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,20 @@ enum die_val {
2222
DIE_NMIUNKNOWN,
2323
};
2424

25+
enum show_regs_mode {
26+
SHOW_REGS_SHORT,
27+
/*
28+
* For when userspace crashed, but we don't think it's our fault, and
29+
* therefore don't print kernel registers.
30+
*/
31+
SHOW_REGS_USER,
32+
SHOW_REGS_ALL
33+
};
34+
2535
extern void die(const char *, struct pt_regs *,long);
2636
extern int __must_check __die(const char *, struct pt_regs *, long);
2737
extern void show_stack_regs(struct pt_regs *regs);
28-
extern void __show_regs(struct pt_regs *regs, int all);
38+
extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
2939
extern void show_iret_regs(struct pt_regs *regs);
3040
extern unsigned long oops_begin(void);
3141
extern void oops_end(unsigned long, struct pt_regs *, int signr);

arch/x86/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
11951195
return xchg(pmdp, pmd);
11961196
} else {
11971197
pmd_t old = *pmdp;
1198-
*pmdp = pmd;
1198+
WRITE_ONCE(*pmdp, pmd);
11991199
return old;
12001200
}
12011201
}

arch/x86/include/asm/pgtable_64.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -55,15 +55,15 @@ struct mm_struct;
5555
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
5656
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
5757

58-
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
59-
pte_t *ptep)
58+
static inline void native_set_pte(pte_t *ptep, pte_t pte)
6059
{
61-
*ptep = native_make_pte(0);
60+
WRITE_ONCE(*ptep, pte);
6261
}
6362

64-
static inline void native_set_pte(pte_t *ptep, pte_t pte)
63+
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
64+
pte_t *ptep)
6565
{
66-
*ptep = pte;
66+
native_set_pte(ptep, native_make_pte(0));
6767
}
6868

6969
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7373

7474
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7575
{
76-
*pmdp = pmd;
76+
WRITE_ONCE(*pmdp, pmd);
7777
}
7878

7979
static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
109109

110110
static inline void native_set_pud(pud_t *pudp, pud_t pud)
111111
{
112-
*pudp = pud;
112+
WRITE_ONCE(*pudp, pud);
113113
}
114114

115115
static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
137137
pgd_t pgd;
138138

139139
if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
140-
*p4dp = p4d;
140+
WRITE_ONCE(*p4dp, p4d);
141141
return;
142142
}
143143

144144
pgd = native_make_pgd(native_p4d_val(p4d));
145145
pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
146-
*p4dp = native_make_p4d(native_pgd_val(pgd));
146+
WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
147147
}
148148

149149
static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
153153

154154
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
155155
{
156-
*pgdp = pti_set_user_pgtbl(pgdp, pgd);
156+
WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
157157
}
158158

159159
static inline void native_pgd_clear(pgd_t *pgd)

arch/x86/kernel/apic/vector.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
413413
if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
414414
/* Something in the core code broke! Survive gracefully */
415415
pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
416-
return EINVAL;
416+
return -EINVAL;
417417
}
418418

419419
ret = assign_managed_vector(irqd, vector_searchmask);

arch/x86/kernel/cpu/microcode/amd.c

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
504504
struct microcode_amd *mc_amd;
505505
struct ucode_cpu_info *uci;
506506
struct ucode_patch *p;
507+
enum ucode_state ret;
507508
u32 rev, dummy;
508509

509510
BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,23 +522,30 @@ static enum ucode_state apply_microcode_amd(int cpu)
521522

522523
/* need to apply patch? */
523524
if (rev >= mc_amd->hdr.patch_id) {
524-
c->microcode = rev;
525-
uci->cpu_sig.rev = rev;
526-
return UCODE_OK;
525+
ret = UCODE_OK;
526+
goto out;
527527
}
528528

529529
if (__apply_microcode_amd(mc_amd)) {
530530
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
531531
cpu, mc_amd->hdr.patch_id);
532532
return UCODE_ERROR;
533533
}
534-
pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
535-
mc_amd->hdr.patch_id);
536534

537-
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
538-
c->microcode = mc_amd->hdr.patch_id;
535+
rev = mc_amd->hdr.patch_id;
536+
ret = UCODE_UPDATED;
537+
538+
pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
539539

540-
return UCODE_UPDATED;
540+
out:
541+
uci->cpu_sig.rev = rev;
542+
c->microcode = rev;
543+
544+
/* Update boot_cpu_data's revision too, if we're on the BSP: */
545+
if (c->cpu_index == boot_cpu_data.cpu_index)
546+
boot_cpu_data.microcode = rev;
547+
548+
return ret;
541549
}
542550

543551
static int install_equiv_cpu_table(const u8 *buf)

0 commit comments

Comments
 (0)