
Commit aadaa80

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "A handful of fixes:

   - Fix an MCE corner case bug/crash found via MCE injection testing

   - Fix 5-level paging boot crash

   - Fix MCE recovery cache invalidation bug

   - Fix regression on Xen guests caused by a recent PMD level mremap
     speedup optimization"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Make set_pmd_at() paravirt aware
  x86/mm/cpa: Fix set_mce_nospec()
  x86/boot/compressed/64: Do not corrupt EDX on EFER.LME=1 setting
  x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()
2 parents 73a4c52 + 20e55bc commit aadaa80

File tree

4 files changed: 29 additions & 26 deletions


arch/x86/boot/compressed/head_64.S

Lines changed: 2 additions & 0 deletions
@@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src)
 3:
 	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
 	pushl	%ecx
+	pushl	%edx
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	btsl	$_EFER_LME, %eax
 	wrmsr
+	popl	%edx
 	popl	%ecx
 
 	/* Enable PAE and LA57 (if required) paging modes */
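
Note on the fix: RDMSR returns the MSR value in the EDX:EAX pair and WRMSR consumes the same pair, so the read-modify-write above clobbers EDX as a side effect; the two added instructions save and restore the caller's EDX around it. A minimal C inline-assembly sketch of the same sequence (hypothetical helper, not the kernel's MSR accessors):

	#include <stdint.h>

	#define MSR_EFER  0xc0000080u   /* extended feature enable register */
	#define EFER_LME  (1u << 8)     /* long-mode enable bit */

	/* Sketch only: rdmsr writes EDX:EAX and wrmsr reads EDX:EAX, so both
	 * registers are clobbered unless the caller preserves them. */
	static inline void efer_set_lme(void)
	{
		uint32_t lo, hi;

		__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(MSR_EFER));
		lo |= EFER_LME;
		__asm__ volatile("wrmsr" : : "a"(lo), "d"(hi), "c"(MSR_EFER));
	}

Here the register constraints let the compiler preserve EDX automatically; in the hand-written trampoline, the explicit pushl/popl pair has to do that job.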

arch/x86/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-	native_set_pmd(pmdp, pmd);
+	set_pmd(pmdp, pmd);
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
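
Background for this one-liner: with CONFIG_PARAVIRT enabled, set_pmd() is routed through a paravirt ops table that Xen PV guests override, since their page tables are mapped read-only and every update must go through the hypervisor; native_set_pmd() writes the entry directly and so breaks Xen PV. The recent PMD-level mremap speedup exercised set_pmd_at(), exposing the direct call. A simplified model of the indirection (illustrative types and names, not the kernel's actual definitions):

	/* Illustrative model of the paravirt indirection, not kernel code. */
	typedef struct { unsigned long pmd; } pmd_t;

	struct pv_mmu_ops {
		void (*set_pmd)(pmd_t *pmdp, pmd_t pmd);
	};

	/* Bare-metal backend: a plain store into the page-table page. */
	static void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
	{
		pmdp->pmd = pmd.pmd;
	}

	/* Default to the native backend; a Xen PV guest would install a
	 * hook here that issues a hypercall instead. */
	static struct pv_mmu_ops pv_mmu_ops = { .set_pmd = native_set_pmd };

	static void set_pmd(pmd_t *pmdp, pmd_t pmd)
	{
		pv_mmu_ops.set_pmd(pmdp, pmd);   /* dispatch via the ops table */
	}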

arch/x86/kernel/cpu/mce/core.c

Lines changed: 1 addition & 0 deletions
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 		quirk_no_way_out(i, m, regs);
 
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
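
Why one line suffices: mce_no_way_out() scans every MCA bank and, on finding a fatal error, hands the mce record to the panic path; before this fix the record's bank field kept a stale value, so the fatal error was attributed to the wrong bank in the log. A condensed, self-contained sketch of the scan (placeholder hooks, not the kernel's exact API):

	#include <stdint.h>

	#define MCI_STATUS_VAL     (1ULL << 63)  /* bank holds a valid error */
	#define MCE_PANIC_SEVERITY 5             /* placeholder threshold */

	struct mce_sketch {
		uint64_t status;
		int bank;
	};

	/* Placeholders standing in for mce_rdmsrl()/mce_severity()/mce_read_aux(). */
	extern uint64_t read_bank_status(int bank);
	extern int bank_severity(const struct mce_sketch *m);
	extern void read_bank_aux(struct mce_sketch *m, int bank);

	static int no_way_out_sketch(struct mce_sketch *m, int nr_banks)
	{
		for (int i = 0; i < nr_banks; i++) {
			m->status = read_bank_status(i);
			if (!(m->status & MCI_STATUS_VAL))
				continue;            /* nothing logged in this bank */
			if (bank_severity(m) >= MCE_PANIC_SEVERITY) {
				m->bank = i;         /* the fix: record the failing bank */
				read_bank_aux(m, i); /* pull ADDR/MISC for that bank */
				return 1;            /* fatal: caller will panic */
			}
		}
		return 0;
	}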

arch/x86/mm/pageattr.c

Lines changed: 25 additions & 25 deletions
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {

@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)

@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 	mb();
 }

@@ -1627,29 +1650,6 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,
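
The shift trick in fix_addr() depends on arithmetic right shift: shifting the address left by one discards bit 63, and the signed shift back copies bit 62 into bit 63, restoring canonical form for a kernel address whose top bit was deliberately flipped, while leaving an already-canonical address unchanged (the kernel relies on GCC's arithmetic-shift semantics for signed types). A standalone user-space demonstration (sketch, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Same operation as fix_addr(): drop bit 63, then sign-extend
	 * bit 62 back over it. */
	static inline uint64_t fix_addr(uint64_t addr)
	{
		return (uint64_t)((int64_t)(addr << 1) >> 1);
	}

	int main(void)
	{
		uint64_t canonical = 0xffff888012345000ULL;      /* 1:1-map style address */
		uint64_t flipped   = canonical & ~(1ULL << 63);  /* top bit cheated off */

		/* The non-canonical alias is repaired... */
		printf("%016llx -> %016llx\n", (unsigned long long)flipped,
		       (unsigned long long)fix_addr(flipped));
		/* ...and a canonical address passes through unchanged. */
		printf("%016llx -> %016llx\n", (unsigned long long)canonical,
		       (unsigned long long)fix_addr(canonical));
		return 0;
	}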
