Skip to content

Commit cc51e54

Browse files
Andi Kleen authored and KAGA-KOKO committed
x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
On Nehalem and newer core CPUs the CPU cache internally uses 44 bits physical address space. The L1TF workaround is limited by this internal cache address width, and needs to have one bit free there for the mitigation to work. Older client systems report only 36bit physical address space so the range check decides that L1TF is not mitigated for a 36bit phys/32GB system with some memory holes. But since these actually have the larger internal cache width this warning is bogus because it would only really be needed if the system had more than 43bits of memory. Add a new internal x86_cache_bits field. Normally it is the same as the physical bits field reported by CPUID, but for Nehalem and newer force it to be at least 44bits. Change the L1TF memory size warning to use the new cache_bits field to avoid bogus warnings and remove the bogus comment about memory size. Fixes: 17dbca1 ("x86/speculation/l1tf: Add sysfs reporting for l1tf") Reported-by: George Anchev <studio@anchev.net> Reported-by: Christopher Snowhill <kode54@gmail.com> Signed-off-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: Michael Hocko <mhocko@suse.com> Cc: vbabka@suse.cz Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
1 parent 1ab534e commit cc51e54

File tree

3 files changed

+45
-6
lines changed

3 files changed

+45
-6
lines changed

arch/x86/include/asm/processor.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
132132
/* Index into per_cpu list: */
133133
u16 cpu_index;
134134
u32 microcode;
135+
/* Address space bits used by the cache internally */
136+
u8 x86_cache_bits;
135137
unsigned initialized : 1;
136138
} __randomize_layout;
137139

@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
183185

184186
static inline unsigned long long l1tf_pfn_limit(void)
185187
{
186-
return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
188+
return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
187189
}
188190

189191
extern void early_cpu_init(void);

arch/x86/kernel/cpu/bugs.c

Lines changed: 41 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -668,13 +668,54 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
668668
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
669669
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
670670

671+
/*
672+
* These CPUs all support 44bits physical address space internally in the
673+
* cache but CPUID can report a smaller number of physical address bits.
674+
*
675+
* The L1TF mitigation uses the top most address bit for the inversion of
676+
* non present PTEs. When the installed memory reaches into the top most
677+
* address bit due to memory holes, which has been observed on machines
678+
* which report 36bits physical address bits and have 32G RAM installed,
679+
* then the mitigation range check in l1tf_select_mitigation() triggers.
680+
* This is a false positive because the mitigation is still possible due to
681+
* the fact that the cache uses 44bit internally. Use the cache bits
682+
* instead of the reported physical bits and adjust them on the affected
683+
* machines to 44bit if the reported bits are less than 44.
684+
*/
685+
static void override_cache_bits(struct cpuinfo_x86 *c)
686+
{
687+
if (c->x86 != 6)
688+
return;
689+
690+
switch (c->x86_model) {
691+
case INTEL_FAM6_NEHALEM:
692+
case INTEL_FAM6_WESTMERE:
693+
case INTEL_FAM6_SANDYBRIDGE:
694+
case INTEL_FAM6_IVYBRIDGE:
695+
case INTEL_FAM6_HASWELL_CORE:
696+
case INTEL_FAM6_HASWELL_ULT:
697+
case INTEL_FAM6_HASWELL_GT3E:
698+
case INTEL_FAM6_BROADWELL_CORE:
699+
case INTEL_FAM6_BROADWELL_GT3E:
700+
case INTEL_FAM6_SKYLAKE_MOBILE:
701+
case INTEL_FAM6_SKYLAKE_DESKTOP:
702+
case INTEL_FAM6_KABYLAKE_MOBILE:
703+
case INTEL_FAM6_KABYLAKE_DESKTOP:
704+
if (c->x86_cache_bits < 44)
705+
c->x86_cache_bits = 44;
706+
break;
707+
}
708+
}
709+
671710
static void __init l1tf_select_mitigation(void)
672711
{
673712
u64 half_pa;
674713

675714
if (!boot_cpu_has_bug(X86_BUG_L1TF))
676715
return;
677716

717+
override_cache_bits(&boot_cpu_data);
718+
678719
switch (l1tf_mitigation) {
679720
case L1TF_MITIGATION_OFF:
680721
case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
694735
return;
695736
#endif
696737

697-
/*
698-
* This is extremely unlikely to happen because almost all
699-
* systems have far more MAX_PA/2 than RAM can be fit into
700-
* DIMM slots.
701-
*/
702738
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
703739
if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
704740
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");

arch/x86/kernel/cpu/common.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
919919
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
920920
c->x86_phys_bits = 36;
921921
#endif
922+
c->x86_cache_bits = c->x86_phys_bits;
922923
}
923924

924925
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)

0 commit comments

Comments
 (0)