 #define DEF_LVT_OFF		0x2
 #define DEF_INT_TYPE_APIC	0x2
 
+/* Scalable MCA: */
+
+/* Threshold LVT offset is at MSR0xC0000410[15:12] */
+#define SMCA_THR_LVT_OFF	0xF000
+
 static const char * const th_names[] = {
 	"load_store",
 	"insn_fetch",
@@ -142,6 +147,14 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
 	}
 
 	if (apic != msr) {
+		/*
+		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
+		 * the BIOS provides the value. The original field where LVT offset
+		 * was set is reserved. Return early here:
+		 */
+		if (mce_flags.smca)
+			return 0;
+
 		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
 		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
 		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
@@ -300,7 +313,19 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			goto init;
 
 		b.interrupt_enable = 1;
-		new = (high & MASK_LVTOFF_HI) >> 20;
+
+		if (mce_flags.smca) {
+			u32 smca_low, smca_high;
+
+			/* Gather LVT offset for thresholding: */
+			if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
+				break;
+
+			new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
+		} else {
+			new = (high & MASK_LVTOFF_HI) >> 20;
+		}
+
 		offset = setup_APIC_mce_threshold(offset, new);
 
 		if ((offset == new) &&
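
For reference, the SMCA path in the hunk above recovers the threshold LVT offset from bits [15:12] of the low half of MSR 0xC0000410 (MSR_CU_DEF_ERR): it masks with SMCA_THR_LVT_OFF (0xF000) and shifts right by 12. Below is a minimal, standalone C sketch of that bit-field extraction; the register value is made up purely for illustration, whereas the kernel reads the real value via rdmsr_safe().

#include <stdio.h>
#include <stdint.h>

/* Same mask as in the patch: the threshold LVT offset lives in bits [15:12]. */
#define SMCA_THR_LVT_OFF	0xF000

int main(void)
{
	/* Hypothetical low 32 bits of MSR 0xC0000410, for illustration only. */
	uint32_t smca_low = 0x0000A123;

	/* Mask bits [15:12] and shift them down, as the patch does for 'new'. */
	uint32_t lvt_off = (smca_low & SMCA_THR_LVT_OFF) >> 12;

	/* Prints 10, i.e. the 0xA encoded in bits [15:12] of the sample value. */
	printf("threshold LVT offset = %u\n", (unsigned)lvt_off);
	return 0;
}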