Skip to content

Commit 429893b

Browse files
suryasaimadhu authored and Ingo Molnar committed
x86/mce/AMD: Carve out threshold block preparation
mce_amd_feature_init() was getting pretty fat, carve out the
threshold_block setup into a separate function in order to simplify flow
and make it more understandable.

No functionality change.

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/1453750913-4781-8-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent f57a1f3 commit 429893b

File tree

1 file changed

+49
-38
lines changed

1 file changed

+49
-38
lines changed

arch/x86/kernel/cpu/mcheck/mce_amd.c

Lines changed: 49 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -267,14 +267,59 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
267267
wrmsr(MSR_CU_DEF_ERR, low, high);
268268
}
269269

270+
static int
271+
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
272+
int offset, u32 misc_high)
273+
{
274+
unsigned int cpu = smp_processor_id();
275+
struct threshold_block b;
276+
int new;
277+
278+
if (!block)
279+
per_cpu(bank_map, cpu) |= (1 << bank);
280+
281+
memset(&b, 0, sizeof(b));
282+
b.cpu = cpu;
283+
b.bank = bank;
284+
b.block = block;
285+
b.address = addr;
286+
b.interrupt_capable = lvt_interrupt_supported(bank, misc_high);
287+
288+
if (!b.interrupt_capable)
289+
goto done;
290+
291+
b.interrupt_enable = 1;
292+
293+
if (mce_flags.smca) {
294+
u32 smca_low, smca_high;
295+
296+
/* Gather LVT offset for thresholding: */
297+
if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
298+
goto out;
299+
300+
new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
301+
} else {
302+
new = (misc_high & MASK_LVTOFF_HI) >> 20;
303+
}
304+
305+
offset = setup_APIC_mce_threshold(offset, new);
306+
307+
if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
308+
mce_threshold_vector = amd_threshold_interrupt;
309+
310+
done:
311+
mce_threshold_block_init(&b, offset);
312+
313+
out:
314+
return offset;
315+
}
316+
270317
/* cpu init entry point, called from mce.c with preempt off */
271318
void mce_amd_feature_init(struct cpuinfo_x86 *c)
272319
{
273-
struct threshold_block b;
274-
unsigned int cpu = smp_processor_id();
275320
u32 low = 0, high = 0, address = 0;
276321
unsigned int bank, block;
277-
int offset = -1, new;
322+
int offset = -1;
278323

279324
for (bank = 0; bank < mca_cfg.banks; ++bank) {
280325
for (block = 0; block < NR_BLOCKS; ++block) {
@@ -299,41 +344,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
299344
(high & MASK_LOCKED_HI))
300345
continue;
301346

302-
if (!block)
303-
per_cpu(bank_map, cpu) |= (1 << bank);
304-
305-
memset(&b, 0, sizeof(b));
306-
b.cpu = cpu;
307-
b.bank = bank;
308-
b.block = block;
309-
b.address = address;
310-
b.interrupt_capable = lvt_interrupt_supported(bank, high);
311-
312-
if (!b.interrupt_capable)
313-
goto init;
314-
315-
b.interrupt_enable = 1;
316-
317-
if (mce_flags.smca) {
318-
u32 smca_low, smca_high;
319-
320-
/* Gather LVT offset for thresholding: */
321-
if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
322-
break;
323-
324-
new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
325-
} else {
326-
new = (high & MASK_LVTOFF_HI) >> 20;
327-
}
328-
329-
offset = setup_APIC_mce_threshold(offset, new);
330-
331-
if ((offset == new) &&
332-
(mce_threshold_vector != amd_threshold_interrupt))
333-
mce_threshold_vector = amd_threshold_interrupt;
334-
335-
init:
336-
mce_threshold_block_init(&b, offset);
347+
offset = prepare_threshold_block(bank, block, address, offset, high);
337348
}
338349
}
339350

0 commit comments

Comments
 (0)