
Commit 945fd17

x86/cpu_entry_area: Sync cpu_entry_area to initial_page_table
The separation of the cpu_entry_area from the fixmap missed the fact that on 32-bit non-PAE kernels the cpu_entry_area mapping might not be covered in initial_page_table by the previous synchronizations. This results in suspend/resume failures because 32-bit uses the initial page table for resume. The absence of the cpu_entry_area mapping results in a triple fault, i.e., an instant reboot.

With PAE enabled this works by chance because the PGD entry which covers the fixmap and other parts incidentally provides the cpu_entry_area mapping as well.

Synchronize the initial page table after setting up the cpu entry area. Instead of adding yet another copy of the same code, move it to a function and invoke it from the various places.

It needs to be investigated whether the existing calls in setup_arch() and setup_per_cpu_areas() can be replaced by the later invocation from setup_cpu_entry_areas(), but that is beyond the scope of this fix.

Fixes: 92a0f81 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Woody Suwalski <terraluna977@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Woody Suwalski <terraluna977@gmail.com>
Cc: William Grant <william.grant@canonical.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1802282137290.1392@nanos.tec.linutronix.de
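For context, the copying primitive behind this fix is clone_pgd_range(). The sketch below mirrors its x86 definition in arch/x86/include/asm/pgtable.h (essentially a memcpy() over top-level page-directory entries); the helper itself is not touched by this commit, and the surrounding comments are editorial.

/*
 * Sketch of clone_pgd_range(): copy 'count' top-level (PGD) entries
 * from src to dst. The new sync_initial_page_table() calls it twice:
 * once for the kernel address range above KERNEL_PGD_BOUNDARY, and
 * once for the low identity map used e.g. by the 32-bit EFI stub.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}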
1 parent 1402fd8 commit 945fd17

File tree

6 files changed (+32, -25 lines):
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/cpu_entry_area.c
arch/x86/mm/init_32.c

arch/x86/include/asm/pgtable_32.h

Lines changed: 1 addition & 0 deletions
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
+void sync_initial_page_table(void);
 
 /*
  * Define this if things work differently on an i386 and an i486:

arch/x86/include/asm/pgtable_64.h

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
 #define swapper_pg_dir init_top_pgt
 
 extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %p(%016lx)\n", \
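Note that the 64-bit stub can be empty: as the line above it shows, swapper_pg_dir is init_top_pgt itself on 64-bit, so there is no separate initial_page_table to keep in sync. Providing the no-op lets the callers below drop their #ifdef CONFIG_X86_32 guards and invoke sync_initial_page_table() unconditionally.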

arch/x86/kernel/setup.c

Lines changed: 5 additions & 12 deletions
@@ -1204,20 +1204,13 @@ void __init setup_arch(char **cmdline_p)
 
 	kasan_init();
 
-#ifdef CONFIG_X86_32
-	/* sync back kernel address range */
-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			KERNEL_PGD_PTRS);
-
 	/*
-	 * sync back low identity map too. It is used for example
-	 * in the 32-bit EFI stub.
+	 * Sync back kernel address range.
+	 *
+	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+	 * this call?
 	 */
-	clone_pgd_range(initial_page_table,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+	sync_initial_page_table();
 
 	tboot_probe();

arch/x86/kernel/setup_percpu.c

Lines changed: 4 additions & 13 deletions
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
 	/* Setup cpu initialized, callin, callout masks */
 	setup_cpu_local_masks();
 
-#ifdef CONFIG_X86_32
 	/*
 	 * Sync back kernel address range again. We already did this in
 	 * setup_arch(), but percpu data also needs to be available in
 	 * the smpboot asm. We can't reliably pick up percpu mappings
 	 * using vmalloc_fault(), because exception dispatch needs
 	 * percpu data.
+	 *
+	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+	 * this call?
 	 */
-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			KERNEL_PGD_PTRS);
-
-	/*
-	 * sync back low identity map too. It is used for example
-	 * in the 32-bit EFI stub.
-	 */
-	clone_pgd_range(initial_page_table,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+	sync_initial_page_table();
 }

arch/x86/mm/cpu_entry_area.c

Lines changed: 6 additions & 0 deletions
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
 
 	for_each_possible_cpu(cpu)
 		setup_cpu_entry_area(cpu);
+
+	/*
+	 * This is the last essential update to swapper_pgdir which needs
+	 * to be synchronized to initial_page_table on 32bit.
+	 */
+	sync_initial_page_table();
 }

arch/x86/mm/init_32.c

Lines changed: 15 additions & 0 deletions
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */
 
+void __init sync_initial_page_table(void)
+{
+	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+
+	/*
+	 * sync back low identity map too. It is used for example
+	 * in the 32-bit EFI stub.
+	 */
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
 	unsigned long pfn, va;
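Taken together, all three call sites now funnel through the single 32-bit implementation above. A rough sketch of the boot ordering this diff implies follows; the wrapper function and condensed call sequence are an illustration, not kernel source:

/*
 * Hypothetical condensed view of the boot flow touched by this commit
 * (32-bit). Each numbered sync copies swapper_pg_dir entries back into
 * initial_page_table via sync_initial_page_table().
 */
void __init boot_flow_sketch(char **cmdline_p)
{
	setup_arch(cmdline_p);     /* sync #1: kernel range + low identity map */
	setup_per_cpu_areas();     /* sync #2: percpu mappings for the smpboot asm */
	/* ... later in early boot ... */
	setup_cpu_entry_areas();   /* sync #3, the fix: runs after the
				    * cpu_entry_area mappings land in
				    * swapper_pg_dir, so resume through
				    * initial_page_table no longer triple
				    * faults on 32-bit non-PAE. */
}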
