
Commit 9b971e7

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Apologies for this being so late, but we've uncovered a few nasty
  issues on arm64 which didn't settle down until yesterday and the
  fixes all look suitable for 4.3. Of the four patches, three of them
  are Cc'd to stable, with the remaining patch fixing an issue that
  only took effect during the merge window.

  Summary:

   - Fix corruption in SWP emulation when STXR fails due to contention

   - Fix MMU re-initialisation when resuming from a low-power state

   - Fix stack unwinding code to match what ftrace expects

   - Fix relocation code in the EFI stub when DRAM base is not 2 MB
     aligned"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/efi: do not assume DRAM base is aligned to 2 MB
  Revert "ARM64: unwind: Fix PC calculation"
  arm64: kernel: fix tcr_el1.t0sz restore on systems with extended idmap
  arm64: compat: fix stxr failure case in SWP emulation
2 parents 7c0f488 + 73effcc commit 9b971e7

4 files changed (+35, −23 lines)


arch/arm64/kernel/armv8_deprecated.c

Lines changed: 9 additions & 7 deletions
@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
 	__asm__ __volatile__(					\
 	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
 		    CONFIG_ARM64_PAN)				\
-	"	mov		%w2, %w1\n"			\
-	"0:	ldxr"B"		%w1, [%3]\n"			\
-	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
+	"0:	ldxr"B"		%w2, [%3]\n"			\
+	"1:	stxr"B"		%w0, %w1, [%3]\n"		\
 	"	cbz		%w0, 2f\n"			\
 	"	mov		%w0, %w4\n"			\
+	"	b		3f\n"				\
 	"2:\n"							\
+	"	mov		%w1, %w2\n"			\
+	"3:\n"							\
 	"	.pushsection	 .fixup,\"ax\"\n"		\
 	"	.align		2\n"				\
-	"3:	mov		%w0, %w5\n"			\
-	"	b		2b\n"				\
+	"4:	mov		%w0, %w5\n"			\
+	"	b		3b\n"				\
 	"	.popsection"					\
 	"	.pushsection	 __ex_table,\"a\"\n"		\
 	"	.align		3\n"				\
-	"	.quad		0b, 3b\n"			\
-	"	.quad		1b, 3b\n"			\
+	"	.quad		0b, 4b\n"			\
+	"	.quad		1b, 4b\n"			\
 	"	.popsection\n"					\
 	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
 		    CONFIG_ARM64_PAN)				\
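The failure mode being fixed: when STXR fails, the caller retries the whole sequence, but the old code had already copied the loaded value into the data register, so the retry would store back the wrong value. A minimal C sketch of the corrected pattern, with hypothetical load_exclusive()/store_exclusive() helpers standing in for LDXR/STXR:

/*
 * Hedged sketch, not the kernel macro itself: an LL/SC swap that must
 * leave the caller's data intact across retries. load_exclusive() and
 * store_exclusive() are hypothetical stand-ins for LDXR/STXR.
 */
static int swp_emulate_once(unsigned int *addr, unsigned int *data)
{
	unsigned int loaded = load_exclusive(addr);	/* 0: ldxr into temp */

	if (store_exclusive(addr, *data))	/* 1: stxr of the original data */
		return -EAGAIN;	/* *data untouched, so a retry stores the right value */

	*data = loaded;		/* success path only: hand back the old memory value */
	return 0;
}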

arch/arm64/kernel/efi-stub.c

Lines changed: 12 additions & 2 deletions
@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 	unsigned long kernel_size, kernel_memsize = 0;
 	unsigned long nr_pages;
 	void *old_image_addr = (void *)*image_addr;
+	unsigned long preferred_offset;
+
+	/*
+	 * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+	 * a 2 MB aligned base, which itself may be lower than dram_base, as
+	 * long as the resulting offset equals or exceeds it.
+	 */
+	preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+	if (preferred_offset < dram_base)
+		preferred_offset += SZ_2M;
 
 	/* Relocate the image, if required. */
 	kernel_size = _edata - _text;
-	if (*image_addr != (dram_base + TEXT_OFFSET)) {
+	if (*image_addr != preferred_offset) {
 		kernel_memsize = kernel_size + (_end - _edata);
 
 		/*
@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 		 * Mustang), we can still place the kernel at the address
 		 * 'dram_base + TEXT_OFFSET'.
 		 */
-		*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+		*image_addr = *reserve_addr = preferred_offset;
 		nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
 			   EFI_PAGE_SIZE;
 		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
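The preferred_offset arithmetic is easy to check standalone. The sketch below assumes the arm64 default TEXT_OFFSET of 0x80000 (512 KiB); the unaligned base value is an invented example:

#include <stdio.h>

#define SZ_2M		0x200000UL
#define TEXT_OFFSET	0x80000UL	/* assumed arm64 default */

int main(void)
{
	/* One 2 MB aligned base, one deliberately unaligned (invented). */
	unsigned long bases[] = { 0x80000000UL, 0x800a0000UL };

	for (int i = 0; i < 2; i++) {
		unsigned long dram_base = bases[i];
		unsigned long preferred = (dram_base & ~(SZ_2M - 1)) + TEXT_OFFSET;

		/* TEXT_OFFSET past the rounded-down base may still fall
		 * below dram_base; bump to the next 2 MB boundary if so. */
		if (preferred < dram_base)
			preferred += SZ_2M;

		printf("dram_base=%#lx -> preferred_offset=%#lx\n",
		       dram_base, preferred);
	}
	return 0;
}

With these inputs the second base yields 0x80280000, i.e. TEXT_OFFSET beyond the first 2 MB boundary that keeps the kernel at or above dram_base.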

arch/arm64/kernel/stacktrace.c

Lines changed: 1 addition & 5 deletions
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	/*
-	 * -4 here because we care about the PC at time of bl,
-	 * not where the return will go.
-	 */
-	frame->pc = *(unsigned long *)(fp + 8) - 4;
+	frame->pc = *(unsigned long *)(fp + 8);
 
 	return 0;
 }
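The revert restores the raw saved link register because, per the merge summary, ftrace expects the unwound values to match the return addresses it recorded. A hedged sketch of the AAPCS64 frame-record walk this function performs (shape only, not the kernel's unwind_frame()):

#include <stdio.h>

/* AAPCS64 frame record: fp points at a {previous fp, saved lr} pair. */
struct frame_record {
	unsigned long fp;	/* link to the caller's frame record */
	unsigned long lr;	/* saved return address */
};

static void walk_stack(unsigned long fp)
{
	while (fp) {
		const struct frame_record *rec =
			(const struct frame_record *)fp;

		/*
		 * Report lr as-is; subtracting 4 to point at the BL
		 * instruction is what the reverted patch did.
		 */
		printf("pc: %#lx\n", rec->lr);
		fp = rec->fp;
	}
}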

arch/arm64/kernel/suspend.c

Lines changed: 13 additions & 9 deletions
@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	if (ret == 0) {
 		/*
 		 * We are resuming from reset with TTBR0_EL1 set to the
-		 * idmap to enable the MMU; restore the active_mm mappings in
-		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-		 * the thread entered cpu_suspend with TTBR0_EL1 set to
-		 * reserved TTBR0 page tables and should be restored as such.
+		 * idmap to enable the MMU; set the TTBR0 to the reserved
+		 * page tables to prevent speculative TLB allocations, flush
+		 * the local tlb and set the default tcr_el1.t0sz so that
+		 * the TTBR0 address space set-up is properly restored.
+		 * If the current active_mm != &init_mm we entered cpu_suspend
+		 * with mappings in TTBR0 that must be restored, so we switch
+		 * them back to complete the address space configuration
+		 * restoration before returning.
 		 */
-		if (mm == &init_mm)
-			cpu_set_reserved_ttbr0();
-		else
-			cpu_switch_mm(mm->pgd, mm);
-
+		cpu_set_reserved_ttbr0();
 		flush_tlb_all();
+		cpu_set_default_tcr_t0sz();
+
+		if (mm != &init_mm)
+			cpu_switch_mm(mm->pgd, mm);
 
 		/*
 		 * Restore per-cpu offset before any kernel
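Distilled from the hunk above, the resume path now unconditionally resets TTBR0 state before optionally switching back to the task's mm. A hedged sketch of that ordering, reusing the helper names from the diff; not a drop-in for cpu_suspend() itself:

static void restore_address_space(struct mm_struct *mm)
{
	cpu_set_reserved_ttbr0();	/* block speculative TTBR0 TLB fills */
	flush_tlb_all();		/* drop TLB entries from the idmap era */
	cpu_set_default_tcr_t0sz();	/* undo the extended-idmap t0sz */

	if (mm != &init_mm)		/* re-install the task's page tables */
		cpu_switch_mm(mm->pgd, mm);
}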
