
Commit c226fab

x86/power/64: Do not refer to __PAGE_OFFSET from assembly code

When CONFIG_RANDOMIZE_MEMORY is set on x86-64, __PAGE_OFFSET becomes a variable and using it as a symbol in the image memory restoration assembly code under core_restore_code is not correct any more.

To avoid that problem, modify set_up_temporary_mappings() to compute the physical address of the temporary page tables and store it in temp_level4_pgt, so that the value of that variable is ready to be written into CR3. Then, the assembly code doesn't have to worry about converting that value into a physical address and things work regardless of whether or not CONFIG_RANDOMIZE_MEMORY is set.

Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
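
For context (an explanatory aside, not part of the commit message): with CONFIG_RANDOMIZE_MEMORY=y, __PAGE_OFFSET expands to the runtime variable page_offset_base, so assembly code can no longer treat it as a constant to subtract. The conversion therefore has to happen in C; a minimal sketch of the idea, using a hypothetical helper name:

/*
 * Sketch only: an address in the kernel's direct mapping is converted to
 * its physical counterpart by subtracting __PAGE_OFFSET.  When
 * CONFIG_RANDOMIZE_MEMORY=y, __PAGE_OFFSET is the variable
 * page_offset_base, which C code reads at run time.
 */
static unsigned long pgd_phys_for_cr3(pgd_t *pgd)   /* hypothetical name */
{
        return (unsigned long)pgd - __PAGE_OFFSET;   /* same value as __pa(pgd) */
}

The commit does this computation inline in set_up_temporary_mappings() rather than through a helper, storing the result in temp_level4_pgt.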
1 parent: 4ce827b

2 files changed, 9 insertions(+), 11 deletions(-)


arch/x86/power/hibernate_64.c

Lines changed: 9 additions & 9 deletions
@@ -37,11 +37,11 @@ unsigned long jump_address_phys;
  */
 unsigned long restore_cr3 __visible;
 
-pgd_t *temp_level4_pgt __visible;
+unsigned long temp_level4_pgt __visible;
 
 unsigned long relocated_restore_code __visible;
 
-static int set_up_temporary_text_mapping(void)
+static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
         pmd_t *pmd;
         pud_t *pud;
@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void)
                 __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
         set_pud(pud + pud_index(restore_jump_address),
                 __pud(__pa(pmd) | _KERNPG_TABLE));
-        set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+        set_pgd(pgd + pgd_index(restore_jump_address),
                 __pgd(__pa(pud) | _KERNPG_TABLE));
 
         return 0;
@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void)
                 .kernel_mapping = true,
         };
         unsigned long mstart, mend;
+        pgd_t *pgd;
         int result;
         int i;
 
-        temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
-        if (!temp_level4_pgt)
+        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+        if (!pgd)
                 return -ENOMEM;
 
         /* Prepare a temporary mapping for the kernel text */
-        result = set_up_temporary_text_mapping();
+        result = set_up_temporary_text_mapping(pgd);
         if (result)
                 return result;
 
@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void)
                 mstart = pfn_mapped[i].start << PAGE_SHIFT;
                 mend = pfn_mapped[i].end << PAGE_SHIFT;
 
-                result = kernel_ident_mapping_init(&info, temp_level4_pgt,
-                                                   mstart, mend);
-
+                result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
                 if (result)
                         return result;
         }
 
+        temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
         return 0;
 }

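A brief design note, not part of the commit itself: get_safe_page() hands back a page in the kernel's direct mapping, so subtracting __PAGE_OFFSET from the pointer yields the page's physical address, the same value __pa() would produce, and that is exactly the form CR3 expects. Doing the subtraction here, in C, keeps __PAGE_OFFSET out of the assembly entirely.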
arch/x86/power/hibernate_asm_64.S

Lines changed: 0 additions & 2 deletions
@@ -72,8 +72,6 @@ ENTRY(restore_image)
         /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
         /* switch to temporary page tables */
-        movq    $__PAGE_OFFSET, %rcx
-        subq    %rcx, %rax
         movq    %rax, %cr3
         /* flush TLB */
         movq    %rbx, %rcx

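With those two instructions gone, core_restore_code expects %rax to already hold a physical address when it writes CR3. In the surrounding file (not shown in this hunk), restore_image loads that value from temp_level4_pgt, which set_up_temporary_mappings() now fills with a CR3-ready physical address, so the restore path behaves the same whether or not CONFIG_RANDOMIZE_MEMORY is set.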