Skip to content

Commit 2077be6

Browse files
labbott authored and wildea01 committed
arm64: Use __pa_symbol for kernel symbols
__pa_symbol is technically the macro that should be used for kernel symbols. Switch to this as a prerequisite for DEBUG_VIRTUAL which will do bounds checking. Reviewed-by: Mark Rutland <mark.rutland@arm.com> Tested-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Laura Abbott <labbott@redhat.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 869dcfd commit 2077be6

File tree

16 files changed

+76
-57
lines changed

16 files changed

+76
-57
lines changed

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747
* If the page is in the bottom half, we have to use the top half. If
4848
* the page is in the top half, we have to use the bottom half:
4949
*
50-
* T = __virt_to_phys(__hyp_idmap_text_start)
50+
* T = __pa_symbol(__hyp_idmap_text_start)
5151
* if (T & BIT(VA_BITS - 1))
5252
* HYP_VA_MIN = 0 //idmap in upper half
5353
* else
@@ -271,7 +271,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
271271
kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
272272
}
273273

274-
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
274+
#define kvm_virt_to_phys(x) __pa_symbol(x)
275275

276276
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
277277
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

arch/arm64/include/asm/memory.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ static inline void *phys_to_virt(phys_addr_t x)
210210
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
211211
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
212212
#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
213+
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
213214

214215
/*
215216
* virt_to_page(k) convert a _valid_ virtual address to struct page *

arch/arm64/include/asm/mmu_context.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
4545
*/
4646
static inline void cpu_set_reserved_ttbr0(void)
4747
{
48-
unsigned long ttbr = virt_to_phys(empty_zero_page);
48+
unsigned long ttbr = __pa_symbol(empty_zero_page);
4949

5050
write_sysreg(ttbr, ttbr0_el1);
5151
isb();
@@ -114,7 +114,7 @@ static inline void cpu_install_idmap(void)
114114
local_flush_tlb_all();
115115
cpu_set_idmap_tcr_t0sz();
116116

117-
cpu_switch_mm(idmap_pg_dir, &init_mm);
117+
cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
118118
}
119119

120120
/*
@@ -129,7 +129,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)
129129

130130
phys_addr_t pgd_phys = virt_to_phys(pgd);
131131

132-
replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
132+
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
133133

134134
cpu_install_idmap();
135135
replace_phys(pgd_phys);

arch/arm64/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
5252
* for zero-mapped memory areas etc..
5353
*/
5454
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
55-
#define ZERO_PAGE(vaddr) pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
55+
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
5656

5757
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
5858

arch/arm64/kernel/acpi_parking_protocol.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
* along with this program. If not, see <http://www.gnu.org/licenses/>.
1818
*/
1919
#include <linux/acpi.h>
20+
#include <linux/mm.h>
2021
#include <linux/types.h>
2122

2223
#include <asm/cpu_ops.h>
@@ -109,7 +110,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
109110
* that read this address need to convert this address to the
110111
* Boot-Loader's endianness before jumping.
111112
*/
112-
writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
113+
writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
113114
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
114115

115116
arch_send_wakeup_ipi_mask(cpumask_of(cpu));

arch/arm64/kernel/cpu-reset.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
2424

2525
el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
2626
is_hyp_mode_available();
27-
restart = (void *)virt_to_phys(__cpu_soft_restart);
27+
restart = (void *)__pa_symbol(__cpu_soft_restart);
2828

2929
cpu_install_idmap();
3030
restart(el2_switch, entry, arg0, arg1, arg2);

arch/arm64/kernel/cpufeature.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <linux/sort.h>
2424
#include <linux/stop_machine.h>
2525
#include <linux/types.h>
26+
#include <linux/mm.h>
2627
#include <asm/cpu.h>
2728
#include <asm/cpufeature.h>
2829
#include <asm/cpu_ops.h>
@@ -746,7 +747,7 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
746747
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
747748
int __unused)
748749
{
749-
phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
750+
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
750751

751752
/*
752753
* Activate the lower HYP offset only if:

arch/arm64/kernel/hibernate.c

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,6 @@
5050
*/
5151
extern int in_suspend;
5252

53-
/* Find a symbols alias in the linear map */
54-
#define LMADDR(x) phys_to_virt(virt_to_phys(x))
55-
5653
/* Do we need to reset el2? */
5754
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
5855

@@ -102,8 +99,8 @@ static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
10299

103100
int pfn_is_nosave(unsigned long pfn)
104101
{
105-
unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
106-
unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
102+
unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
103+
unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
107104

108105
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
109106
}
@@ -125,12 +122,12 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
125122
return -EOVERFLOW;
126123

127124
arch_hdr_invariants(&hdr->invariants);
128-
hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
125+
hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
129126
hdr->reenter_kernel = _cpu_resume;
130127

131128
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
132129
if (el2_reset_needed())
133-
hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
130+
hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
134131
else
135132
hdr->__hyp_stub_vectors = 0;
136133

@@ -460,7 +457,6 @@ int swsusp_arch_resume(void)
460457
void *zero_page;
461458
size_t exit_size;
462459
pgd_t *tmp_pg_dir;
463-
void *lm_restore_pblist;
464460
phys_addr_t phys_hibernate_exit;
465461
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
466462
void *, phys_addr_t, phys_addr_t);
@@ -480,12 +476,6 @@ int swsusp_arch_resume(void)
480476
if (rc)
481477
goto out;
482478

483-
/*
484-
* Since we only copied the linear map, we need to find restore_pblist's
485-
* linear map address.
486-
*/
487-
lm_restore_pblist = LMADDR(restore_pblist);
488-
489479
/*
490480
* We need a zero page that is zero before & after resume in order to
491481
* to break before make on the ttbr1 page tables.
@@ -537,7 +527,7 @@ int swsusp_arch_resume(void)
537527
}
538528

539529
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
540-
resume_hdr.reenter_kernel, lm_restore_pblist,
530+
resume_hdr.reenter_kernel, restore_pblist,
541531
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
542532

543533
out:

arch/arm64/kernel/insn.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
9696
if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
9797
page = vmalloc_to_page(addr);
9898
else if (!module)
99-
page = pfn_to_page(PHYS_PFN(__pa(addr)));
99+
page = phys_to_page(__pa_symbol(addr));
100100
else
101101
return addr;
102102

arch/arm64/kernel/psci.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#include <linux/smp.h>
2121
#include <linux/delay.h>
2222
#include <linux/psci.h>
23+
#include <linux/mm.h>
2324

2425
#include <uapi/linux/psci.h>
2526

@@ -45,7 +46,7 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
4546

4647
static int cpu_psci_cpu_boot(unsigned int cpu)
4748
{
48-
int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
49+
int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
4950
if (err)
5051
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
5152

arch/arm64/kernel/setup.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
#include <linux/of_fdt.h>
4343
#include <linux/efi.h>
4444
#include <linux/psci.h>
45+
#include <linux/mm.h>
4546

4647
#include <asm/acpi.h>
4748
#include <asm/fixmap.h>
@@ -199,10 +200,10 @@ static void __init request_standard_resources(void)
199200
struct memblock_region *region;
200201
struct resource *res;
201202

202-
kernel_code.start = virt_to_phys(_text);
203-
kernel_code.end = virt_to_phys(__init_begin - 1);
204-
kernel_data.start = virt_to_phys(_sdata);
205-
kernel_data.end = virt_to_phys(_end - 1);
203+
kernel_code.start = __pa_symbol(_text);
204+
kernel_code.end = __pa_symbol(__init_begin - 1);
205+
kernel_data.start = __pa_symbol(_sdata);
206+
kernel_data.end = __pa_symbol(_end - 1);
206207

207208
for_each_memblock(memory, region) {
208209
res = alloc_bootmem_low(sizeof(*res));

arch/arm64/kernel/smp_spin_table.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include <linux/of.h>
2222
#include <linux/smp.h>
2323
#include <linux/types.h>
24+
#include <linux/mm.h>
2425

2526
#include <asm/cacheflush.h>
2627
#include <asm/cpu_ops.h>
@@ -98,7 +99,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
9899
* boot-loader's endianess before jumping. This is mandated by
99100
* the boot protocol.
100101
*/
101-
writeq_relaxed(__pa(secondary_holding_pen), release_addr);
102+
writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
102103
__flush_dcache_area((__force void *)release_addr,
103104
sizeof(*release_addr));
104105

arch/arm64/kernel/vdso.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ static int __init vdso_init(void)
123123
{
124124
int i;
125125
struct page **vdso_pagelist;
126+
unsigned long pfn;
126127

127128
if (memcmp(&vdso_start, "\177ELF", 4)) {
128129
pr_err("vDSO is not a valid ELF object!\n");
@@ -140,11 +141,14 @@ static int __init vdso_init(void)
140141
return -ENOMEM;
141142

142143
/* Grab the vDSO data page. */
143-
vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
144+
vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
145+
144146

145147
/* Grab the vDSO code pages. */
148+
pfn = sym_to_pfn(&vdso_start);
149+
146150
for (i = 0; i < vdso_pages; i++)
147-
vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
151+
vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
148152

149153
vdso_spec[0].pages = &vdso_pagelist[0];
150154
vdso_spec[1].pages = &vdso_pagelist[1];

arch/arm64/mm/init.c

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#include <linux/efi.h>
3737
#include <linux/swiotlb.h>
3838
#include <linux/vmalloc.h>
39+
#include <linux/mm.h>
3940

4041
#include <asm/boot.h>
4142
#include <asm/fixmap.h>
@@ -209,8 +210,8 @@ void __init arm64_memblock_init(void)
209210
* linear mapping. Take care not to clip the kernel which may be
210211
* high in memory.
211212
*/
212-
memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
213-
ULLONG_MAX);
213+
memblock_remove(max_t(u64, memstart_addr + linear_region_size,
214+
__pa_symbol(_end)), ULLONG_MAX);
214215
if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
215216
/* ensure that memstart_addr remains sufficiently aligned */
216217
memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -225,7 +226,7 @@ void __init arm64_memblock_init(void)
225226
*/
226227
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
227228
memblock_mem_limit_remove_map(memory_limit);
228-
memblock_add(__pa(_text), (u64)(_end - _text));
229+
memblock_add(__pa_symbol(_text), (u64)(_end - _text));
229230
}
230231

231232
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
@@ -278,7 +279,7 @@ void __init arm64_memblock_init(void)
278279
* Register the kernel text, kernel data, initrd, and initial
279280
* pagetables with memblock.
280281
*/
281-
memblock_reserve(__pa(_text), _end - _text);
282+
memblock_reserve(__pa_symbol(_text), _end - _text);
282283
#ifdef CONFIG_BLK_DEV_INITRD
283284
if (initrd_start) {
284285
memblock_reserve(initrd_start, initrd_end - initrd_start);
@@ -484,7 +485,8 @@ void __init mem_init(void)
484485

485486
void free_initmem(void)
486487
{
487-
free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
488+
free_reserved_area(lm_alias(__init_begin),
489+
lm_alias(__init_end),
488490
0, "unused kernel");
489491
/*
490492
* Unmap the __init region but leave the VM area in place. This

arch/arm64/mm/kasan_init.c

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/kernel.h>
1616
#include <linux/memblock.h>
1717
#include <linux/start_kernel.h>
18+
#include <linux/mm.h>
1819

1920
#include <asm/mmu_context.h>
2021
#include <asm/kernel-pgtable.h>
@@ -26,19 +27,26 @@
2627

2728
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
2829

30+
/*
31+
* The p*d_populate functions call virt_to_phys implicitly so they can't be used
32+
* directly on kernel symbols (bm_p*d). All the early functions are called too
33+
* early to use lm_alias so __p*d_populate functions must be used to populate
34+
* with the physical address from __pa_symbol.
35+
*/
36+
2937
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
3038
unsigned long end)
3139
{
3240
pte_t *pte;
3341
unsigned long next;
3442

3543
if (pmd_none(*pmd))
36-
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
44+
__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
3745

3846
pte = pte_offset_kimg(pmd, addr);
3947
do {
4048
next = addr + PAGE_SIZE;
41-
set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
49+
set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
4250
PAGE_KERNEL));
4351
} while (pte++, addr = next, addr != end && pte_none(*pte));
4452
}
@@ -51,7 +59,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
5159
unsigned long next;
5260

5361
if (pud_none(*pud))
54-
pud_populate(&init_mm, pud, kasan_zero_pmd);
62+
__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
5563

5664
pmd = pmd_offset_kimg(pud, addr);
5765
do {
@@ -68,7 +76,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
6876
unsigned long next;
6977

7078
if (pgd_none(*pgd))
71-
pgd_populate(&init_mm, pgd, kasan_zero_pud);
79+
__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
7280

7381
pud = pud_offset_kimg(pgd, addr);
7482
do {
@@ -148,7 +156,7 @@ void __init kasan_init(void)
148156
*/
149157
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
150158
dsb(ishst);
151-
cpu_replace_ttbr1(tmp_pg_dir);
159+
cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
152160

153161
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
154162

@@ -199,10 +207,10 @@ void __init kasan_init(void)
199207
*/
200208
for (i = 0; i < PTRS_PER_PTE; i++)
201209
set_pte(&kasan_zero_pte[i],
202-
pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
210+
pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
203211

204212
memset(kasan_zero_page, 0, PAGE_SIZE);
205-
cpu_replace_ttbr1(swapper_pg_dir);
213+
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
206214

207215
/* At this point kasan is fully initialized. Enable error messages */
208216
init_task.kasan_depth = 0;

0 commit comments

Comments
 (0)