@@ -12,6 +12,24 @@
 
 #define DISABLE_BRANCH_PROFILING
 
+/*
+ * Since we're dealing with identity mappings, physical and virtual
+ * addresses are the same, so override these defines which are ultimately
+ * used by the headers in misc.h.
+ */
+#define __pa(x) ((unsigned long)(x))
+#define __va(x) ((void *)((unsigned long)(x)))
+
+/*
+ * Special hack: we have to be careful, because no indirections are
+ * allowed here, and paravirt_ops is a kind of one. As it will only run in
+ * baremetal anyway, we just keep it from happening. (This list needs to
+ * be extended when new paravirt and debugging variants are added.)
+ */
+#undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_SPINLOCKS
+
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/mem_encrypt.h>
 
@@ -73,116 +91,83 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 	memset(pgd_p, 0, pgd_size);
 }
 
-static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
-        pgd_t *pgd_p;
-        p4d_t *p4d_p;
-        pud_t *pud_p;
-        pmd_t *pmd_p;
-
-        pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
-        if (native_pgd_val(*pgd_p)) {
-                if (IS_ENABLED(CONFIG_X86_5LEVEL))
-                        p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-                else
-                        pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-        } else {
-                pgd_t pgd;
-
-                if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-                        p4d_p = ppd->pgtable_area;
-                        memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-                        ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
-
-                        pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
-                } else {
-                        pud_p = ppd->pgtable_area;
-                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-                        ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-                        pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
-                }
-                native_set_pgd(pgd_p, pgd);
+        pgd_t *pgd;
+        p4d_t *p4d;
+        pud_t *pud;
+        pmd_t *pmd;
+
+        pgd = ppd->pgd + pgd_index(ppd->vaddr);
+        if (pgd_none(*pgd)) {
+                p4d = ppd->pgtable_area;
+                memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
+                ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
+                set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
         }
 
-        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-                p4d_p += p4d_index(ppd->vaddr);
-                if (native_p4d_val(*p4d_p)) {
-                        pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
-                } else {
-                        p4d_t p4d;
-
-                        pud_p = ppd->pgtable_area;
-                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-                        ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-                        p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
-                        native_set_p4d(p4d_p, p4d);
-                }
+        p4d = p4d_offset(pgd, ppd->vaddr);
+        if (p4d_none(*p4d)) {
+                pud = ppd->pgtable_area;
+                memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
+                ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
+                set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
         }
 
-        pud_p += pud_index(ppd->vaddr);
-        if (native_pud_val(*pud_p)) {
-                if (native_pud_val(*pud_p) & _PAGE_PSE)
-                        return NULL;
-
-                pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
-        } else {
-                pud_t pud;
-
-                pmd_p = ppd->pgtable_area;
-                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-                ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
-
-                pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
-                native_set_pud(pud_p, pud);
+        pud = pud_offset(p4d, ppd->vaddr);
+        if (pud_none(*pud)) {
+                pmd = ppd->pgtable_area;
+                memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
+                ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
+                set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
         }
 
-        return pmd_p;
+        if (pud_large(*pud))
+                return NULL;
+
+        return pud;
 }
 
 static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 {
-        pmd_t *pmd_p;
+        pud_t *pud;
+        pmd_t *pmd;
 
-        pmd_p = sme_prepare_pgd(ppd);
-        if (!pmd_p)
+        pud = sme_prepare_pgd(ppd);
+        if (!pud)
                 return;
 
-        pmd_p += pmd_index(ppd->vaddr);
-        if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-                native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
+        pmd = pmd_offset(pud, ppd->vaddr);
+        if (pmd_large(*pmd))
+                return;
+
+        set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
 }
 
 static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
-        pmd_t *pmd_p;
-        pte_t *pte_p;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
 
-        pmd_p = sme_prepare_pgd(ppd);
-        if (!pmd_p)
+        pud = sme_prepare_pgd(ppd);
+        if (!pud)
                 return;
 
-        pmd_p += pmd_index(ppd->vaddr);
-        if (native_pmd_val(*pmd_p)) {
-                if (native_pmd_val(*pmd_p) & _PAGE_PSE)
-                        return;
-
-                pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
-        } else {
-                pmd_t pmd;
-
-                pte_p = ppd->pgtable_area;
-                memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
-                ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
-
-                pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
-                native_set_pmd(pmd_p, pmd);
+        pmd = pmd_offset(pud, ppd->vaddr);
+        if (pmd_none(*pmd)) {
+                pte = ppd->pgtable_area;
+                memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+                ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
+                set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
         }
 
-        pte_p += pte_index(ppd->vaddr);
-        if (!native_pte_val(*pte_p))
-                native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+        if (pmd_large(*pmd))
+                return;
+
+        pte = pte_offset_map(pmd, ppd->vaddr);
+        if (pte_none(*pte))
+                set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
 }
 
 static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
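
The rewritten sme_prepare_pgd() repeats one pattern per level: if the entry is none, carve a zeroed table out of ppd->pgtable_area, install it with set_*(), then descend via *_offset(). A standalone userspace sketch of that allocate-if-none step, reduced to a single level with hypothetical names (walk_data, prepare_next_level, ENTRY_PRESENT are illustrative, not kernel API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ENTRIES_PER_TABLE 512
#define ENTRY_PRESENT 0x1UL

struct walk_data {
        uint64_t *top;          /* top-level table, already allocated */
        uint64_t *pgtable_area; /* bump allocator for lower-level tables */
};

/* Return the next-level table for vaddr, allocating it if absent. */
static uint64_t *prepare_next_level(struct walk_data *wd, unsigned long vaddr)
{
        uint64_t *entry = wd->top + ((vaddr >> 21) & (ENTRIES_PER_TABLE - 1));
        uint64_t *next;

        if (!(*entry & ENTRY_PRESENT)) {
                /* "none" case: carve a zeroed table from the reserved area. */
                next = wd->pgtable_area;
                memset(next, 0, sizeof(*next) * ENTRIES_PER_TABLE);
                wd->pgtable_area += ENTRIES_PER_TABLE;
                /* Identity mapping: the pointer value doubles as the address. */
                *entry = ENTRY_PRESENT | (uint64_t)(uintptr_t)next;
        }

        return (uint64_t *)(uintptr_t)(*entry & ~ENTRY_PRESENT);
}

int main(void)
{
        static uint64_t top[ENTRIES_PER_TABLE];
        static uint64_t area[2 * ENTRIES_PER_TABLE];
        struct walk_data wd = { .top = top, .pgtable_area = area };

        /* A second walk of the same address reuses the allocated table. */
        uint64_t *a = prepare_next_level(&wd, 0x200000);
        uint64_t *b = prepare_next_level(&wd, 0x200000);
        printf("allocated once, reused: %s\n", a == b ? "yes" : "no");
        return 0;
}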
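
One fix applied above: the memset() and pointer bump in sme_populate_pgd() must use sizeof(*pte), the size of a PTE entry, not sizeof(pte), the size of a pointer. On x86-64 the two happen to both be 8 bytes, which is why the typo would go unnoticed there; a tiny standalone demonstration (the wrapper struct mirrors the kernel's pte_t, nothing else is kernel code):

#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;    /* mirrors the kernel's wrapper type */

int main(void)
{
        pte_t *pte = NULL;

        /* Entry size vs. pointer size: equal on LP64 targets, not in general. */
        printf("sizeof(*pte) = %zu, sizeof(pte) = %zu\n",
               sizeof(*pte), sizeof(pte));
        return 0;
}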