Skip to content

Commit 9bae319

Browse files
joergroedel authored and KAGA-KOKO committed
x86/ldt: Split out sanity check in map_ldt_struct()
This splits out the mapping sanity check and the actual mapping of the LDT to user-space from the map_ldt_struct() function, so that it is re-usable for PAE paging. Signed-off-by: Joerg Roedel <jroedel@suse.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Pavel Machek <pavel@ucw.cz> Cc: "H . Peter Anvin" <hpa@zytor.com> Cc: linux-mm@kvack.org Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Jiri Kosina <jkosina@suse.cz> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Brian Gerst <brgerst@gmail.com> Cc: David Laight <David.Laight@aculab.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Eduardo Valentin <eduval@amazon.com> Cc: Greg KH <gregkh@linuxfoundation.org> Cc: Will Deacon <will.deacon@arm.com> Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Waiman Long <llong@redhat.com> Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca> Cc: joro@8bytes.org Link: https://lkml.kernel.org/r/1531906876-13451-36-git-send-email-joro@8bytes.org
1 parent 8195d86 commit 9bae319

File tree

1 file changed

+58
-24
lines changed

1 file changed

+58
-24
lines changed

arch/x86/kernel/ldt.c

Lines changed: 58 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,49 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
100100
return new_ldt;
101101
}
102102

103+
#ifdef CONFIG_PAGE_TABLE_ISOLATION
104+
105+
static void do_sanity_check(struct mm_struct *mm,
106+
bool had_kernel_mapping,
107+
bool had_user_mapping)
108+
{
109+
if (mm->context.ldt) {
110+
/*
111+
* We already had an LDT. The top-level entry should already
112+
* have been allocated and synchronized with the usermode
113+
* tables.
114+
*/
115+
WARN_ON(!had_kernel_mapping);
116+
if (static_cpu_has(X86_FEATURE_PTI))
117+
WARN_ON(!had_user_mapping);
118+
} else {
119+
/*
120+
* This is the first time we're mapping an LDT for this process.
121+
* Sync the pgd to the usermode tables.
122+
*/
123+
WARN_ON(had_kernel_mapping);
124+
if (static_cpu_has(X86_FEATURE_PTI))
125+
WARN_ON(had_user_mapping);
126+
}
127+
}
128+
129+
static void map_ldt_struct_to_user(struct mm_struct *mm)
130+
{
131+
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
132+
133+
if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
134+
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
135+
}
136+
137+
static void sanity_check_ldt_mapping(struct mm_struct *mm)
138+
{
139+
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
140+
bool had_kernel = (pgd->pgd != 0);
141+
bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
142+
143+
do_sanity_check(mm, had_kernel, had_user);
144+
}
145+
103146
/*
104147
* If PTI is enabled, this maps the LDT into the kernelmode and
105148
* usermode tables for the given mm.
@@ -115,9 +158,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
115158
static int
116159
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
117160
{
118-
#ifdef CONFIG_PAGE_TABLE_ISOLATION
119-
bool is_vmalloc, had_top_level_entry;
120161
unsigned long va;
162+
bool is_vmalloc;
121163
spinlock_t *ptl;
122164
pgd_t *pgd;
123165
int i;
@@ -131,13 +173,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
131173
*/
132174
WARN_ON(ldt->slot != -1);
133175

176+
/* Check if the current mappings are sane */
177+
sanity_check_ldt_mapping(mm);
178+
134179
/*
135180
* Did we already have the top level entry allocated? We can't
136181
* use pgd_none() for this because it doens't do anything on
137182
* 4-level page table kernels.
138183
*/
139184
pgd = pgd_offset(mm, LDT_BASE_ADDR);
140-
had_top_level_entry = (pgd->pgd != 0);
141185

142186
is_vmalloc = is_vmalloc_addr(ldt->entries);
143187

@@ -172,35 +216,25 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
172216
pte_unmap_unlock(ptep, ptl);
173217
}
174218

175-
if (mm->context.ldt) {
176-
/*
177-
* We already had an LDT. The top-level entry should already
178-
* have been allocated and synchronized with the usermode
179-
* tables.
180-
*/
181-
WARN_ON(!had_top_level_entry);
182-
if (static_cpu_has(X86_FEATURE_PTI))
183-
WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
184-
} else {
185-
/*
186-
* This is the first time we're mapping an LDT for this process.
187-
* Sync the pgd to the usermode tables.
188-
*/
189-
WARN_ON(had_top_level_entry);
190-
if (static_cpu_has(X86_FEATURE_PTI)) {
191-
WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
192-
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
193-
}
194-
}
219+
/* Propagate LDT mapping to the user page-table */
220+
map_ldt_struct_to_user(mm);
195221

196222
va = (unsigned long)ldt_slot_va(slot);
197223
flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
198224

199225
ldt->slot = slot;
200-
#endif
201226
return 0;
202227
}
203228

229+
#else /* !CONFIG_PAGE_TABLE_ISOLATION */
230+
231+
static int
232+
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
233+
{
234+
return 0;
235+
}
236+
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
237+
204238
static void free_ldt_pgtables(struct mm_struct *mm)
205239
{
206240
#ifdef CONFIG_PAGE_TABLE_ISOLATION

0 commit comments

Comments (0)