
Commit b7ceaec

amluto authored and rkrcmar committed
x86/asm: Tidy up TSS limit code
In an earlier version of the patch ("x86/kvm/vmx: Defer TR reload after VM exit") that introduced TSS limit validity tracking, I confused which helper was which. On reflection, the names I chose sucked. Rename the helpers to make it more obvious what's going on and add some comments. While I'm at it, clear __tss_limit_invalid when force-reloading as well as when conditionally reloading, since any TR reload fixes the limit.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
1 parent e3736c3 commit b7ceaec

3 files changed: +21 -11 lines changed
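
Before the per-file hunks, here is a minimal standalone sketch of the lazy-invalidation pattern that the renamed helpers implement. It is a userspace toy, not kernel code: the plain bool stands in for the per-CPU __tss_limit_invalid variable, the printf stands in for the real GDT write plus LTR, the task_uses_io_bitmap parameter is a simplified stand-in for the TIF_IO_BITMAP check, and the function names only mirror the helpers in the desc.h hunk below. The one behavioral point it illustrates is the commit's second change: any forced reload marks the limit valid again, so the flag is cleared on both the forced and the lazy path.

/* Toy model of the TSS-limit tracking pattern; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool tss_limit_invalid;			/* models per-CPU __tss_limit_invalid */

static void force_reload_tr(void)		/* models force_reload_TR() */
{
	printf("reloading TR (expensive)\n");	/* stands in for the GDT rewrite + ltr */
	tss_limit_invalid = false;		/* any TR reload fixes the limit */
}

/* Models invalidate_tss_limit(): cheap unless the task needs an accurate limit now. */
static void invalidate_tss_limit(bool task_uses_io_bitmap)
{
	if (task_uses_io_bitmap)
		force_reload_tr();
	else
		tss_limit_invalid = true;	/* defer the reload until it matters */
}

/* Models refresh_tss_limit(): reload only if something invalidated the limit earlier. */
static void refresh_tss_limit(void)
{
	if (tss_limit_invalid)
		force_reload_tr();
}

int main(void)
{
	invalidate_tss_limit(false);	/* cheap: just sets the flag */
	refresh_tss_limit();		/* reloads once and clears the flag */
	refresh_tss_limit();		/* no-op: limit already valid */
	return 0;
}

Invalidation is meant to be cheap (the earlier patch named in the commit message defers the TR reload that would otherwise happen after every VM exit), while a correct limit is only needed for tasks with an IO bitmap, so the expensive reload is deferred until refresh_tss_limit() actually runs for such a task.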


arch/x86/include/asm/desc.h

Lines changed: 11 additions & 7 deletions
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
 }
 
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
 static inline void force_reload_TR(void)
 {
 	struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
 	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	load_TR_desc();
+	this_cpu_write(__tss_limit_invalid, false);
 }
 
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
 {
 	DEBUG_LOCKS_WARN_ON(preemptible());
 
-	if (unlikely(this_cpu_read(need_tr_refresh))) {
+	if (unlikely(this_cpu_read(__tss_limit_invalid)))
 		force_reload_TR();
-		this_cpu_write(need_tr_refresh, false);
-	}
 }
 
 /*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
 	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
 		force_reload_TR();
 	else
-		this_cpu_write(need_tr_refresh, true);
+		this_cpu_write(__tss_limit_invalid, true);
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)

arch/x86/kernel/ioport.c

Lines changed: 7 additions & 1 deletion
@@ -47,8 +47,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 		t->io_bitmap_ptr = bitmap;
 		set_thread_flag(TIF_IO_BITMAP);
 
+		/*
+		 * Now that we have an IO bitmap, we need our TSS limit to be
+		 * correct. It's fine if we are preempted after doing this:
+		 * with TIF_IO_BITMAP set, context switches will keep our TSS
+		 * limit correct.
+		 */
 		preempt_disable();
-		refresh_TR();
+		refresh_tss_limit();
 		preempt_enable();
 	}
 

arch/x86/kernel/process.c

Lines changed: 3 additions & 3 deletions
@@ -65,8 +65,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
 
 /*
  * this gets called so that we can store lazy state into memory and copy the
@@ -218,7 +218,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 * Make sure that the TSS limit is correct for the CPU
 		 * to notice the IO bitmap.
 		 */
-		refresh_TR();
+		refresh_tss_limit();
 	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
 		/*
 		 * Clear any possible leftover bits:
