Skip to content

Commit 9581d44

Browse files
avikivity authored and matosatti committed
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
1 parent 2b666ca commit 9581d44

File tree

3 files changed

+19
-44
lines changed

3 files changed

+19
-44
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -652,37 +652,13 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
652652
return (struct kvm_mmu_page *)page_private(page);
653653
}
654654

655-
static inline u16 kvm_read_fs(void)
656-
{
657-
u16 seg;
658-
asm("mov %%fs, %0" : "=g"(seg));
659-
return seg;
660-
}
661-
662-
static inline u16 kvm_read_gs(void)
663-
{
664-
u16 seg;
665-
asm("mov %%gs, %0" : "=g"(seg));
666-
return seg;
667-
}
668-
669655
static inline u16 kvm_read_ldt(void)
670656
{
671657
u16 ldt;
672658
asm("sldt %0" : "=g"(ldt));
673659
return ldt;
674660
}
675661

676-
static inline void kvm_load_fs(u16 sel)
677-
{
678-
asm("mov %0, %%fs" : : "rm"(sel));
679-
}
680-
681-
static inline void kvm_load_gs(u16 sel)
682-
{
683-
asm("mov %0, %%gs" : : "rm"(sel));
684-
}
685-
686662
static inline void kvm_load_ldt(u16 sel)
687663
{
688664
asm("lldt %0" : : "rm"(sel));

arch/x86/kvm/svm.c

Lines changed: 10 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
31633163
sync_lapic_to_cr8(vcpu);
31643164

31653165
save_host_msrs(vcpu);
3166-
fs_selector = kvm_read_fs();
3167-
gs_selector = kvm_read_gs();
3166+
savesegment(fs, fs_selector);
3167+
savesegment(gs, gs_selector);
31683168
ldt_selector = kvm_read_ldt();
31693169
svm->vmcb->save.cr2 = vcpu->arch.cr2;
31703170
/* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
32513251
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
32523252
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
32533253

3254-
kvm_load_fs(fs_selector);
3255-
kvm_load_gs(gs_selector);
3256-
kvm_load_ldt(ldt_selector);
32573254
load_host_msrs(vcpu);
3255+
loadsegment(fs, fs_selector);
3256+
#ifdef CONFIG_X86_64
3257+
load_gs_index(gs_selector);
3258+
wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
3259+
#else
3260+
loadsegment(gs, gs_selector);
3261+
#endif
3262+
kvm_load_ldt(ldt_selector);
32583263

32593264
reload_tss(vcpu);
32603265

arch/x86/kvm/vmx.c

Lines changed: 9 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -803,15 +803,15 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
803803
*/
804804
vmx->host_state.ldt_sel = kvm_read_ldt();
805805
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
806-
vmx->host_state.fs_sel = kvm_read_fs();
806+
savesegment(fs, vmx->host_state.fs_sel);
807807
if (!(vmx->host_state.fs_sel & 7)) {
808808
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
809809
vmx->host_state.fs_reload_needed = 0;
810810
} else {
811811
vmcs_write16(HOST_FS_SELECTOR, 0);
812812
vmx->host_state.fs_reload_needed = 1;
813813
}
814-
vmx->host_state.gs_sel = kvm_read_gs();
814+
savesegment(gs, vmx->host_state.gs_sel);
815815
if (!(vmx->host_state.gs_sel & 7))
816816
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
817817
else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
841841

842842
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
843843
{
844-
unsigned long flags;
845-
846844
if (!vmx->host_state.loaded)
847845
return;
848846

849847
++vmx->vcpu.stat.host_state_reload;
850848
vmx->host_state.loaded = 0;
851849
if (vmx->host_state.fs_reload_needed)
852-
kvm_load_fs(vmx->host_state.fs_sel);
850+
loadsegment(fs, vmx->host_state.fs_sel);
853851
if (vmx->host_state.gs_ldt_reload_needed) {
854852
kvm_load_ldt(vmx->host_state.ldt_sel);
855-
/*
856-
* If we have to reload gs, we must take care to
857-
* preserve our gs base.
858-
*/
859-
local_irq_save(flags);
860-
kvm_load_gs(vmx->host_state.gs_sel);
861853
#ifdef CONFIG_X86_64
862-
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
854+
load_gs_index(vmx->host_state.gs_sel);
855+
wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
856+
#else
857+
loadsegment(gs, vmx->host_state.gs_sel);
863858
#endif
864-
local_irq_restore(flags);
865859
}
866860
reload_tss();
867861
#ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
25892583
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
25902584
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
25912585
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2592-
vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
2593-
vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
2586+
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
2587+
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
25942588
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
25952589
#ifdef CONFIG_X86_64
25962590
rdmsrl(MSR_FS_BASE, a);

0 commit comments

Comments (0)