Commit 7dcd575

vittyvk authored and bonzini committed
x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed
MMU reconfiguration in init_kvm_tdp_mmu()/kvm_init_shadow_mmu() can be avoided if the source data used to configure it didn't change; enhance MMU extended role with the required fields and consolidate common code in kvm_calc_mmu_role_common().

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent a336282 commit 7dcd575
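In outline, the patch makes each MMU init path compute the would-be role first and bail out early when it matches the cached one. A minimal sketch of that pattern, paraphrasing the diff below (init_mmu() is a stand-in name; the real logic lives in init_kvm_tdp_mmu() and kvm_init_shadow_mmu()):

static void init_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.mmu;
	/* Recompute the full role (base + extended bits) from current
	 * guest state (CR0/CR4 bits, SMM, nested guest mode, ...). */
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, false);

	new_role.base.word &= mmu_base_role_mask.word;

	/* ext.valid is always set, so a freshly computed role can never
	 * compare equal to a zero-initialized (never configured) one. */
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;	/* source data unchanged: skip reconfiguration */

	context->mmu_role.as_u64 = new_role.as_u64;
	/* ... (re)install page_fault/sync_page/invlpg callbacks, etc. ... */
}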

File tree

2 files changed: +63 -34 lines changed

  arch/x86/include/asm/kvm_host.h
  arch/x86/kvm/mmu.c

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions

@@ -293,10 +293,12 @@ union kvm_mmu_extended_role {
 	struct {
 		unsigned int valid:1;
 		unsigned int execonly:1;
+		unsigned int cr0_pg:1;
 		unsigned int cr4_pse:1;
 		unsigned int cr4_pke:1;
 		unsigned int cr4_smap:1;
 		unsigned int cr4_smep:1;
+		unsigned int cr4_la57:1;
 	};
 };
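For reference, these two new bits extend union kvm_mmu_extended_role, which the parent commit a336282 pairs with the base page role inside union kvm_mmu_role so both halves can be compared in one go; its definition there is roughly:

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;	/* bits that select a shadow page cache */
		union kvm_mmu_extended_role ext;	/* extra source data for the reuse check */
	};
};

With cr0_pg and cr4_la57 folded into ext, any guest change that would demand a different paging setup also changes as_u64, making the equality check in the mmu.c hunks below a safe reuse test.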

arch/x86/kvm/mmu.c

Lines changed: 61 additions & 34 deletions

@@ -4728,37 +4728,61 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
 {
 	union kvm_mmu_extended_role ext = {0};
 
+	ext.cr0_pg = !!is_paging(vcpu);
 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	ext.cr4_pse = !!is_pse(vcpu);
 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
 
 	ext.valid = 1;
 
 	return ext;
 }
 
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+						   bool base_only)
+{
+	union kvm_mmu_role role = {0};
+
+	role.base.access = ACC_ALL;
+	role.base.nxe = !!is_nx(vcpu);
+	role.base.cr4_pae = !!is_pae(vcpu);
+	role.base.cr0_wp = is_write_protection(vcpu);
+	role.base.smm = is_smm(vcpu);
+	role.base.guest_mode = is_guest_mode(vcpu);
+
+	if (base_only)
+		return role;
+
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+	return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 {
-	union kvm_mmu_page_role role = {0};
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.ad_disabled = (shadow_accessed_mask == 0);
-	role.level = kvm_x86_ops->get_tdp_level(vcpu);
-	role.direct = true;
-	role.access = ACC_ALL;
+	role.base.ad_disabled = (shadow_accessed_mask == 0);
+	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+	role.base.direct = true;
 
 	return role;
 }
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
+
+	context->mmu_role.as_u64 = new_role.as_u64;
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;

@@ -4798,36 +4822,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
-{
-	union kvm_mmu_page_role role = {0};
-	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-
-	role.nxe = is_nx(vcpu);
-	role.cr4_pae = !!is_pae(vcpu);
-	role.cr0_wp = is_write_protection(vcpu);
-	role.smep_andnot_wp = smep && !is_write_protection(vcpu);
-	role.smap_andnot_wp = smap && !is_write_protection(vcpu);
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.direct = !is_paging(vcpu);
-	role.access = ACC_ALL;
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+	role.base.smep_andnot_wp = role.ext.cr4_smep &&
+		!is_write_protection(vcpu);
+	role.base.smap_andnot_wp = role.ext.cr4_smap &&
+		!is_write_protection(vcpu);
+	role.base.direct = !is_paging(vcpu);
 
 	if (!is_long_mode(vcpu))
-		role.level = PT32E_ROOT_LEVEL;
+		role.base.level = PT32E_ROOT_LEVEL;
 	else if (is_la57_mode(vcpu))
-		role.level = PT64_ROOT_5LEVEL;
+		role.base.level = PT64_ROOT_5LEVEL;
 	else
-		role.level = PT64_ROOT_4LEVEL;
+		role.base.level = PT64_ROOT_4LEVEL;
 
 	return role;
 }
 
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);

@@ -4838,8 +4862,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	else
 		paging32_init_context(vcpu, context);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

@@ -4977,10 +5000,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 {
+	union kvm_mmu_role role;
+
 	if (tdp_enabled)
-		return kvm_calc_tdp_mmu_root_page_role(vcpu);
+		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
 	else
-		return kvm_calc_shadow_mmu_root_page_role(vcpu);
+		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+	return role.base;
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
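One design note on the new base_only parameter: kvm_mmu_calc_root_page_role() only ever consumes role.base, so it passes base_only=true and kvm_calc_mmu_role_common() returns before calling kvm_calc_mmu_role_ext(), skipping the extra CR0/CR4 reads. A usage sketch (consume_base_role() is a hypothetical stand-in for callers that need only the base half):

	/* Cheap path: only role.base is filled in; role.ext stays zeroed
	 * (ext.valid == 0), so this value must not be compared against a
	 * cached full role. */
	union kvm_mmu_role role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);

	consume_base_role(role.base);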
