@@ -4728,37 +4728,61 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
 {
 	union kvm_mmu_extended_role ext = {0};
 
+	ext.cr0_pg = !!is_paging(vcpu);
 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	ext.cr4_pse = !!is_pse(vcpu);
 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
 
 	ext.valid = 1;
 
 	return ext;
 }
 
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+						   bool base_only)
+{
+	union kvm_mmu_role role = {0};
+
+	role.base.access = ACC_ALL;
+	role.base.nxe = !!is_nx(vcpu);
+	role.base.cr4_pae = !!is_pae(vcpu);
+	role.base.cr0_wp = is_write_protection(vcpu);
+	role.base.smm = is_smm(vcpu);
+	role.base.guest_mode = is_guest_mode(vcpu);
+
+	if (base_only)
+		return role;
+
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+	return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 {
-	union kvm_mmu_page_role role = {0};
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.ad_disabled = (shadow_accessed_mask == 0);
-	role.level = kvm_x86_ops->get_tdp_level(vcpu);
-	role.direct = true;
-	role.access = ACC_ALL;
+	role.base.ad_disabled = (shadow_accessed_mask == 0);
+	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+	role.base.direct = true;
 
 	return role;
 }
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
+
+	context->mmu_role.as_u64 = new_role.as_u64;
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
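For reference, the hunk above hinges on the new union kvm_mmu_role type, which packs the pre-existing base page role and the new extended role into a single 64-bit value, so the whole role can be compared with one new_role.as_u64 == context->mmu_role.as_u64 check; setting ext.valid = 1 (visible in the context lines above) keeps a populated extended role from comparing equal to a zero-initialized one. The real definitions live in arch/x86/include/asm/kvm_host.h and are not part of this diff; the snippet below is only a simplified, self-contained sketch of the packing-and-compare idea, with invented type names and a reduced set of fields, not the kernel's actual layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's role unions (layout assumed). */
union page_role {
	uint32_t word;
	struct {
		unsigned level      : 4;
		unsigned direct     : 1;
		unsigned cr0_wp     : 1;
		unsigned smm        : 1;
		unsigned guest_mode : 1;
	};
};

union extended_role {
	uint32_t word;
	struct {
		unsigned valid    : 1;
		unsigned cr0_pg   : 1;
		unsigned cr4_smep : 1;
		unsigned cr4_smap : 1;
		unsigned cr4_la57 : 1;
	};
};

/* Base and extended halves share one u64, so a full-role compare is cheap. */
union mmu_role {
	uint64_t as_u64;
	struct {
		union page_role base;
		union extended_role ext;
	};
};

/* Reconfiguration is only needed when the combined role actually changed. */
static bool role_changed(union mmu_role cur, union mmu_role next)
{
	return cur.as_u64 != next.as_u64;
}

int main(void)
{
	union mmu_role cur = { .as_u64 = 0 };
	union mmu_role next = { .as_u64 = 0 };

	next.base.level = 4;
	next.ext.valid = 1;
	next.ext.cr4_smep = 1;

	printf("role changed: %s\n", role_changed(cur, next) ? "yes" : "no");
	return 0;
}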
@@ -4798,36 +4822,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
-{
-	union kvm_mmu_page_role role = {0};
-	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-
-	role.nxe = is_nx(vcpu);
-	role.cr4_pae = !!is_pae(vcpu);
-	role.cr0_wp = is_write_protection(vcpu);
-	role.smep_andnot_wp = smep && !is_write_protection(vcpu);
-	role.smap_andnot_wp = smap && !is_write_protection(vcpu);
-	role.guest_mode = is_guest_mode(vcpu);
-	role.smm = is_smm(vcpu);
-	role.direct = !is_paging(vcpu);
-	role.access = ACC_ALL;
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+	role.base.smep_andnot_wp = role.ext.cr4_smep &&
+		!is_write_protection(vcpu);
+	role.base.smap_andnot_wp = role.ext.cr4_smap &&
+		!is_write_protection(vcpu);
+	role.base.direct = !is_paging(vcpu);
 
 	if (!is_long_mode(vcpu))
-		role.level = PT32E_ROOT_LEVEL;
+		role.base.level = PT32E_ROOT_LEVEL;
 	else if (is_la57_mode(vcpu))
-		role.level = PT64_ROOT_5LEVEL;
+		role.base.level = PT64_ROOT_5LEVEL;
 	else
-		role.level = PT64_ROOT_4LEVEL;
+		role.base.level = PT64_ROOT_4LEVEL;
 
 	return role;
 }
 
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -4838,8 +4862,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	else
 		paging32_init_context(vcpu, context);
 
-	context->mmu_role.base.word = mmu_base_role_mask.word &
-				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -4977,10 +5000,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 {
+	union kvm_mmu_role role;
+
 	if (tdp_enabled)
-		return kvm_calc_tdp_mmu_root_page_role(vcpu);
+		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
 	else
-		return kvm_calc_shadow_mmu_root_page_role(vcpu);
+		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+	return role.base;
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
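The last hunk also shows what the base_only flag is for: kvm_mmu_calc_root_page_role() only needs the architectural page role, so it asks the helpers to skip filling the extended bits and returns just role.base. Below is a minimal sketch of that early-return pattern, again with invented, simplified types rather than the kernel's real ones.

#include <stdbool.h>
#include <stdio.h>

/* Invented, simplified stand-ins for the kernel's role types. */
struct base_role { unsigned level; unsigned direct; };
struct ext_role  { unsigned valid; unsigned cr4_smep; };

struct mmu_role {
	struct base_role base;
	struct ext_role  ext;
};

/* Base bits are always filled; extended bits only when the caller wants them. */
static struct mmu_role calc_role_common(bool base_only)
{
	struct mmu_role role = {0};

	role.base.level  = 4;
	role.base.direct = 1;
	if (base_only)
		return role;	/* ext stays zeroed and is never consulted */

	role.ext.valid    = 1;
	role.ext.cr4_smep = 1;
	return role;
}

/* Mirrors kvm_mmu_calc_root_page_role(): only the base half is returned. */
static struct base_role calc_root_page_role(void)
{
	return calc_role_common(true).base;
}

int main(void)
{
	struct base_role r = calc_root_page_role();

	printf("root page role: level=%u direct=%u\n", r.level, r.direct);
	return 0;
}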