@@ -4724,6 +4724,20 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+{
+	union kvm_mmu_extended_role ext = {0};
+
+	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+	ext.cr4_pse = !!is_pse(vcpu);
+	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+
+	ext.valid = 1;
+
+	return ext;
+}
+
 static union kvm_mmu_page_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 {
@@ -4830,19 +4844,23 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+				   bool execonly)
 {
-	union kvm_mmu_page_role role;
+	union kvm_mmu_role role;
 
-	/* Role is inherited from root_mmu */
-	role.word = vcpu->arch.root_mmu.base_role.word;
+	/* Base role is inherited from root_mmu */
+	role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
 
-	role.level = PT64_ROOT_4LEVEL;
-	role.direct = false;
-	role.ad_disabled = !accessed_dirty;
-	role.guest_mode = true;
-	role.access = ACC_ALL;
+	role.base.level = PT64_ROOT_4LEVEL;
+	role.base.direct = false;
+	role.base.ad_disabled = !accessed_dirty;
+	role.base.guest_mode = true;
+	role.base.access = ACC_ALL;
+
+	role.ext.execonly = execonly;
 
 	return role;
 }
@@ -4851,10 +4869,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
-	union kvm_mmu_page_role root_page_role =
-		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+						   execonly);
+
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+
+	new_role.base.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
 	context->shadow_root_level = PT64_ROOT_4LEVEL;
 
 	context->nx = true;
@@ -4866,8 +4890,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->update_pte = ept_update_pte;
 	context->root_level = PT64_ROOT_4LEVEL;
 	context->direct_map = false;
-	context->mmu_role.base.word =
-		root_page_role.word & mmu_base_role_mask.word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
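
Note on the new types: the hunks above use new_role.base, new_role.ext and new_role.as_u64, whose definitions are not part of this hunk set. Presumably the same change introduces them in arch/x86/include/asm/kvm_host.h along these lines (a sketch of the assumed layout, not the verbatim header diff):

union kvm_mmu_extended_role {
	/*
	 * Caches CR4 state that affects MMU configuration but is not part
	 * of kvm_mmu_page_role; @valid is set on first calculation so an
	 * all-zero mmu_role is never mistaken for valid cached data.
	 */
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;	/* the pre-existing page role */
		union kvm_mmu_extended_role ext;
	};
};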
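The point of packing both roles into one 64-bit word is the early return added to kvm_init_shadow_ept_mmu(): a single as_u64 compare covers the base page role and the extended CR4-derived bits, so MMU reconfiguration is skipped when nothing relevant changed. A minimal self-contained sketch of that pattern (illustrative names only, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for kvm_mmu_page_role and kvm_mmu_extended_role. */
union demo_role {
	uint64_t as_u64;
	struct {
		uint32_t base;		/* stands in for the base role word */
		uint32_t cr4_smep:1;	/* "extended" state bits */
		uint32_t cr4_smap:1;
		uint32_t valid:1;	/* nonzero once computed */
	};
};

static union demo_role calc_role(uint32_t base, int smep, int smap)
{
	union demo_role r;

	memset(&r, 0, sizeof(r));	/* zero padding so as_u64 compares are exact */
	r.base = base;
	r.cr4_smep = !!smep;
	r.cr4_smap = !!smap;
	r.valid = 1;
	return r;
}

int main(void)
{
	union demo_role cur = calc_role(0x7, 1, 0);
	union demo_role next = calc_role(0x7, 1, 0);

	/* One compare covers both the base and the extended bits. */
	if (next.as_u64 == cur.as_u64)
		printf("role unchanged: skip MMU reconfiguration\n");
	else
		printf("role changed: reinitialize MMU context\n");
	return 0;
}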