@@ -46,11 +46,6 @@ enum {
 	PORT_DONE,
 };
 
-struct vmx_page {
-	vm_vaddr_t virt;
-	vm_paddr_t phys;
-};
-
 enum {
 	VMXON_PAGE = 0,
 	VMCS_PAGE,
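The removed vmx_page descriptor (and the VMXON_PAGE/VMCS_PAGE enum that indexes an array of them, still visible as context above) is superseded by the library's struct vmx_pages, which tracks each VMX region by guest-virtual, guest-physical, and host-virtual address. A rough sketch of the shape that type presumably takes; the field names are inferred from how the test uses them, not copied from the library header:

/* Illustrative sketch only; the real definition lives in the selftest
 * library (include/vmx.h) and may differ in fields and layout. */
struct vmx_pages {
	void *vmxon_hva;	/* host address, for setup from the test */
	uint64_t vmxon_gpa;	/* guest-physical address, passed to vmxon */
	void *vmxon;		/* guest-virtual address */

	void *vmcs_hva;
	uint64_t vmcs_gpa;
	void *vmcs;

	void *msr_hva;		/* MSR bitmap page */
	uint64_t msr_gpa;
	void *msr;
};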
@@ -67,9 +62,6 @@ struct kvm_single_msr {
 /* The virtual machine object. */
 static struct kvm_vm *vm;
 
-/* Array of vmx_page descriptors that is shared with the guest. */
-struct vmx_page *vmx_pages;
-
 #define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
 static void do_exit_to_l0(uint16_t port, unsigned long arg)
 {
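For context, exit_to_l0() is the test's port-I/O "hypercall": the guest executes an in instruction on a well-known port with the argument in a register, which L0 observes as a KVM_EXIT_IO and decodes in main(). The body following the brace above plausibly looks like this; the register conventions are assumptions drawn from the exit handling, not part of this diff:

static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	/* Sketch: the port selects the operation (PORT_ABORT, PORT_REPORT,
	 * PORT_DONE) and the argument rides in RDI, where the host side
	 * can fish it out of the vCPU registers after the I/O exit. */
	__asm__ __volatile__("in %[port], %%al"
			     : /* no outputs */
			     : [port] "d" (port), "D" (arg)
			     : "rax");
}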
@@ -105,7 +97,7 @@ static void l2_guest_code(void)
 	__asm__ __volatile__("vmcall");
 }
 
-static void l1_guest_code(struct vmx_page *vmx_pages)
+static void l1_guest_code(struct vmx_pages *vmx_pages)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
@@ -116,23 +108,14 @@ static void l1_guest_code(struct vmx_page *vmx_pages)
 	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
 	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
-	prepare_for_vmx_operation();
-
-	/* Enter VMX root operation. */
-	*(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
-	GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));
-
-	/* Load a VMCS. */
-	*(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
-	GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
-	GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));
+	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
 
 	/* Prepare the VMCS for L2 execution. */
-	prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+	prepare_vmcs(vmx_pages, l2_guest_code,
+		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
 	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
 	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
-	vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
 	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
 
 	/* Jump into L2. First, test failure to load guest CR3. */
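The inline sequence deleted here (stamp the VMCS revision id, vmxon, vmclear, vmptrld) is exactly what the new library helper must perform against the descriptor, returning success rather than asserting, given that the call site wraps it in GUEST_ASSERT(). A minimal sketch under that assumption; the CR4 handling and helper names like get_cr4()/set_cr4() are guesses, and the real body lives in the library, not in this patch:

bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
	/* VMX must be enabled in CR4 before vmxon will succeed. */
	set_cr4(get_cr4() | X86_CR4_VMXE);

	/* Enter VMX root operation. */
	*(uint32_t *)vmx->vmxon = vmcs_revision();
	if (vmxon(vmx->vmxon_gpa))
		return false;

	/* Load a VMCS, mirroring the inline code removed above. */
	*(uint32_t *)vmx->vmcs = vmcs_revision();
	if (vmclear(vmx->vmcs_gpa))
		return false;
	if (vmptrld(vmx->vmcs_gpa))
		return false;

	return true;
}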
@@ -152,33 +135,6 @@ static void l1_guest_code(struct vmx_page *vmx_pages)
 	exit_to_l0(PORT_DONE, 0);
 }
 
-static void allocate_vmx_page(struct vmx_page *page)
-{
-	vm_vaddr_t virt;
-
-	virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
-	memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);
-
-	page->virt = virt;
-	page->phys = addr_gva2gpa(vm, virt);
-}
-
-static vm_vaddr_t allocate_vmx_pages(void)
-{
-	vm_vaddr_t vmx_pages_vaddr;
-	int i;
-
-	vmx_pages_vaddr = vm_vaddr_alloc(
-		vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);
-
-	vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);
-
-	for (i = 0; i < NUM_VMX_PAGES; i++)
-		allocate_vmx_page(&vmx_pages[i]);
-
-	return vmx_pages_vaddr;
-}
-
 void report(int64_t val)
 {
 	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
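Both allocation helpers disappear because the library now owns the job. Judging by the call site in main() below, vcpu_alloc_vmx() returns a host-usable pointer to the descriptor and reports its guest address through an out-parameter. A sketch of a plausible body, following the pattern of the removed code; the alignment argument and the memset are assumptions:

struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

	/* VMXON region, recorded as GVA, GPA and HVA. */
	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

	/* VMCS page, same treatment. */
	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

	/* MSR bitmap, zeroed so no MSR access traps by default. */
	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
	memset(vmx->msr_hva, 0, getpagesize());

	*p_vmx_gva = vmx_gva;
	return vmx;
}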
@@ -187,31 +143,32 @@ void report(int64_t val)
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_vaddr;
+	struct vmx_pages *vmx_pages;
+	vm_vaddr_t vmx_pages_gva;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
 	if (!(entry->ecx & CPUID_VMX)) {
 		fprintf(stderr, "nested VMX not enabled, skipping test\n");
 		exit(KSFT_SKIP);
 	}
 
-	vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
+	vm = vm_create_default(VCPU_ID, (void *) l1_guest_code);
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vmx_pages_vaddr = allocate_vmx_pages();
-	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);
+	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 
 	for (;;) {
 		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
 		struct kvm_regs regs;
 
 		vcpu_run(vm, VCPU_ID);
+		vcpu_regs_get(vm, VCPU_ID, &regs);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
-			    "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+			    "Got exit_reason other than KVM_EXIT_IO: %u (%s), rip=%lx\n",
 			    run->exit_reason,
-			    exit_reason_str(run->exit_reason));
-
-		vcpu_regs_get(vm, VCPU_ID, &regs);
+			    exit_reason_str(run->exit_reason), regs.rip);
 
 		switch (run->io.port) {
 		case PORT_ABORT:
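The hunk cuts off at the top of the port switch. Note why vcpu_regs_get() moved ahead of the TEST_ASSERT: the registers must already be fetched for the failure message to print the guest rip, and the same regs then feed the dispatch. The remainder of the switch presumably continues along these lines; PORT_REPORT and the use of regs.rdi follow the exit_to_l0() convention sketched earlier, not the visible diff:

		switch (run->io.port) {
		case PORT_ABORT:
			TEST_ASSERT(false, "%s", (const char *)regs.rdi);
			break;
		case PORT_REPORT:
			/* exit_to_l0() passed its argument in RDI. */
			report(regs.rdi);
			break;
		case PORT_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
		}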