@@ -193,7 +193,8 @@ extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+/* Storage for pre module init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
 
 static const struct {
 	const char *option;
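
For reference, the vmx_l1d_flush_state values this patch switches over are defined elsewhere in the series (in arch/x86/include/asm/vmx.h). A sketch of that enum, inferred purely from the names used in this diff — the exact member order is an assumption:

enum vmx_l1d_flush_state {
	VMENTER_L1D_FLUSH_AUTO,		/* resolved to 'cond' at setup time */
	VMENTER_L1D_FLUSH_NEVER,
	VMENTER_L1D_FLUSH_COND,
	VMENTER_L1D_FLUSH_ALWAYS,
	VMENTER_L1D_FLUSH_EPT_DISABLED,	/* setup's answer when !enable_ept */
};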
@@ -205,33 +206,85 @@ static const struct {
 	{"always",	VMENTER_L1D_FLUSH_ALWAYS},
 };
 
-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
-	unsigned int i;
+	struct page *page;
 
-	if (!s)
-		return -EINVAL;
+	/* If set to 'auto' select 'cond' */
+	if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+		l1tf = VMENTER_L1D_FLUSH_COND;
 
-	for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
-		if (!strcmp(s, vmentry_l1d_param[i].option)) {
-			vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
-			return 0;
-		}
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
 	}
 
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+		if (!page)
+			return -ENOMEM;
+		vmx_l1d_flush_pages = page_address(page);
+	}
+
+	l1tf_vmx_mitigation = l1tf;
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+		static_branch_enable(&vmx_l1d_should_flush);
+	return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+	unsigned int i;
+
+	if (s) {
+		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+			if (!strcmp(s, vmentry_l1d_param[i].option))
+				return vmentry_l1d_param[i].cmd;
+		}
+	}
 	return -EINVAL;
 }
 
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+	int l1tf;
+
+	if (!boot_cpu_has(X86_BUG_L1TF))
+		return 0;
+
+	l1tf = vmentry_l1d_flush_parse(s);
+	if (l1tf < 0)
+		return l1tf;
+
+	/*
+	 * Has vmx_init() run already? If not then this is the pre init
+	 * parameter parsing. In that case just store the value and let
+	 * vmx_init() do the proper setup after enable_ept has been
+	 * established.
+	 */
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+		vmentry_l1d_flush_param = l1tf;
+		return 0;
+	}
+
+	return vmx_setup_l1d_flush(l1tf);
+}
+
 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 {
-	return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
+	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
 
 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
 	.set = vmentry_l1d_flush_set,
 	.get = vmentry_l1d_flush_get,
 };
-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
 
 struct kvm_vmx {
 	struct kvm kvm;
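
The interplay between vmentry_l1d_flush_set() and vmx_setup_l1d_flush() is a "store early, apply late" scheme: module parameters are parsed before module init runs, so a value that arrives early is parked in vmentry_l1d_flush_param, and l1tf_vmx_mitigation still being VMENTER_L1D_FLUSH_AUTO serves as the "init has not run yet" sentinel. A minimal user-space sketch of the same pattern — every name below is invented for illustration, none of it is kernel code:

#include <stdio.h>
#include <string.h>

enum mode { MODE_AUTO, MODE_NEVER, MODE_COND, MODE_ALWAYS };

static enum mode staged = MODE_AUTO;	/* plays vmentry_l1d_flush_param */
static int initialized;			/* plays the AUTO sentinel check */
static enum mode active = MODE_AUTO;	/* plays l1tf_vmx_mitigation */

static int apply(enum mode m)
{
	/* 'auto' is resolved to the default 'cond' only at apply time */
	active = (m == MODE_AUTO) ? MODE_COND : m;
	return 0;
}

static int set_param(const char *s)
{
	static const char *const names[] = { "auto", "never", "cond", "always" };
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (!strcmp(s, names[i])) {
			if (!initialized) {	/* pre-init: just park the value */
				staged = (enum mode)i;
				return 0;
			}
			return apply((enum mode)i);	/* post-init: apply now */
		}
	}
	return -1;
}

static void late_init(void)
{
	apply(staged);		/* consume the parked value */
	initialized = 1;
}

int main(void)
{
	set_param("never");	/* arrives before init: staged, not applied */
	late_init();		/* staged value becomes active */
	printf("active=%d\n", active);	/* prints active=1 (MODE_NEVER) */
	set_param("always");	/* post-init write takes effect directly */
	printf("active=%d\n", active);	/* prints active=3 (MODE_ALWAYS) */
	return 0;
}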
@@ -9608,7 +9661,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	 * it. The flush bit gets set again either from vcpu_run() or from
 	 * one of the unsafe VMEXIT handlers.
 	 */
-	always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+	always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
 	vcpu->arch.l1tf_flush_l1d = always;
 
 	vcpu->stat.l1d_flush++;
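
The only change in this hunk is that the per-vmentry flush decision now reads the resolved l1tf_vmx_mitigation state rather than the raw module parameter, so an unresolved 'auto' can never reach the hot path.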
@@ -13197,34 +13250,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.enable_smi_window = enable_smi_window,
 };
 
-static int __init vmx_setup_l1d_flush(void)
-{
-	struct page *page;
-
-	if (!boot_cpu_has_bug(X86_BUG_L1TF))
-		return 0;
-
-	if (!enable_ept) {
-		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
-		return 0;
-	}
-
-	l1tf_vmx_mitigation = vmentry_l1d_flush;
-
-	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
-		return 0;
-
-	if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-		if (!page)
-			return -ENOMEM;
-		vmx_l1d_flush_pages = page_address(page);
-	}
-
-	static_branch_enable(&vmx_l1d_should_flush);
-	return 0;
-}
-
 static void vmx_cleanup_l1d_flush(void)
 {
 	if (vmx_l1d_flush_pages) {
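
Worth noting: the helper deleted here was marked __init, so its text was discarded once module init finished and it could never have served the parameter .set path. The non-init replacement added earlier in the patch takes the desired state as an argument, letting the same code back both the parameter callback and vmx_init().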
@@ -13309,12 +13334,18 @@ static int __init vmx_init(void)
 		return r;
 
 	/*
-	 * Must be called after kvm_init() so enable_ept is properly set up
-	 */
-	r = vmx_setup_l1d_flush();
-	if (r) {
-		vmx_exit();
-		return r;
+	 * Must be called after kvm_init() so enable_ept is properly set
+	 * up. Hand the parameter mitigation value in which was stored in
+	 * the pre module init parser. If no parameter was given, it will
+	 * contain 'auto' which will be turned into the default 'cond'
+	 * mitigation mode.
+	 */
+	if (boot_cpu_has(X86_BUG_L1TF)) {
+		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+		if (r) {
+			vmx_exit();
+			return r;
+		}
 	}
 
 #ifdef CONFIG_KEXEC_CORE
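
Usage, as far as this patch defines it: the parameter is supplied before the module initializes, e.g. modprobe kvm_intel vmentry_l1d_flush=never, or kvm-intel.vmentry_l1d_flush=never on the kernel command line for a built-in configuration — which is exactly why the staging variable is needed. Since the getter prints vmentry_l1d_param[l1tf_vmx_mitigation].option, reading /sys/module/kvm_intel/parameters/vmentry_l1d_flush after init (the node is S_IRUGO, read-only) reports the resolved mode, so a requested 'auto' reads back as 'cond'.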