@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 	struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
 		if (IS_ERR(vma)) {
-			return;
+			return PTR_ERR(vma);
 		}
 
 		/* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		if (gmadr_bytes == 8)
 			entry_obj->bb_start_cmd_va[2] = 0;
 	}
+	return 0;
 }
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,20 +421,20 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	struct i915_vma *vma;
 	unsigned char *per_ctx_va =
 		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
 		wa_ctx->indirect_ctx.size;
 
 	if (wa_ctx->indirect_ctx.size == 0)
-		return;
+		return 0;
 
 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
 			0, CACHELINE_BYTES, 0);
 	if (IS_ERR(vma)) {
-		return;
+		return PTR_ERR(vma);
 	}
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	memset(per_ctx_va, 0, CACHELINE_BYTES);
 
 	update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
-
-	intel_vgpu_pin_mm(workload->shadow_mm);
-	intel_vgpu_sync_oos_pages(workload->vgpu);
-	intel_vgpu_flush_post_shadow(workload->vgpu);
-	prepare_shadow_batch_buffer(workload);
-	prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (!workload->emulate_schedule_in)
-		return 0;
-
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	return 0;
 }
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,6 +471,64 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct execlist_ctx_descriptor_format ctx[2];
+	int ring_id = workload->ring_id;
+	int ret;
+
+	ret = intel_vgpu_pin_mm(workload->shadow_mm);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu pin mm\n");
+		goto out;
+	}
+
+	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu sync oos pages\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to flush post shadow\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_batch_buffer(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+		goto err_shadow_batch;
+	}
+
+	if (!workload->emulate_schedule_in)
+		return 0;
+
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	if (!ret)
+		goto out;
+	else
+		gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+	release_shadow_batch_buffer(workload);
+err_unpin_mm:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+	return ret;
+}
+
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -502,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	release_shadow_batch_buffer(workload);
-	release_shadow_wa_ctx(&workload->wa_ctx);
+	if (!workload->status) {
+		release_shadow_batch_buffer(workload);
+		release_shadow_wa_ctx(&workload->wa_ctx);
+	}
 
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
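For reference, the reworked prepare_execlist_workload() above follows the usual kernel goto-unwind style: each setup step that can fail jumps to a label that releases only what was acquired before it, in reverse order. The following is a minimal, self-contained sketch of that pattern, not part of the patch; the step_*() and undo_*() helpers are hypothetical stand-ins for the real GVT calls such as intel_vgpu_pin_mm() and prepare_shadow_batch_buffer().

/* Minimal sketch of goto-based error unwinding (hypothetical helpers). */
#include <stdio.h>

static int step_pin(void)     { return 0; }   /* stand-in for pinning the shadow mm */
static int step_shadow(void)  { return 0; }   /* stand-in for preparing the shadow batch buffer */
static int step_wa_ctx(void)  { return -1; }  /* stand-in for preparing the wa ctx; fails here */

static void undo_shadow(void) { puts("release shadow batch buffer"); }
static void undo_pin(void)    { puts("unpin shadow mm"); }

static int prepare(void)
{
	int ret;

	ret = step_pin();
	if (ret)
		goto out;                /* nothing acquired yet, just return */

	ret = step_shadow();
	if (ret)
		goto err_unpin;          /* undo the pin only */

	ret = step_wa_ctx();
	if (ret)
		goto err_shadow;         /* undo shadow buffer, then the pin */

	return 0;

err_shadow:
	undo_shadow();
err_unpin:
	undo_pin();
out:
	return ret;
}

int main(void)
{
	printf("prepare() = %d\n", prepare());
	return 0;
}

Because step_wa_ctx() fails in this sketch, prepare() falls through err_shadow and err_unpin, releasing both earlier resources before propagating the error code, which mirrors the err_shadow_batch/err_unpin_mm labels in the patch.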