
Commit 0cce282

fred1gao authored and zhenyw committed
drm/i915/gvt: Refine error handling for prepare_execlist_workload
Refine the error handling for prepare_execlist_workload to restore the
original state once an error occurs, and only release the shadowed batch
buffer and wa ctx when the workload completes successfully.

v2:
- split the mixed error paths apart for easier review. (Zhenyu)
v3:
- handle prepare batch buffer/wa ctx pin errors and the
  emulate_schedule_in null issue. (Zhenyu)
v4:
- no need to handle the emulate_schedule_in null issue. (Zhenyu)
v5:
- release the shadowed batch buffer and wa ctx only for a
  successful workload. (Zhenyu)
v6:
- polish the return style. (Zhenyu)

Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
1 parent a3cfdca commit 0cce282
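
The rewritten prepare_execlist_workload() in the diff below uses the kernel's standard goto-based unwind idiom: each step that acquires a resource checks its return value, and on failure jumps to a label that releases, in reverse order, only what the earlier steps already acquired. A minimal standalone sketch of that idiom follows; pin_mm, pin_shadow_bb, and the simulated -ENOMEM are hypothetical stand-ins for illustration, not the actual GVT helpers.

#include <stdio.h>

/* Hypothetical stand-ins for intel_vgpu_pin_mm() and
 * prepare_shadow_batch_buffer(); illustration only, not GVT code. */
static int pin_mm(void)          { return 0; }
static void unpin_mm(void)       { puts("unwound: unpin mm"); }
static int pin_shadow_bb(void)   { return -12; /* simulate -ENOMEM */ }

static int prepare(void)
{
	int ret;

	ret = pin_mm();
	if (ret)
		goto out;		/* nothing acquired yet */

	ret = pin_shadow_bb();
	if (ret)
		goto err_unpin_mm;	/* undo only the mm pin */

	return 0;	/* success: resources released at workload completion */

err_unpin_mm:
	unpin_mm();
out:
	return ret;
}

int main(void)
{
	printf("prepare() = %d\n", prepare());	/* prints "unwound: unpin mm", then -12 */
	return 0;
}

This pairs with the change in complete_execlist_workload(): the shadowed buffers are released only when workload->status reports success, matching the commit's stated rule of releasing them only for a successfully completed workload.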

File tree

1 file changed (+69, -27)

drivers/gpu/drm/i915/gvt/execlist.c

Lines changed: 69 additions & 27 deletions
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 	struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
 		if (IS_ERR(vma)) {
-			return;
+			return PTR_ERR(vma);
 		}
 
 		/* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		if (gmadr_bytes == 8)
 			entry_obj->bb_start_cmd_va[2] = 0;
 	}
+	return 0;
 }
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,20 +421,20 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	struct i915_vma *vma;
 	unsigned char *per_ctx_va =
 		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
 		wa_ctx->indirect_ctx.size;
 
 	if (wa_ctx->indirect_ctx.size == 0)
-		return;
+		return 0;
 
 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
 			0, CACHELINE_BYTES, 0);
 	if (IS_ERR(vma)) {
-		return;
+		return PTR_ERR(vma);
 	}
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	memset(per_ctx_va, 0, CACHELINE_BYTES);
 
 	update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
-
-	intel_vgpu_pin_mm(workload->shadow_mm);
-	intel_vgpu_sync_oos_pages(workload->vgpu);
-	intel_vgpu_flush_post_shadow(workload->vgpu);
-	prepare_shadow_batch_buffer(workload);
-	prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (!workload->emulate_schedule_in)
-		return 0;
-
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	return 0;
 }
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,6 +471,64 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct execlist_ctx_descriptor_format ctx[2];
+	int ring_id = workload->ring_id;
+	int ret;
+
+	ret = intel_vgpu_pin_mm(workload->shadow_mm);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu pin mm\n");
+		goto out;
+	}
+
+	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to vgpu sync oos pages\n");
+		goto err_unpin_mm;
+	}
+
+	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+	if (ret) {
+		gvt_vgpu_err("fail to flush post shadow\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_batch_buffer(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+		goto err_unpin_mm;
+	}
+
+	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+		goto err_shadow_batch;
+	}
+
+	if (!workload->emulate_schedule_in)
+		return 0;
+
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+	if (!ret)
+		goto out;
+	else
+		gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+	release_shadow_batch_buffer(workload);
+err_unpin_mm:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+	return ret;
+}
+
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -502,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
 
-	release_shadow_batch_buffer(workload);
-	release_shadow_wa_ctx(&workload->wa_ctx);
+	if (!workload->status) {
+		release_shadow_batch_buffer(workload);
+		release_shadow_wa_ctx(&workload->wa_ctx);
+	}
 
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
