Skip to content

Commit 2724b2d

Browse files
committed
drm/vmwgfx: Use new validation interface for the modesetting code v2
Strip the old KMS helpers and use the new validation interface also in the modesetting code.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com> #v1
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
1 parent 9c079b8 commit 2724b2d

File tree

4 files changed

+86
-216
lines changed

4 files changed

+86
-216
lines changed

drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

Lines changed: 21 additions & 178 deletions
Original file line numberDiff line numberDiff line change
@@ -2557,88 +2557,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
25572557
}
25582558

25592559
/**
2560-
* vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
2561-
* command submission.
2562-
*
2563-
* @dev_priv: Pointer to a device private structure.
2564-
* @buf: The buffer object
2565-
* @interruptible: Whether to perform waits as interruptible.
2566-
* @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
2567-
* The buffer will be validated as a GMR. Already pinned buffers will not be
2568-
* validated.
2569-
*
2570-
* Returns 0 on success, negative error code on failure, -ERESTARTSYS if
2571-
* interrupted by a signal.
2560+
* vmw_kms_helper_validation_finish - Helper for post KMS command submission
2561+
* cleanup and fencing
2562+
* @dev_priv: Pointer to the device-private struct
2563+
* @file_priv: Pointer identifying the client when user-space fencing is used
2564+
* @ctx: Pointer to the validation context
2565+
* @out_fence: If non-NULL, returned refcounted fence-pointer
2566+
* @user_fence_rep: If non-NULL, pointer to user-space address area
2567+
* in which to copy user-space fence info
25722568
*/
2573-
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
2574-
struct vmw_buffer_object *buf,
2575-
bool interruptible,
2576-
bool validate_as_mob,
2577-
bool for_cpu_blit)
2578-
{
2579-
struct ttm_operation_ctx ctx = {
2580-
.interruptible = interruptible,
2581-
.no_wait_gpu = false};
2582-
struct ttm_buffer_object *bo = &buf->base;
2583-
int ret;
2584-
2585-
ttm_bo_reserve(bo, false, false, NULL);
2586-
if (for_cpu_blit)
2587-
ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
2588-
else
2589-
ret = vmw_validation_bo_validate_single(bo, interruptible,
2590-
validate_as_mob);
2591-
if (ret)
2592-
ttm_bo_unreserve(bo);
2593-
2594-
return ret;
2595-
}
2596-
2597-
/**
2598-
* vmw_kms_helper_buffer_revert - Undo the actions of
2599-
* vmw_kms_helper_buffer_prepare.
2600-
*
2601-
* @buf: Pointer to the buffer object.
2602-
*
2603-
* Helper to be used if an error forces the caller to undo the actions of
2604-
* vmw_kms_helper_buffer_prepare.
2605-
*/
2606-
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
2607-
{
2608-
if (buf)
2609-
ttm_bo_unreserve(&buf->base);
2610-
}
2611-
2612-
/**
2613-
* vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
2614-
* kms command submission.
2615-
*
2616-
* @dev_priv: Pointer to a device private structure.
2617-
* @file_priv: Pointer to a struct drm_file representing the caller's
2618-
* connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
2619-
* if non-NULL, @user_fence_rep must be non-NULL.
2620-
* @buf: The buffer object.
2621-
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2622-
* ref-counted fence pointer is returned here.
2623-
* @user_fence_rep: Optional pointer to a user-space provided struct
2624-
* drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
2625-
* function copies fence data to user-space in a fail-safe manner.
2626-
*/
2627-
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
2628-
struct drm_file *file_priv,
2629-
struct vmw_buffer_object *buf,
2630-
struct vmw_fence_obj **out_fence,
2631-
struct drm_vmw_fence_rep __user *
2632-
user_fence_rep)
2633-
{
2634-
struct vmw_fence_obj *fence;
2569+
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2570+
struct drm_file *file_priv,
2571+
struct vmw_validation_context *ctx,
2572+
struct vmw_fence_obj **out_fence,
2573+
struct drm_vmw_fence_rep __user *
2574+
user_fence_rep)
2575+
{
2576+
struct vmw_fence_obj *fence = NULL;
26352577
uint32_t handle;
26362578
int ret;
26372579

2638-
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2639-
file_priv ? &handle : NULL);
2640-
if (buf)
2641-
vmw_bo_fence_single(&buf->base, fence);
2580+
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2581+
out_fence)
2582+
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2583+
file_priv ? &handle : NULL);
2584+
vmw_validation_done(ctx, fence);
26422585
if (file_priv)
26432586
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
26442587
ret, user_fence_rep, fence,
@@ -2647,106 +2590,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
26472590
*out_fence = fence;
26482591
else
26492592
vmw_fence_obj_unreference(&fence);
2650-
2651-
vmw_kms_helper_buffer_revert(buf);
2652-
}
2653-
2654-
2655-
/**
2656-
* vmw_kms_helper_resource_revert - Undo the actions of
2657-
* vmw_kms_helper_resource_prepare.
2658-
*
2659-
* @ctx: Pointer to the validation context holding the resource (typically a surface).
2660-
*
2661-
* Helper to be used if an error forces the caller to undo the actions of
2662-
* vmw_kms_helper_resource_prepare.
2663-
*/
2664-
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
2665-
{
2666-
struct vmw_resource *res = ctx->res;
2667-
2668-
vmw_kms_helper_buffer_revert(ctx->buf);
2669-
vmw_bo_unreference(&ctx->buf);
2670-
vmw_resource_unreserve(res, false, NULL, 0);
2671-
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2672-
}
2673-
2674-
/**
2675-
* vmw_kms_helper_resource_prepare - Reserve and validate a resource before
2676-
* command submission.
2677-
*
2678-
* @res: Pointer to the resource. Typically a surface.
2679-
* @interruptible: Whether to perform waits as interruptible.
2680-
*
2681-
* Reserves and validates also the backup buffer if a guest-backed resource.
2682-
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
2683-
* interrupted by a signal.
2684-
*/
2685-
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2686-
bool interruptible,
2687-
struct vmw_validation_ctx *ctx)
2688-
{
2689-
int ret = 0;
2690-
2691-
ctx->buf = NULL;
2692-
ctx->res = res;
2693-
2694-
if (interruptible)
2695-
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
2696-
else
2697-
mutex_lock(&res->dev_priv->cmdbuf_mutex);
2698-
2699-
if (unlikely(ret != 0))
2700-
return -ERESTARTSYS;
2701-
2702-
ret = vmw_resource_reserve(res, interruptible, false);
2703-
if (ret)
2704-
goto out_unlock;
2705-
2706-
if (res->backup) {
2707-
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
2708-
interruptible,
2709-
res->dev_priv->has_mob,
2710-
false);
2711-
if (ret)
2712-
goto out_unreserve;
2713-
2714-
ctx->buf = vmw_bo_reference(res->backup);
2715-
}
2716-
ret = vmw_resource_validate(res, interruptible);
2717-
if (ret)
2718-
goto out_revert;
2719-
return 0;
2720-
2721-
out_revert:
2722-
vmw_kms_helper_buffer_revert(ctx->buf);
2723-
out_unreserve:
2724-
vmw_resource_unreserve(res, false, NULL, 0);
2725-
out_unlock:
2726-
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2727-
return ret;
2728-
}
2729-
2730-
/**
2731-
* vmw_kms_helper_resource_finish - Unreserve and fence a resource after
2732-
* kms command submission.
2733-
*
2734-
* @res: Pointer to the resource. Typically a surface.
2735-
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2736-
* ref-counted fence pointer is returned here.
2737-
*/
2738-
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
2739-
struct vmw_fence_obj **out_fence)
2740-
{
2741-
struct vmw_resource *res = ctx->res;
2742-
2743-
if (ctx->buf || out_fence)
2744-
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
2745-
out_fence, NULL);
2746-
2747-
vmw_bo_unreference(&ctx->buf);
2748-
vmw_resource_unreserve(res, false, NULL, 0);
2749-
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
27502593
}
27512594

27522595
/**

drivers/gpu/drm/vmwgfx/vmwgfx_kms.h

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
308308
int increment,
309309
struct vmw_kms_dirty *dirty);
310310

311-
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
312-
struct vmw_buffer_object *buf,
313-
bool interruptible,
314-
bool validate_as_mob,
315-
bool for_cpu_blit);
316-
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
317-
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
318-
struct drm_file *file_priv,
319-
struct vmw_buffer_object *buf,
320-
struct vmw_fence_obj **out_fence,
321-
struct drm_vmw_fence_rep __user *
322-
user_fence_rep);
323-
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
324-
bool interruptible,
325-
struct vmw_validation_ctx *ctx);
326-
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
327-
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
328-
struct vmw_fence_obj **out_fence);
311+
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
312+
struct drm_file *file_priv,
313+
struct vmw_validation_context *ctx,
314+
struct vmw_fence_obj **out_fence,
315+
struct drm_vmw_fence_rep __user *
316+
user_fence_rep);
329317
int vmw_kms_readback(struct vmw_private *dev_priv,
330318
struct drm_file *file_priv,
331319
struct vmw_framebuffer *vfb,

drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c

Lines changed: 35 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
946946
struct vmw_framebuffer_surface *vfbs =
947947
container_of(framebuffer, typeof(*vfbs), base);
948948
struct vmw_kms_sou_surface_dirty sdirty;
949-
struct vmw_validation_ctx ctx;
949+
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
950950
int ret;
951951

952952
if (!srf)
953953
srf = &vfbs->surface->res;
954954

955-
ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
955+
ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
956956
if (ret)
957957
return ret;
958958

959+
ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
960+
if (ret)
961+
goto out_unref;
962+
959963
sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
960964
sdirty.base.clip = vmw_sou_surface_clip;
961965
sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
972976
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
973977
dest_x, dest_y, num_clips, inc,
974978
&sdirty.base);
975-
vmw_kms_helper_resource_finish(&ctx, out_fence);
979+
vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
980+
NULL);
976981

977982
return ret;
983+
984+
out_unref:
985+
vmw_validation_unref_lists(&val_ctx);
986+
return ret;
978987
}
979988

980989
/**
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
10511060
container_of(framebuffer, struct vmw_framebuffer_bo,
10521061
base)->buffer;
10531062
struct vmw_kms_dirty dirty;
1063+
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
10541064
int ret;
10551065

1056-
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
1057-
false, false);
1066+
ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
10581067
if (ret)
10591068
return ret;
10601069

1070+
ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
1071+
if (ret)
1072+
goto out_unref;
1073+
10611074
ret = do_bo_define_gmrfb(dev_priv, framebuffer);
10621075
if (unlikely(ret != 0))
10631076
goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
10691082
num_clips;
10701083
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
10711084
0, 0, num_clips, increment, &dirty);
1072-
vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
1085+
vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
1086+
NULL);
10731087

10741088
return ret;
10751089

10761090
out_revert:
1077-
vmw_kms_helper_buffer_revert(buf);
1091+
vmw_validation_revert(&val_ctx);
1092+
out_unref:
1093+
vmw_validation_unref_lists(&val_ctx);
10781094

10791095
return ret;
10801096
}
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
11501166
struct vmw_buffer_object *buf =
11511167
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
11521168
struct vmw_kms_dirty dirty;
1169+
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
11531170
int ret;
11541171

1155-
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
1156-
false);
1172+
ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
11571173
if (ret)
11581174
return ret;
11591175

1176+
ret = vmw_validation_prepare(&val_ctx, NULL, true);
1177+
if (ret)
1178+
goto out_unref;
1179+
11601180
ret = do_bo_define_gmrfb(dev_priv, vfb);
11611181
if (unlikely(ret != 0))
11621182
goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
11681188
num_clips;
11691189
ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
11701190
0, 0, num_clips, 1, &dirty);
1171-
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
1172-
user_fence_rep);
1191+
vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
1192+
user_fence_rep);
11731193

11741194
return ret;
11751195

11761196
out_revert:
1177-
vmw_kms_helper_buffer_revert(buf);
1178-
1197+
vmw_validation_revert(&val_ctx);
1198+
out_unref:
1199+
vmw_validation_unref_lists(&val_ctx);
1200+
11791201
return ret;
11801202
}

0 commit comments

Comments
 (0)