
Commit e61e0f5

drm/i915: Rename drm_i915_gem_request to i915_request
We want to de-emphasize the link between the request (dependency, execution and fence tracking) and GEM, and so rename the struct from drm_i915_gem_request to i915_request. That is, we may implement the GEM user interface on top of requests, but they are an abstraction for tracking execution rather than an implementation detail of GEM. (Since they are not tied to HW, we keep the i915 prefix as opposed to intel.)

In short, the spatch:

  @@
  @@
  - struct drm_i915_gem_request
  + struct i915_request

As a corollary to contracting the type name, we also harmonise on using 'rq' as the shorthand for local variables where space is of the essence and repetition makes 'request' unwieldy. For globals and struct members, 'request' is still much preferred for its clarity.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180221095636.6649-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
1 parent ea3f0ef commit e61e0f5
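
As an illustration of the naming convention described in the commit message, here is a minimal, hypothetical sketch (not part of the patch; struct and function names are invented): the struct member keeps the spelled-out 'request', while the short-lived local uses 'rq'.

/* Hypothetical example only: 'request' for struct members, 'rq' for locals. */
struct example_owner {
        struct i915_request *request;   /* member: clarity preferred */
};

static void example_owner_release(struct example_owner *owner)
{
        struct i915_request *rq = owner->request;       /* local: shorthand */

        owner->request = NULL;
        i915_request_put(rq);   /* drop the reference held by the owner */
}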


52 files changed (+990, -996 lines)

drivers/gpu/drm/i915/Makefile

Lines changed: 1 addition & 1 deletion
@@ -63,13 +63,13 @@ i915-y += i915_cmd_parser.o \
           i915_gem.o \
           i915_gem_object.o \
           i915_gem_render_state.o \
-          i915_gem_request.o \
           i915_gem_shrinker.o \
           i915_gem_stolen.o \
           i915_gem_tiling.o \
           i915_gem_timeline.o \
           i915_gem_userptr.o \
           i915_gemfs.o \
+          i915_request.o \
           i915_trace_points.o \
           i915_vma.o \
           intel_breadcrumbs.o \

drivers/gpu/drm/i915/gvt/scheduler.c

Lines changed: 8 additions & 8 deletions
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
         return 0;
 }
 
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
 {
         return i915_gem_context_force_single_submission(req->ctx);
 }
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
 static int shadow_context_status_change(struct notifier_block *nb,
                 unsigned long action, void *data)
 {
-        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+        struct i915_request *req = data;
         struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                 shadow_ctx_notifier_block[req->engine->id]);
         struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -333,13 +333,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
         int ring_id = workload->ring_id;
         struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
         struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-        struct drm_i915_gem_request *rq;
+        struct i915_request *rq;
         struct intel_vgpu *vgpu = workload->vgpu;
         struct intel_vgpu_submission *s = &vgpu->submission;
         struct i915_gem_context *shadow_ctx = s->shadow_ctx;
         int ret;
 
-        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+        rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
         if (IS_ERR(rq)) {
                 gvt_vgpu_err("fail to allocate gem request\n");
                 ret = PTR_ERR(rq);
@@ -348,7 +348,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 
         gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
 
-        workload->req = i915_gem_request_get(rq);
+        workload->req = i915_request_get(rq);
         ret = copy_workload_to_ring_buffer(workload);
         if (ret)
                 goto err_unpin;
@@ -582,7 +582,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
         if (!IS_ERR_OR_NULL(workload->req)) {
                 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                               ring_id, workload->req);
-                i915_add_request(workload->req);
+                i915_request_add(workload->req);
                 workload->dispatched = true;
         }
 
@@ -769,7 +769,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                 workload->status = 0;
         }
 
-        i915_gem_request_put(fetch_and_zero(&workload->req));
+        i915_request_put(fetch_and_zero(&workload->req));
 
         if (!workload->status && !(vgpu->resetting_eng &
                                    ENGINE_MASK(ring_id))) {
@@ -886,7 +886,7 @@ static int workload_thread(void *priv)
 
                 gvt_dbg_sched("ring id %d wait workload %p\n",
                               workload->ring_id, workload);
-                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
                 gvt_dbg_sched("will complete workload %p, status: %d\n",

drivers/gpu/drm/i915/gvt/scheduler.h

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
 struct intel_vgpu_workload {
         struct intel_vgpu *vgpu;
         int ring_id;
-        struct drm_i915_gem_request *req;
+        struct i915_request *req;
         /* if this workload has been dispatched to i915? */
         bool dispatched;
         bool shadowed;

drivers/gpu/drm/i915/i915_debugfs.c

Lines changed: 3 additions & 3 deletions
@@ -519,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                 struct file_stats stats;
                 struct drm_i915_file_private *file_priv = file->driver_priv;
-                struct drm_i915_gem_request *request;
+                struct i915_request *request;
                 struct task_struct *task;
 
                 mutex_lock(&dev->struct_mutex);
@@ -536,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                  * Therefore, we need to protect this ->comm access using RCU.
                  */
                 request = list_first_entry_or_null(&file_priv->mm.request_list,
-                                                   struct drm_i915_gem_request,
+                                                   struct i915_request,
                                                    client_link);
                 rcu_read_lock();
                 task = pid_task(request && request->ctx->pid ?
@@ -4060,7 +4060,7 @@ i915_drop_caches_set(void *data, u64 val)
                                      I915_WAIT_LOCKED);
 
         if (val & DROP_RETIRE)
-                i915_gem_retire_requests(dev_priv);
+                i915_retire_requests(dev_priv);
 
         mutex_unlock(&dev->struct_mutex);
 }

drivers/gpu/drm/i915/i915_drv.c

Lines changed: 3 additions & 3 deletions
@@ -808,7 +808,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
         /*
          * The i915 workqueue is primarily used for batched retirement of
          * requests (and thus managing bo) once the task has been completed
-         * by the GPU. i915_gem_retire_requests() is called directly when we
+         * by the GPU. i915_retire_requests() is called directly when we
          * need high-priority retirement, such as waiting for an explicit
          * bo.
          *
@@ -1992,7 +1992,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
         add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
         i915_gem_set_wedged(i915);
-        i915_gem_retire_requests(i915);
+        i915_retire_requests(i915);
         intel_gpu_reset(i915, ALL_ENGINES);
         goto finish;
 }
@@ -2019,7 +2019,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
 {
         struct i915_gpu_error *error = &engine->i915->gpu_error;
-        struct drm_i915_gem_request *active_request;
+        struct i915_request *active_request;
         int ret;
 
         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

drivers/gpu/drm/i915/i915_drv.h

Lines changed: 12 additions & 14 deletions
@@ -71,9 +71,9 @@
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
-#include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
 
+#include "i915_request.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -1231,7 +1231,7 @@ struct i915_gpu_error {
          *
          * #I915_WEDGED - If reset fails and we can no longer use the GPU,
          * we set the #I915_WEDGED bit. Prior to command submission, e.g.
-         * i915_gem_request_alloc(), this bit is checked and the sequence
+         * i915_request_alloc(), this bit is checked and the sequence
          * aborted (with -EIO reported to userspace) if set.
          */
         unsigned long flags;
@@ -3329,7 +3329,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req,
+                             struct i915_request *rq,
                              unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
@@ -3344,11 +3344,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-
 static inline bool i915_reset_backoff(struct i915_gpu_error *error)
 {
         return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -3380,7 +3378,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
         return READ_ONCE(error->reset_engine_count[engine->id]);
 }
 
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
@@ -3389,7 +3387,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-                           struct drm_i915_gem_request *request);
+                           struct i915_request *request);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -4007,9 +4005,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 }
 
 static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
 {
-        struct intel_engine_cs *engine = req->engine;
+        struct intel_engine_cs *engine = rq->engine;
         u32 seqno;
 
         /* Note that the engine may have wrapped around the seqno, and
@@ -4018,7 +4016,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
          * this by kicking all the waiters before resetting the seqno
          * in hardware, and also signal the fence.
          */
-        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
                 return true;
 
         /* The request was dequeued before we were awoken. We check after
@@ -4027,14 +4025,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
          * the request execution are sufficient to ensure that a check
          * after reading the value from hw matches this request.
          */
-        seqno = i915_gem_request_global_seqno(req);
+        seqno = i915_request_global_seqno(rq);
         if (!seqno)
                 return false;
 
         /* Before we do the heavier coherent read of the seqno,
          * check the value (hopefully) in the CPU cacheline.
          */
-        if (__i915_gem_request_completed(req, seqno))
+        if (__i915_request_completed(rq, seqno))
                 return true;
 
         /* Ensure our read of the seqno is coherent so that we
@@ -4083,7 +4081,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
                 wake_up_process(b->irq_wait->tsk);
                 spin_unlock_irq(&b->irq_lock);
 
-                if (__i915_gem_request_completed(req, seqno))
+                if (__i915_request_completed(rq, seqno))
                         return true;
         }
 