Commit 64d6c50
drm/i915: Generalise GPU activity tracking
We currently track GPU memory usage inside VMA, such that we never release memory used by the GPU until after it has finished accessing it. However, we may want to track other resources aside from VMA, or we may want to split a VMA into multiple independent regions and track each separately. For this purpose, generalise our request tracking (akin to struct reservation_object) so that we can embed it into other objects.

v2: Tweak error handling during selftest setup.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-2-chris@chris-wilson.co.uk
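To make "embed it into other objects" concrete, here is a minimal sketch of the intended embedding pattern; struct my_pages and my_pages_retire are hypothetical illustrations, not part of this patch:

/* Hypothetical resource that embeds the new tracker. */
struct my_pages {
        struct i915_active active;      /* tracks GPU users of @pages */
        struct sg_table *pages;
};

static void my_pages_retire(struct i915_active *ref)
{
        struct my_pages *p = container_of(ref, struct my_pages, active);

        /* All tracked GPU activity has ceased; @p->pages is idle. */
}

/* At creation time the owner wires up the callback, e.g.:
 *      i915_active_init(i915, &p->active, my_pages_retire);
 */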
1 parent a21f453 commit 64d6c50

File tree

9 files changed, +518 -154 lines changed

drivers/gpu/drm/i915/Makefile

Lines changed: 3 additions & 1 deletion

@@ -57,7 +57,9 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # GEM code
-i915-y += i915_cmd_parser.o \
+i915-y += \
+         i915_active.o \
+         i915_cmd_parser.o \
          i915_gem_batch_pool.o \
          i915_gem_clflush.o \
          i915_gem_context.o \

drivers/gpu/drm/i915/i915_active.c

Lines changed: 228 additions & 0 deletions

@@ -0,0 +1,228 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_active.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

struct active_node {
        struct i915_gem_active base;
        struct i915_active *ref;
        struct rb_node node;
        u64 timeline;
};

static void
__active_retire(struct i915_active *ref)
{
        GEM_BUG_ON(!ref->count);
        if (!--ref->count)
                ref->retire(ref);
}

static void
node_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        __active_retire(container_of(base, struct active_node, base)->ref);
}

static void
last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        __active_retire(container_of(base, struct i915_active, last));
}

static struct i915_gem_active *
active_instance(struct i915_active *ref, u64 idx)
{
        struct active_node *node;
        struct rb_node **p, *parent;
        struct i915_request *old;

        /*
         * We track the most recently used timeline to skip an rbtree search
         * for the common case; under typical loads we never need the rbtree
         * at all. We can reuse the last slot if it is empty (that is, after
         * the previous activity has been retired) or if it matches the
         * current timeline.
         *
         * Note that we allow the timeline to be active simultaneously in
         * the rbtree and the last cache. We do this to avoid having
         * to search and replace the rbtree element for a new timeline, with
         * the cost being that we must be aware that the ref may be retired
         * twice for the same timeline (as the older rbtree element will be
         * retired before the new request added to last).
         */
        old = i915_gem_active_raw(&ref->last, BKL(ref));
        if (!old || old->fence.context == idx)
                goto out;

        /* Move the currently active fence into the rbtree */
        idx = old->fence.context;

        parent = NULL;
        p = &ref->tree.rb_node;
        while (*p) {
                parent = *p;

                node = rb_entry(parent, struct active_node, node);
                if (node->timeline == idx)
                        goto replace;

                if (node->timeline < idx)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        node = kmalloc(sizeof(*node), GFP_KERNEL);

        /* kmalloc may retire the ref->last (thanks shrinker)! */
        if (unlikely(!i915_gem_active_raw(&ref->last, BKL(ref)))) {
                kfree(node);
                goto out;
        }

        if (unlikely(!node))
                return ERR_PTR(-ENOMEM);

        init_request_active(&node->base, node_retire);
        node->ref = ref;
        node->timeline = idx;

        rb_link_node(&node->node, parent, p);
        rb_insert_color(&node->node, &ref->tree);

replace:
        /*
         * Overwrite the previous active slot in the rbtree with last,
         * leaving last zeroed. If the previous slot is still active,
         * we must be careful as we now only expect to receive one retire
         * callback not two, and so must undo the active counting for the
         * overwritten slot.
         */
        if (i915_gem_active_isset(&node->base)) {
                /* Retire ourselves from the old rq->active_list */
                __list_del_entry(&node->base.link);
                ref->count--;
                GEM_BUG_ON(!ref->count);
        }
        GEM_BUG_ON(list_empty(&ref->last.link));
        list_replace_init(&ref->last.link, &node->base.link);
        node->base.request = fetch_and_zero(&ref->last.request);

out:
        return &ref->last;
}

void i915_active_init(struct drm_i915_private *i915,
                      struct i915_active *ref,
                      void (*retire)(struct i915_active *ref))
{
        ref->i915 = i915;
        ref->retire = retire;
        ref->tree = RB_ROOT;
        init_request_active(&ref->last, last_retire);
        ref->count = 0;
}

int i915_active_ref(struct i915_active *ref,
                    u64 timeline,
                    struct i915_request *rq)
{
        struct i915_gem_active *active;

        active = active_instance(ref, timeline);
        if (IS_ERR(active))
                return PTR_ERR(active);

        if (!i915_gem_active_isset(active))
                ref->count++;
        i915_gem_active_set(active, rq);

        GEM_BUG_ON(!ref->count);
        return 0;
}

bool i915_active_acquire(struct i915_active *ref)
{
        lockdep_assert_held(BKL(ref));
        return !ref->count++;
}

void i915_active_release(struct i915_active *ref)
{
        lockdep_assert_held(BKL(ref));
        __active_retire(ref);
}

int i915_active_wait(struct i915_active *ref)
{
        struct active_node *it, *n;
        int ret = 0;

        if (i915_active_acquire(ref))
                goto out_release;

        ret = i915_gem_active_retire(&ref->last, BKL(ref));
        if (ret)
                goto out_release;

        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                ret = i915_gem_active_retire(&it->base, BKL(ref));
                if (ret)
                        break;
        }

out_release:
        i915_active_release(ref);
        return ret;
}

static int __i915_request_await_active(struct i915_request *rq,
                                       struct i915_gem_active *active)
{
        struct i915_request *barrier =
                i915_gem_active_raw(active, &rq->i915->drm.struct_mutex);

        return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
        struct active_node *it, *n;
        int ret;

        ret = __i915_request_await_active(rq, &ref->last);
        if (ret)
                return ret;

        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                ret = __i915_request_await_active(rq, &it->base);
                if (ret)
                        return ret;
        }

        return 0;
}

void i915_active_fini(struct i915_active *ref)
{
        struct active_node *it, *n;

        GEM_BUG_ON(i915_gem_active_isset(&ref->last));

        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                GEM_BUG_ON(i915_gem_active_isset(&it->base));
                kfree(it);
        }
        ref->tree = RB_ROOT;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif
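A note on the acquire/release pair defined above: i915_active_acquire() bumps ref->count so that the retire callback cannot fire while a caller is still adding requests, and i915_active_release() drops that guard, retiring the tracker if it has gone idle. A hedged sketch of the pattern, reusing the hypothetical my_pages from earlier (the helper itself is illustrative; the caller is assumed to hold struct_mutex):

static int my_pages_mark_active(struct my_pages *p, struct i915_request *rq)
{
        int err;

        /* Keep the tracker busy so ->retire cannot fire mid-update. */
        i915_active_acquire(&p->active);

        /* Record @rq as the most recent user on its timeline. */
        err = i915_active_ref(&p->active, rq->fence.context, rq);

        /* Drop the guard; ->retire runs once all users are idle. */
        i915_active_release(&p->active);

        return err;
}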

drivers/gpu/drm/i915/i915_active.h

Lines changed: 69 additions & 0 deletions

@@ -0,0 +1,69 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_H_
#define _I915_ACTIVE_H_

#include "i915_active_types.h"

/*
 * GPU activity tracking
 *
 * Each set of commands submitted to the GPU comprises a single request that
 * signals a fence upon completion. struct i915_request combines the
 * command submission, scheduling and fence signaling roles. If we want to see
 * if a particular task is complete, we need to grab the fence (struct
 * i915_request) for that task and check or wait for it to be signaled. More
 * often, though, we want to track the status of a bunch of tasks, for example
 * to wait for the GPU to finish accessing some memory across a variety of
 * different command pipelines from different clients. We could choose to
 * track every single request associated with the task, but knowing that
 * each request belongs to an ordered timeline (later requests within a
 * timeline must wait for earlier requests), we need only track the
 * latest request in each timeline to determine the overall status of the
 * task.
 *
 * struct i915_active provides this tracking across timelines. It builds a
 * composite shared-fence, and is updated as new work is submitted to the task,
 * forming a snapshot of the current status. It should be embedded into the
 * different resources that need to track their associated GPU activity to
 * provide a callback when that GPU activity has ceased, or otherwise to
 * provide a serialisation point either for request submission or for CPU
 * synchronisation.
 */

void i915_active_init(struct drm_i915_private *i915,
                      struct i915_active *ref,
                      void (*retire)(struct i915_active *ref));

int i915_active_ref(struct i915_active *ref,
                    u64 timeline,
                    struct i915_request *rq);

int i915_active_wait(struct i915_active *ref);

int i915_request_await_active(struct i915_request *rq,
                              struct i915_active *ref);

bool i915_active_acquire(struct i915_active *ref);

static inline void i915_active_cancel(struct i915_active *ref)
{
        GEM_BUG_ON(ref->count != 1);
        ref->count = 0;
}

void i915_active_release(struct i915_active *ref);

static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
        return !ref->count;
}

void i915_active_fini(struct i915_active *ref);

#endif /* _I915_ACTIVE_H_ */
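The header comment above frames struct i915_active as a serialisation point for either request submission or CPU synchronisation. Two hypothetical helpers sketch those two uses; only the i915_active_wait() and i915_request_await_active() calls come from this patch:

/* CPU-side: sleep until every tracked request has been retired. */
static int my_pages_flush(struct i915_active *ref)
{
        return i915_active_wait(ref);
}

/* GPU-side: order new request @rq after all tracked activity. */
static int my_pages_queue_after(struct i915_request *rq,
                                struct i915_active *ref)
{
        return i915_request_await_active(rq, ref);
}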
drivers/gpu/drm/i915/i915_active_types.h

Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_

#include <linux/rbtree.h>

#include "i915_request.h"

struct drm_i915_private;

struct i915_active {
        struct drm_i915_private *i915;

        struct rb_root tree;
        struct i915_gem_active last;
        unsigned int count;

        void (*retire)(struct i915_active *ref);
};

#endif /* _I915_ACTIVE_TYPES_H_ */

drivers/gpu/drm/i915/i915_gem_gtt.c

Lines changed: 1 addition & 2 deletions

@@ -1917,14 +1917,13 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
+	i915_active_init(i915, &vma->active, NULL);
 	init_request_active(&vma->last_fence, NULL);
 
 	vma->vm = &ggtt->vm;
 	vma->ops = &pd_vma_ops;
 	vma->private = ppgtt;
 
-	vma->active = RB_ROOT;
-
 	vma->size = size;
 	vma->fence_size = size;
 	vma->flags = I915_VMA_GGTT;