/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_active.h"

/* All activity tracking is currently serialised by the device-global struct_mutex ("BKL"). */
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

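/*
 * One rbtree node per timeline: tracks the most recent request emitted on
 * that timeline (keyed by its fence context id) against this i915_active.
 */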
struct active_node {
	struct i915_gem_active base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

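/*
 * Drop one reference to the i915_active; once the count reaches zero the
 * owner's retire() callback is invoked. node_retire() and last_retire()
 * below are the per-slot hooks that funnel request retirement into here.
 */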
static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (!--ref->count)
		ref->retire(ref);
}

static void
node_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct active_node, base)->ref);
}

static void
last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct i915_active, last));
}

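/*
 * Pick the i915_gem_active slot that will track a request on timeline @idx:
 * reuse the cached ref->last slot when it is idle or already bound to @idx,
 * otherwise spill its contents into the rbtree (allocating a node on demand)
 * and hand back the emptied last slot. Returns ERR_PTR(-ENOMEM) if a node
 * was needed but could not be allocated.
 */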
static struct i915_gem_active *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is,
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 *
	 * Note that we allow the timeline to be active simultaneously in
	 * the rbtree and the last cache. We do this to avoid having
	 * to search and replace the rbtree element for a new timeline, with
	 * the cost being that we must be aware that the ref may be retired
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request is added to last).
	 */
	old = i915_gem_active_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto replace;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);

	/* kmalloc may retire ref->last (thanks, shrinker!), so recheck it. */
	if (unlikely(!i915_gem_active_raw(&ref->last, BKL(ref)))) {
		kfree(node);
		goto out;
	}

	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);

	init_request_active(&node->base, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last,
	 * leaving last zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback, not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_gem_active_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
		GEM_BUG_ON(!ref->count);
	}
	GEM_BUG_ON(list_empty(&ref->last.link));
	list_replace_init(&ref->last.link, &node->base.link);
	node->base.request = fetch_and_zero(&ref->last.request);

out:
	return &ref->last;
}

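/*
 * i915_active_init - prepare an i915_active for tracking activity.
 *
 * Illustrative lifecycle for a hypothetical caller (obj->active and
 * obj_retire are example names, not taken from this patch):
 *
 *	i915_active_init(i915, &obj->active, obj_retire);
 *
 *	(under struct_mutex, for each request using the object)
 *	err = i915_active_ref(&obj->active, rq->fence.context, rq);
 *
 *	(to block until all tracked requests have been retired)
 *	err = i915_active_wait(&obj->active);
 *
 *	(once idle, before freeing the containing object)
 *	i915_active_fini(&obj->active);
 */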
void i915_active_init(struct drm_i915_private *i915,
		      struct i915_active *ref,
		      void (*retire)(struct i915_active *ref))
{
	ref->i915 = i915;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	init_request_active(&ref->last, last_retire);
	ref->count = 0;
}

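/*
 * i915_active_ref - track a request on @timeline against @ref.
 *
 * Replaces any request previously tracked for the same timeline; @ref stays
 * active until every tracked request has been retired. Returns 0 on success
 * or -ENOMEM if a new tracking node could not be allocated.
 */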
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_gem_active *active;

	active = active_instance(ref, timeline);
	if (IS_ERR(active))
		return PTR_ERR(active);

	if (!i915_gem_active_isset(active))
		ref->count++;
	i915_gem_active_set(active, rq);

	GEM_BUG_ON(!ref->count);
	return 0;
}

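/*
 * i915_active_acquire - pin the i915_active, preventing retire() from firing
 * until the matching i915_active_release(). Returns true if this was the
 * first reference, i.e. the i915_active was idle beforehand.
 */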
bool i915_active_acquire(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	return !ref->count++;
}

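/* i915_active_release - drop the reference taken by i915_active_acquire(). */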
void i915_active_release(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	__active_retire(ref);
}

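/*
 * i915_active_wait - retire all requests tracked by @ref, waiting for each
 * to complete. If @ref is already idle this returns immediately. Returns 0
 * on success or the error from waiting on a request.
 */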
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret = 0;

	if (i915_active_acquire(ref))
		goto out_release;

	ret = i915_gem_active_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = i915_gem_active_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}

out_release:
	i915_active_release(ref);
	return ret;
}

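/* Make @rq wait for the request currently tracked in @active, if any. */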
static int __i915_request_await_active(struct i915_request *rq,
				       struct i915_gem_active *active)
{
	struct i915_request *barrier =
		i915_gem_active_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

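/*
 * i915_request_await_active - set up @rq to wait upon everything tracked by
 * @ref: the cached last slot and every timeline node in the rbtree. Returns
 * 0 on success or the first error from adding a fence dependency.
 */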
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret;

	ret = __i915_request_await_active(rq, &ref->last);
	if (ret)
		return ret;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = __i915_request_await_active(rq, &it->base);
		if (ret)
			return ret;
	}

	return 0;
}

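/*
 * i915_active_fini - release the tracking nodes. All activity must already
 * have been retired; the nodes are freed and the rbtree reset to empty.
 */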
void i915_active_fini(struct i915_active *ref)
{
	struct active_node *it, *n;

	GEM_BUG_ON(i915_gem_active_isset(&ref->last));

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		GEM_BUG_ON(i915_gem_active_isset(&it->base));
		kfree(it);
	}
	ref->tree = RB_ROOT;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif