@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
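For context, a minimal importer-side sketch of why this conversion matters; this is an assumed usage pattern, not part of the patch, and the bo variable is illustrative. With the reservation_object API used above, an implicit-fencing importer typically waits with wait_all == false, i.e. only on the exclusive fence, so the exporter's pending writes only become visible to it once the shared fences have been folded into a single exclusive fence as __reservation_object_make_exclusive() does:

	/*
	 * Assumed usage sketch, not part of this patch: waiting with
	 * wait_all == false blocks only on the exclusive fence, so the
	 * conversion above is what serializes an implicit-fencing
	 * importer against the exporter's writes.
	 */
	long r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
						     false /* wait_all */,
						     true /* intr */,
						     MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r; /* interrupted or other error */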