Skip to content

Commit 33654ef

Browse files
ChristianKoenigAMD and Thomas Hellström
authored and committed
drm/i915: remove questionable fence optimization during copy
First of all, as discussed multiple times now, kernel copies *must* always wait for all fences in a BO before actually doing the copy. This is mandatory.

In addition to that, drop the handling for the case when a shared slot can't be allocated on the source BO and just properly return an error code. Otherwise this code path would only be tested under out-of-memory conditions.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Thomas Hellström <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Signed-off-by: Thomas Hellström <[email protected]>
1 parent 1193081 commit 33654ef

File tree

1 file changed

+14
-29
lines changed

1 file changed

+14
-29
lines changed

drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c

Lines changed: 14 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -195,19 +195,14 @@ static int i915_deps_add_dependency(struct i915_deps *deps,
195195
}
196196

197197
static int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
198-
bool all, const bool no_excl,
199198
const struct ttm_operation_ctx *ctx)
200199
{
201200
struct dma_resv_iter iter;
202201
struct dma_fence *fence;
202+
int ret;
203203

204204
dma_resv_assert_held(resv);
205-
dma_resv_for_each_fence(&iter, resv, all, fence) {
206-
int ret;
207-
208-
if (no_excl && dma_resv_iter_is_exclusive(&iter))
209-
continue;
210-
205+
dma_resv_for_each_fence(&iter, resv, true, fence) {
211206
ret = i915_deps_add_dependency(deps, fence, ctx);
212207
if (ret)
213208
return ret;
@@ -634,11 +629,7 @@ prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
634629

635630
ret = i915_deps_add_dependency(deps, bo->moving, ctx);
636631
if (!ret)
637-
/*
638-
* TODO: Only await excl fence here, and shared fences before
639-
* signaling the migration fence.
640-
*/
641-
ret = i915_deps_add_resv(deps, bo->base.resv, true, false, ctx);
632+
ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
642633

643634
return ret;
644635
}
@@ -768,22 +759,21 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
768759
struct i915_refct_sgt *dst_rsgt;
769760
struct dma_fence *copy_fence;
770761
struct i915_deps deps;
771-
int ret, shared_err;
762+
int ret;
772763

773764
assert_object_held(dst);
774765
assert_object_held(src);
775766
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
776767

777-
/*
778-
* We plan to add a shared fence only for the source. If that
779-
* fails, we await all source fences before commencing
780-
* the copy instead of only the exclusive.
781-
*/
782-
shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1);
783-
ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx);
784-
if (!ret)
785-
ret = i915_deps_add_resv(&deps, src_bo->base.resv,
786-
!!shared_err, false, &ctx);
768+
ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
769+
if (ret)
770+
return ret;
771+
772+
ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
773+
if (ret)
774+
return ret;
775+
776+
ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
787777
if (ret)
788778
return ret;
789779

@@ -798,12 +788,7 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
798788
return PTR_ERR_OR_ZERO(copy_fence);
799789

800790
dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
801-
802-
/* If we failed to reserve a shared slot, add an exclusive fence */
803-
if (shared_err)
804-
dma_resv_add_excl_fence(src_bo->base.resv, copy_fence);
805-
else
806-
dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
791+
dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
807792

808793
dma_fence_put(copy_fence);
809794

0 commit comments

Comments
 (0)