Skip to content

Commit 093b922

Browse files
ickle authored and jlahtine-intel committed
drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree
As we want to be able to run inside atomic context for retiring the i915_active, and we are no longer allowed to abuse mutex_trylock, split the tree management portion of i915_active.mutex into an irq-safe spinlock. References: a0855d2 ("locking/mutex: Complain upon mutex API misuse in IRQ contexts") References: https://bugs.freedesktop.org/show_bug.cgi?id=111626 Fixes: 274cbf2 ("drm/i915: Push the i915_active.retire into a worker") Signed-off-by: Chris Wilson <[email protected]> Cc: Tvrtko Ursulin <[email protected]> Cc: Matthew Auld <[email protected]> Reviewed-by: Tvrtko Ursulin <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] (cherry picked from commit c9ad602) Signed-off-by: Joonas Lahtinen <[email protected]>
1 parent fa039b9 commit 093b922

File tree

2 files changed

+29
-29
lines changed

2 files changed

+29
-29
lines changed

drivers/gpu/drm/i915/i915_active.c

Lines changed: 28 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
9191

9292
static void debug_active_activate(struct i915_active *ref)
9393
{
94-
lockdep_assert_held(&ref->mutex);
94+
spin_lock_irq(&ref->tree_lock);
9595
if (!atomic_read(&ref->count)) /* before the first inc */
9696
debug_object_activate(ref, &active_debug_desc);
97+
spin_unlock_irq(&ref->tree_lock);
9798
}
9899

99100
static void debug_active_deactivate(struct i915_active *ref)
100101
{
101-
lockdep_assert_held(&ref->mutex);
102+
lockdep_assert_held(&ref->tree_lock);
102103
if (!atomic_read(&ref->count)) /* after the last dec */
103104
debug_object_deactivate(ref, &active_debug_desc);
104105
}
@@ -128,36 +129,34 @@ __active_retire(struct i915_active *ref)
128129
{
129130
struct active_node *it, *n;
130131
struct rb_root root;
131-
bool retire = false;
132+
unsigned long flags;
132133

133-
lockdep_assert_held(&ref->mutex);
134134
GEM_BUG_ON(i915_active_is_idle(ref));
135135

136136
/* return the unused nodes to our slabcache -- flushing the allocator */
137-
if (atomic_dec_and_test(&ref->count)) {
138-
debug_active_deactivate(ref);
139-
root = ref->tree;
140-
ref->tree = RB_ROOT;
141-
ref->cache = NULL;
142-
retire = true;
143-
}
144-
145-
mutex_unlock(&ref->mutex);
146-
if (!retire)
137+
if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
147138
return;
148139

149140
GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
150-
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
151-
GEM_BUG_ON(i915_active_fence_isset(&it->base));
152-
kmem_cache_free(global.slab_cache, it);
153-
}
141+
debug_active_deactivate(ref);
142+
143+
root = ref->tree;
144+
ref->tree = RB_ROOT;
145+
ref->cache = NULL;
146+
147+
spin_unlock_irqrestore(&ref->tree_lock, flags);
154148

155149
/* After the final retire, the entire struct may be freed */
156150
if (ref->retire)
157151
ref->retire(ref);
158152

159153
/* ... except if you wait on it, you must manage your own references! */
160154
wake_up_var(ref);
155+
156+
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
157+
GEM_BUG_ON(i915_active_fence_isset(&it->base));
158+
kmem_cache_free(global.slab_cache, it);
159+
}
161160
}
162161

163162
static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
169168
if (atomic_add_unless(&ref->count, -1, 1))
170169
return;
171170

172-
mutex_lock(&ref->mutex);
173171
__active_retire(ref);
174172
}
175173

@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
180178
if (atomic_add_unless(&ref->count, -1, 1))
181179
return;
182180

183-
/* If we are inside interrupt context (fence signaling), defer */
184-
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
185-
!mutex_trylock(&ref->mutex)) {
181+
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
186182
queue_work(system_unbound_wq, &ref->work);
187183
return;
188184
}
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
227223
if (!prealloc)
228224
return NULL;
229225

230-
mutex_lock(&ref->mutex);
226+
spin_lock_irq(&ref->tree_lock);
231227
GEM_BUG_ON(i915_active_is_idle(ref));
232228

233229
parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
257253

258254
out:
259255
ref->cache = node;
260-
mutex_unlock(&ref->mutex);
256+
spin_unlock_irq(&ref->tree_lock);
261257

262258
BUILD_BUG_ON(offsetof(typeof(*node), base));
263259
return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
278274
if (bits & I915_ACTIVE_MAY_SLEEP)
279275
ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
280276

277+
spin_lock_init(&ref->tree_lock);
281278
ref->tree = RB_ROOT;
282279
ref->cache = NULL;
280+
283281
init_llist_head(&ref->preallocated_barriers);
284282
atomic_set(&ref->count, 0);
285283
__mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
510508
if (RB_EMPTY_ROOT(&ref->tree))
511509
return NULL;
512510

513-
mutex_lock(&ref->mutex);
511+
spin_lock_irq(&ref->tree_lock);
514512
GEM_BUG_ON(i915_active_is_idle(ref));
515513

516514
/*
@@ -575,15 +573,15 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
575573
goto match;
576574
}
577575

578-
mutex_unlock(&ref->mutex);
576+
spin_unlock_irq(&ref->tree_lock);
579577

580578
return NULL;
581579

582580
match:
583581
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
584582
if (p == &ref->cache->node)
585583
ref->cache = NULL;
586-
mutex_unlock(&ref->mutex);
584+
spin_unlock_irq(&ref->tree_lock);
587585

588586
return rb_entry(p, struct active_node, node);
589587
}
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
664662
void i915_active_acquire_barrier(struct i915_active *ref)
665663
{
666664
struct llist_node *pos, *next;
665+
unsigned long flags;
667666

668667
GEM_BUG_ON(i915_active_is_idle(ref));
669668

@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
673672
* populated by i915_request_add_active_barriers() to point to the
674673
* request that will eventually release them.
675674
*/
676-
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
675+
spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
677676
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
678677
struct active_node *node = barrier_from_ll(pos);
679678
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
699698
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
700699
intel_engine_pm_put(engine);
701700
}
702-
mutex_unlock(&ref->mutex);
701+
spin_unlock_irqrestore(&ref->tree_lock, flags);
703702
}
704703

705704
void i915_request_add_active_barriers(struct i915_request *rq)

drivers/gpu/drm/i915/i915_active_types.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -48,6 +48,7 @@ struct i915_active {
4848
atomic_t count;
4949
struct mutex mutex;
5050

51+
spinlock_t tree_lock;
5152
struct active_node *cache;
5253
struct rb_root tree;
5354

0 commit comments

Comments (0)