@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
91
91
92
92
static void debug_active_activate (struct i915_active * ref )
93
93
{
94
- lockdep_assert_held (& ref -> mutex );
94
+ spin_lock_irq (& ref -> tree_lock );
95
95
if (!atomic_read (& ref -> count )) /* before the first inc */
96
96
debug_object_activate (ref , & active_debug_desc );
97
+ spin_unlock_irq (& ref -> tree_lock );
97
98
}
98
99
99
100
static void debug_active_deactivate (struct i915_active * ref )
100
101
{
101
- lockdep_assert_held (& ref -> mutex );
102
+ lockdep_assert_held (& ref -> tree_lock );
102
103
if (!atomic_read (& ref -> count )) /* after the last dec */
103
104
debug_object_deactivate (ref , & active_debug_desc );
104
105
}
@@ -128,36 +129,34 @@ __active_retire(struct i915_active *ref)
128
129
{
129
130
struct active_node * it , * n ;
130
131
struct rb_root root ;
131
- bool retire = false ;
132
+ unsigned long flags ;
132
133
133
- lockdep_assert_held (& ref -> mutex );
134
134
GEM_BUG_ON (i915_active_is_idle (ref ));
135
135
136
136
/* return the unused nodes to our slabcache -- flushing the allocator */
137
- if (atomic_dec_and_test (& ref -> count )) {
138
- debug_active_deactivate (ref );
139
- root = ref -> tree ;
140
- ref -> tree = RB_ROOT ;
141
- ref -> cache = NULL ;
142
- retire = true;
143
- }
144
-
145
- mutex_unlock (& ref -> mutex );
146
- if (!retire )
137
+ if (!atomic_dec_and_lock_irqsave (& ref -> count , & ref -> tree_lock , flags ))
147
138
return ;
148
139
149
140
GEM_BUG_ON (rcu_access_pointer (ref -> excl .fence ));
150
- rbtree_postorder_for_each_entry_safe (it , n , & root , node ) {
151
- GEM_BUG_ON (i915_active_fence_isset (& it -> base ));
152
- kmem_cache_free (global .slab_cache , it );
153
- }
141
+ debug_active_deactivate (ref );
142
+
143
+ root = ref -> tree ;
144
+ ref -> tree = RB_ROOT ;
145
+ ref -> cache = NULL ;
146
+
147
+ spin_unlock_irqrestore (& ref -> tree_lock , flags );
154
148
155
149
/* After the final retire, the entire struct may be freed */
156
150
if (ref -> retire )
157
151
ref -> retire (ref );
158
152
159
153
/* ... except if you wait on it, you must manage your own references! */
160
154
wake_up_var (ref );
155
+
156
+ rbtree_postorder_for_each_entry_safe (it , n , & root , node ) {
157
+ GEM_BUG_ON (i915_active_fence_isset (& it -> base ));
158
+ kmem_cache_free (global .slab_cache , it );
159
+ }
161
160
}
162
161
163
162
static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
169
168
if (atomic_add_unless (& ref -> count , -1 , 1 ))
170
169
return ;
171
170
172
- mutex_lock (& ref -> mutex );
173
171
__active_retire (ref );
174
172
}
175
173
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
180
178
if (atomic_add_unless (& ref -> count , -1 , 1 ))
181
179
return ;
182
180
183
- /* If we are inside interrupt context (fence signaling), defer */
184
- if (ref -> flags & I915_ACTIVE_RETIRE_SLEEPS ||
185
- !mutex_trylock (& ref -> mutex )) {
181
+ if (ref -> flags & I915_ACTIVE_RETIRE_SLEEPS ) {
186
182
queue_work (system_unbound_wq , & ref -> work );
187
183
return ;
188
184
}
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
227
223
if (!prealloc )
228
224
return NULL ;
229
225
230
- mutex_lock (& ref -> mutex );
226
+ spin_lock_irq (& ref -> tree_lock );
231
227
GEM_BUG_ON (i915_active_is_idle (ref ));
232
228
233
229
parent = NULL ;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
257
253
258
254
out :
259
255
ref -> cache = node ;
260
- mutex_unlock (& ref -> mutex );
256
+ spin_unlock_irq (& ref -> tree_lock );
261
257
262
258
BUILD_BUG_ON (offsetof (typeof (* node ), base ));
263
259
return & node -> base ;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
278
274
if (bits & I915_ACTIVE_MAY_SLEEP )
279
275
ref -> flags |= I915_ACTIVE_RETIRE_SLEEPS ;
280
276
277
+ spin_lock_init (& ref -> tree_lock );
281
278
ref -> tree = RB_ROOT ;
282
279
ref -> cache = NULL ;
280
+
283
281
init_llist_head (& ref -> preallocated_barriers );
284
282
atomic_set (& ref -> count , 0 );
285
283
__mutex_init (& ref -> mutex , "i915_active" , key );
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
510
508
if (RB_EMPTY_ROOT (& ref -> tree ))
511
509
return NULL ;
512
510
513
- mutex_lock (& ref -> mutex );
511
+ spin_lock_irq (& ref -> tree_lock );
514
512
GEM_BUG_ON (i915_active_is_idle (ref ));
515
513
516
514
/*
@@ -575,15 +573,15 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
575
573
goto match ;
576
574
}
577
575
578
- mutex_unlock (& ref -> mutex );
576
+ spin_unlock_irq (& ref -> tree_lock );
579
577
580
578
return NULL ;
581
579
582
580
match :
583
581
rb_erase (p , & ref -> tree ); /* Hide from waits and sibling allocations */
584
582
if (p == & ref -> cache -> node )
585
583
ref -> cache = NULL ;
586
- mutex_unlock (& ref -> mutex );
584
+ spin_unlock_irq (& ref -> tree_lock );
587
585
588
586
return rb_entry (p , struct active_node , node );
589
587
}
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
664
662
void i915_active_acquire_barrier (struct i915_active * ref )
665
663
{
666
664
struct llist_node * pos , * next ;
665
+ unsigned long flags ;
667
666
668
667
GEM_BUG_ON (i915_active_is_idle (ref ));
669
668
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
673
672
* populated by i915_request_add_active_barriers() to point to the
674
673
* request that will eventually release them.
675
674
*/
676
- mutex_lock_nested (& ref -> mutex , SINGLE_DEPTH_NESTING );
675
+ spin_lock_irqsave_nested (& ref -> tree_lock , flags , SINGLE_DEPTH_NESTING );
677
676
llist_for_each_safe (pos , next , take_preallocated_barriers (ref )) {
678
677
struct active_node * node = barrier_from_ll (pos );
679
678
struct intel_engine_cs * engine = barrier_to_engine (node );
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
699
698
llist_add (barrier_to_ll (node ), & engine -> barrier_tasks );
700
699
intel_engine_pm_put (engine );
701
700
}
702
- mutex_unlock (& ref -> mutex );
701
+ spin_unlock_irqrestore (& ref -> tree_lock , flags );
703
702
}
704
703
705
704
void i915_request_add_active_barriers (struct i915_request * rq )