File tree: 1 file changed, +24 −3 lines changed.
lines changed Original file line number Diff line number Diff line change @@ -281,12 +281,33 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
281
281
if (ret )
282
282
return ret ;
283
283
284
- /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
285
- ret = i915_gem_object_userptr_unbind (obj , false);
284
+ /* optimistically try to preserve current pages while unlocked */
285
+ if (i915_gem_object_has_pages (obj ) &&
286
+ !mmu_interval_check_retry (& obj -> userptr .notifier ,
287
+ obj -> userptr .notifier_seq )) {
288
+ spin_lock (& i915 -> mm .notifier_lock );
289
+ if (obj -> userptr .pvec &&
290
+ !mmu_interval_read_retry (& obj -> userptr .notifier ,
291
+ obj -> userptr .notifier_seq )) {
292
+ obj -> userptr .page_ref ++ ;
293
+
294
+ /* We can keep using the current binding, this is the fastpath */
295
+ ret = 1 ;
296
+ }
297
+ spin_unlock (& i915 -> mm .notifier_lock );
298
+ }
299
+
300
+ if (!ret ) {
301
+ /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
302
+ ret = i915_gem_object_userptr_unbind (obj , false);
303
+ }
286
304
i915_gem_object_unlock (obj );
287
- if (ret )
305
+ if (ret < 0 )
288
306
return ret ;
289
307
308
+ if (ret > 0 )
309
+ return 0 ;
310
+
290
311
notifier_seq = mmu_interval_read_begin (& obj -> userptr .notifier );
291
312
292
313
pvec = kvmalloc_array (num_pages , sizeof (struct page * ), GFP_KERNEL );
You can’t perform that action at this time.
0 commit comments