@@ -353,15 +353,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		ret = dma_resv_reserve_shared(resv, 1);
 		if (ret)
 			return ret;
-	}
 
-	fobj = dma_resv_shared_list(resv);
-	fence = dma_resv_excl_fence(resv);
+		fobj = NULL;
+	} else {
+		fobj = dma_resv_shared_list(resv);
+	}
 
-	if (fence) {
+	/* Waiting for the exclusive fence first causes performance regressions
+	 * under some circumstances. So manually wait for the shared ones first.
+	 */
+	for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
 
+		fence = rcu_dereference_protected(fobj->shared[i],
+						  dma_resv_held(resv));
+
 		f = nouveau_local_fence(fence, chan->drm);
 		if (f) {
 			rcu_read_lock();
@@ -373,20 +380,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 		if (must_wait)
 			ret = dma_fence_wait(fence, intr);
-
-		return ret;
 	}
 
-	if (!exclusive || !fobj)
-		return ret;
-
-	for (i = 0; i < fobj->shared_count && !ret; ++i) {
+	fence = dma_resv_excl_fence(resv);
+	if (fence) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
 
-		fence = rcu_dereference_protected(fobj->shared[i],
-						  dma_resv_held(resv));
-
 		f = nouveau_local_fence(fence, chan->drm);
 		if (f) {
 			rcu_read_lock();
@@ -398,6 +398,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 		if (must_wait)
 			ret = dma_fence_wait(fence, intr);
+
+		return ret;
 	}
 
 	return ret;
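A sketch of the resulting control flow, assembled from the hunks above: all shared fences are waited on first, the exclusive fence last. The declarations at the top and the channel-matching check inside the rcu_read_lock() section are not visible in this diff, so they are illustrative assumptions only, not copied from the tree.

/* Sketch of nouveau_fence_sync() after this patch; pieces not shown in the
 * diff above are marked as assumptions.
 */
static int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct dma_resv *resv = nvbo->bo.base.resv;	/* assumed declaration */
	struct dma_resv_list *fobj;
	struct nouveau_fence *f;
	struct dma_fence *fence;
	int ret = 0, i;

	if (exclusive) {
		/* Reserve a shared slot for the fence we are about to add. */
		ret = dma_resv_reserve_shared(resv, 1);
		if (ret)
			return ret;

		fobj = NULL;
	} else {
		fobj = dma_resv_shared_list(resv);
	}

	/* Shared fences first: waiting for the exclusive fence first caused
	 * performance regressions under some circumstances.
	 */
	for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			/* Assumed, elided in the diff: if the fence comes from
			 * a channel the GPU can sync with directly, clear
			 * must_wait instead of blocking on the CPU. */
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);
	}

	/* Only after the shared fences, wait for the exclusive fence. */
	fence = dma_resv_excl_fence(resv);
	if (fence) {
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			/* same assumed channel check as above */
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);

		return ret;
	}

	return ret;
}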