@@ -33,6 +33,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
          (bool)bo->base.base.import_attach);
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+        return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+        dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
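The hunk above only adds thin wrappers around the GEM object's reservation lock. As a minimal usage sketch (not part of the patch; the function name example_bo_access is made up), a caller that previously took bo->lock would now bracket the same per-BO state like this:

/* Hypothetical caller, for illustration only: the new helpers simply
 * forward to the dma-buf reservation lock of the embedded GEM object. */
static void example_bo_access(struct ivpu_bo *bo)
{
        ivpu_bo_lock(bo);       /* dma_resv_lock(bo->base.base.resv, NULL) */
        /* ... access fields previously guarded by bo->lock,
         * e.g. bo->ctx, bo->mmu_mapped, bo->vpu_addr ... */
        ivpu_bo_unlock(bo);     /* dma_resv_unlock(bo->base.base.resv) */
}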
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
         struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+        struct sg_table *sgt;
         int ret = 0;
 
-        mutex_lock(&bo->lock);
-
         ivpu_dbg_bo(vdev, bo, "pin");
-        drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-        if (!bo->mmu_mapped) {
-                struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+        sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+        if (IS_ERR(sgt)) {
+                ret = PTR_ERR(sgt);
+                ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+                return ret;
+        }
 
-                if (IS_ERR(sgt)) {
-                        ret = PTR_ERR(sgt);
-                        ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-                        goto unlock;
-                }
+        ivpu_bo_lock(bo);
 
+        if (!bo->mmu_mapped) {
+                drm_WARN_ON(&vdev->drm, !bo->ctx);
                 ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
                                                ivpu_bo_is_snooped(bo));
                 if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
         }
 
 unlock:
-        mutex_unlock(&bo->lock);
+        ivpu_bo_unlock(bo);
 
         return ret;
 }
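Stitched together from the two ivpu_bo_pin() hunks above, the reworked function roughly takes the following shape. This is a reconstruction, not the patch itself: the body between the hunks is not shown here, so the error handling and mmu_mapped bookkeeping are only summarized in a comment (that elided error path is what jumps to the unlock label).

int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        struct sg_table *sgt;
        int ret = 0;

        ivpu_dbg_bo(vdev, bo, "pin");

        /* Get the pages/sg table before taking the reservation lock;
         * drm_gem_shmem_get_pages_sgt() acquires the resv lock internally,
         * so it must not be called under ivpu_bo_lock(). */
        sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
                return ret;
        }

        ivpu_bo_lock(bo);

        if (!bo->mmu_mapped) {
                drm_WARN_ON(&vdev->drm, !bo->ctx);
                ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
                                               ivpu_bo_is_snooped(bo));
                /* ... error handling and mmu_mapped bookkeeping elided
                 * (not shown in the hunks above); failures goto unlock ... */
        }

unlock:
        ivpu_bo_unlock(bo);

        return ret;
}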
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
         if (!drm_dev_enter(&vdev->drm, &idx))
                 return -ENODEV;
 
-        mutex_lock(&bo->lock);
+        ivpu_bo_lock(bo);
 
         ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
         if (!ret) {
@@ -94,7 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
                 ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
         }
 
-        mutex_unlock(&bo->lock);
+        ivpu_bo_unlock(bo);
 
         drm_dev_exit(idx);
 
@@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
         struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-        lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+        lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
         if (bo->mmu_mapped) {
                 drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -123,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
         if (bo->base.base.import_attach)
                 return;
 
-        dma_resv_lock(bo->base.base.resv, NULL);
         if (bo->base.sgt) {
                 dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
                 sg_free_table(bo->base.sgt);
                 kfree(bo->base.sgt);
                 bo->base.sgt = NULL;
         }
-        dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -142,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
         mutex_lock(&vdev->bo_list_lock);
         list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-                mutex_lock(&bo->lock);
+                ivpu_bo_lock(bo);
                 if (bo->ctx == ctx) {
                         ivpu_dbg_bo(vdev, bo, "unbind");
                         ivpu_bo_unbind_locked(bo);
                 }
-                mutex_unlock(&bo->lock);
+                ivpu_bo_unlock(bo);
         }
         mutex_unlock(&vdev->bo_list_lock);
 }
@@ -167,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
         bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
         INIT_LIST_HEAD(&bo->bo_list_node);
-        mutex_init(&bo->lock);
 
         return &bo->base.base;
 }
@@ -278,16 +285,15 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
         list_del(&bo->bo_list_node);
         mutex_unlock(&vdev->bo_list_lock);
 
-        drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+        drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+                    !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
         drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
         drm_WARN_ON(&vdev->drm, bo->base.vaddr);
 
         ivpu_bo_unbind_locked(bo);
         drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
         drm_WARN_ON(&vdev->drm, bo->ctx);
 
-        mutex_destroy(&bo->lock);
-
         drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
         drm_gem_shmem_free(&bo->base);
 }
@@ -370,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                 goto err_put;
 
         if (flags & DRM_IVPU_BO_MAPPABLE) {
-                dma_resv_lock(bo->base.base.resv, NULL);
+                ivpu_bo_lock(bo);
                 ret = drm_gem_shmem_vmap(&bo->base, &map);
-                dma_resv_unlock(bo->base.base.resv);
+                ivpu_bo_unlock(bo);
 
                 if (ret)
                         goto err_put;
@@ -395,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
         struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
         if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-                dma_resv_lock(bo->base.base.resv, NULL);
+                ivpu_bo_lock(bo);
                 drm_gem_shmem_vunmap(&bo->base, &map);
-                dma_resv_unlock(bo->base.base.resv);
+                ivpu_bo_unlock(bo);
         }
 
         drm_gem_object_put(&bo->base.base);
@@ -416,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
         bo = to_ivpu_bo(obj);
 
-        mutex_lock(&bo->lock);
+        ivpu_bo_lock(bo);
         args->flags = bo->flags;
         args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
         args->vpu_addr = bo->vpu_addr;
         args->size = obj->size;
-        mutex_unlock(&bo->lock);
+        ivpu_bo_unlock(bo);
 
         drm_gem_object_put(obj);
         return ret;
@@ -458,7 +464,7 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-        mutex_lock(&bo->lock);
+        ivpu_bo_lock(bo);
 
         drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
                    bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
@@ -475,7 +481,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
         drm_printf(p, "\n");
 
-        mutex_unlock(&bo->lock);
+        ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)