@@ -33,6 +33,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 		 (bool)bo->base.base.import_attach);
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+	return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+	dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
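The two helpers added above wrap the GEM object's DMA reservation lock (a ww_mutex), so every per-BO critical section goes through one place instead of the driver-private bo->lock mutex being removed in this patch. A minimal usage sketch, assuming the single-BO case where no ww_acquire_ctx is needed (with a NULL context, dma_resv_lock() cannot fail with -EDEADLK, which is presumably why the callers below discard ivpu_bo_lock()'s return value):

	ivpu_bo_lock(bo);	/* dma_resv_lock(bo->base.base.resv, NULL) */
	/* ... read or update bo->ctx, bo->vpu_addr, bo->mmu_mapped ... */
	ivpu_bo_unlock(bo);	/* dma_resv_unlock(bo->base.base.resv) */
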
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+	struct sg_table *sgt;
 	int ret = 0;
 
-	mutex_lock(&bo->lock);
-
 	ivpu_dbg_bo(vdev, bo, "pin");
-	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-	if (!bo->mmu_mapped) {
-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+		return ret;
+	}
 
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-			goto unlock;
-		}
+	ivpu_bo_lock(bo);
 
+	if (!bo->mmu_mapped) {
+		drm_WARN_ON(&vdev->drm, !bo->ctx);
 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
 					       ivpu_bo_is_snooped(bo));
 		if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	}
 
 unlock:
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	return ret;
 }
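Note the reordering in ivpu_bo_pin(): the sg_table is now obtained before the BO lock is taken, and the error path returns directly instead of jumping to the unlock label. A sketch of the ordering this enforces, assuming drm_gem_shmem_get_pages_sgt() acquires the same reservation lock internally (so calling it under ivpu_bo_lock() would recurse on that lock):

	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);	/* takes and drops resv itself */
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ivpu_bo_lock(bo);				/* resv is free again here */
	/* ... map sgt into the VPU MMU, set bo->mmu_mapped ... */
	ivpu_bo_unlock(bo);
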
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	if (!drm_dev_enter(&vdev->drm, &idx))
 		return -ENODEV;
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
 	if (!ret) {
@@ -94,7 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_dev_exit(idx);
 
@@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -123,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 	if (bo->base.base.import_attach)
 		return;
 
-	dma_resv_lock(bo->base.base.resv, NULL);
 	if (bo->base.sgt) {
 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
 		sg_free_table(bo->base.sgt);
 		kfree(bo->base.sgt);
 		bo->base.sgt = NULL;
 	}
-	dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
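The open-coded dma_resv_lock()/dma_resv_unlock() pair around the sg_table teardown is dropped because ivpu_bo_unbind_locked() is now, per the updated lockdep_assert(), only called with the reservation lock already held (or on the final-free path where no references remain); re-taking the lock here would deadlock on itself. A hypothetical caller illustrating the contract:

	ivpu_bo_lock(bo);		/* satisfies dma_resv_held() in the assert */
	ivpu_bo_unbind_locked(bo);	/* MMU unmap and sgt teardown run under resv */
	ivpu_bo_unlock(bo);
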
@@ -142,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-		mutex_lock(&bo->lock);
+		ivpu_bo_lock(bo);
 		if (bo->ctx == ctx) {
 			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
 		}
-		mutex_unlock(&bo->lock);
+		ivpu_bo_unlock(bo);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
 }
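This loop also fixes the driver's lock ordering: the device-wide vdev->bo_list_lock mutex is the outer lock and each BO's reservation lock is taken inside it. Any other path that needs both should presumably follow the same order to avoid an inversion; a sketch of the nesting:

	mutex_lock(&vdev->bo_list_lock);	/* outer: protects the BO list */
	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
		ivpu_bo_lock(bo);		/* inner: per-BO reservation lock */
		/* ... per-BO work ... */
		ivpu_bo_unlock(bo);
	}
	mutex_unlock(&vdev->bo_list_lock);
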
@@ -167,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
-	mutex_init(&bo->lock);
 
 	return &bo->base.base;
 }
@@ -278,16 +285,15 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	list_del(&bo->bo_list_node);
 	mutex_unlock(&vdev->bo_list_lock);
 
-	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+	drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+		    !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
 	drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
 	drm_WARN_ON(&vdev->drm, bo->base.vaddr);
 
 	ivpu_bo_unbind_locked(bo);
 	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
 	drm_WARN_ON(&vdev->drm, bo->ctx);
 
-	mutex_destroy(&bo->lock);
-
 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
 	drm_gem_shmem_free(&bo->base);
 }
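The signaled-fences warning in ivpu_gem_bo_free() is now skipped for dma-buf imports, presumably because the reservation object of an imported BO belongs to the exporter and can still carry unsignaled fences from other users when this driver drops its handle; only natively allocated BOs are checked. Spelled out, the new condition is equivalent to:

	if (!drm_gem_is_imported(&bo->base.base))
		drm_WARN_ON(&vdev->drm,
			    !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
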
@@ -370,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		goto err_put;
 
 	if (flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		ret = drm_gem_shmem_vmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 
 		if (ret)
 			goto err_put;
@@ -395,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		drm_gem_shmem_vunmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 	}
 
 	drm_gem_object_put(&bo->base.base);
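The vmap/vunmap hunks above are pure substitutions: these call sites already used the reservation lock, since the shmem helpers expect it held by the caller, so ivpu_bo_lock()/ivpu_bo_unlock() map one-to-one onto the old open-coded pairs. A sketch of the kernel-mapping path for a mappable BO, under the assumption that drm_gem_shmem_vmap() requires the resv lock held on entry:

	struct iosys_map map;
	int ret;

	ivpu_bo_lock(bo);
	ret = drm_gem_shmem_vmap(&bo->base, &map);	/* resv held, as the helper expects */
	ivpu_bo_unlock(bo);
	if (ret)
		return ret;
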
@@ -416,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	bo = to_ivpu_bo(obj);
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 	args->flags = bo->flags;
 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 	args->vpu_addr = bo->vpu_addr;
 	args->size = obj->size;
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_gem_object_put(obj);
 	return ret;
@@ -458,7 +464,7 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
 		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
@@ -475,7 +481,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
 	drm_printf(p, "\n");
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)