@@ -33,6 +33,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 		 (bool)bo->base.base.import_attach);
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+	return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+	dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+	struct sg_table *sgt;
 	int ret = 0;
 
-	mutex_lock(&bo->lock);
-
 	ivpu_dbg_bo(vdev, bo, "pin");
-	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-	if (!bo->mmu_mapped) {
-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+		return ret;
+	}
 
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-			goto unlock;
-		}
+	ivpu_bo_lock(bo);
 
+	if (!bo->mmu_mapped) {
+		drm_WARN_ON(&vdev->drm, !bo->ctx);
 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
 					       ivpu_bo_is_snooped(bo));
 		if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	}
 
 unlock:
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	return ret;
 }
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	if (!drm_dev_enter(&vdev->drm, &idx))
 		return -ENODEV;
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
 	if (!ret) {
@@ -94,7 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_dev_exit(idx);
 
@@ -105,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -123,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 	if (bo->base.base.import_attach)
 		return;
 
-	dma_resv_lock(bo->base.base.resv, NULL);
 	if (bo->base.sgt) {
 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
 		sg_free_table(bo->base.sgt);
 		kfree(bo->base.sgt);
 		bo->base.sgt = NULL;
 	}
-	dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -142,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-		mutex_lock(&bo->lock);
+		ivpu_bo_lock(bo);
 		if (bo->ctx == ctx) {
 			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
 		}
-		mutex_unlock(&bo->lock);
+		ivpu_bo_unlock(bo);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
 }
@@ -167,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
-	mutex_init(&bo->lock);
 
 	return &bo->base.base;
 }
@@ -286,8 +293,6 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
 	drm_WARN_ON(&vdev->drm, bo->ctx);
 
-	mutex_destroy(&bo->lock);
-
 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
 	drm_gem_shmem_free(&bo->base);
 }
@@ -370,9 +375,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		goto err_put;
 
 	if (flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		ret = drm_gem_shmem_vmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 
 		if (ret)
 			goto err_put;
@@ -395,9 +400,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		drm_gem_shmem_vunmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 	}
 
 	drm_gem_object_put(&bo->base.base);
@@ -416,12 +421,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	bo = to_ivpu_bo(obj);
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 	args->flags = bo->flags;
 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 	args->vpu_addr = bo->vpu_addr;
 	args->size = obj->size;
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_gem_object_put(obj);
 	return ret;
@@ -458,7 +463,7 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
 		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
@@ -475,7 +480,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
 	drm_printf(p, "\n");
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
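
For context, a minimal sketch of the locking pattern this change converges on, assuming the driver's existing headers; example_read_vpu_addr() is a hypothetical caller and not part of the patch:

/* Hypothetical caller: per-BO state formerly guarded by the removed
 * bo->lock mutex is now serialized by the GEM object's reservation
 * lock, taken through the new helpers. */
static u64 example_read_vpu_addr(struct ivpu_bo *bo)
{
	u64 vpu_addr;

	ivpu_bo_lock(bo);		/* locks bo->base.base.resv */
	vpu_addr = bo->vpu_addr;	/* field previously protected by bo->lock */
	ivpu_bo_unlock(bo);

	return vpu_addr;
}

Reusing the reservation lock also lets ivpu_bo_unbind_locked() assert dma_resv_held() and removes the mutex_init()/mutex_destroy() calls from the BO lifecycle.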