@@ -28,11 +28,21 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 {
 	ivpu_dbg(vdev, BO,
 		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
-		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
 		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
 		 (bool)drm_gem_is_imported(&bo->base.base));
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+	return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+	dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+	struct sg_table *sgt;
 	int ret = 0;
 
-	mutex_lock(&bo->lock);
-
 	ivpu_dbg_bo(vdev, bo, "pin");
-	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-	if (!bo->mmu_mapped) {
-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+		return ret;
+	}
 
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-			goto unlock;
-		}
+	ivpu_bo_lock(bo);
 
+	if (!bo->mmu_mapped) {
+		drm_WARN_ON(&vdev->drm, !bo->ctx);
 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
 					       ivpu_bo_is_snooped(bo));
 		if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	}
 
 unlock:
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	return ret;
 }
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	if (!drm_dev_enter(&vdev->drm, &idx))
 		return -ENODEV;
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
 	if (!ret) {
@@ -94,9 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	ivpu_dbg_bo(vdev, bo, "alloc");
-
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_dev_exit(idx);
 
@@ -107,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -125,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 	if (drm_gem_is_imported(&bo->base.base))
 		return;
 
-	dma_resv_lock(bo->base.base.resv, NULL);
 	if (bo->base.sgt) {
 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
 		sg_free_table(bo->base.sgt);
 		kfree(bo->base.sgt);
 		bo->base.sgt = NULL;
 	}
-	dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -144,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-		mutex_lock(&bo->lock);
+		ivpu_bo_lock(bo);
 		if (bo->ctx == ctx) {
 			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
 		}
-		mutex_unlock(&bo->lock);
+		ivpu_bo_unlock(bo);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
 }
@@ -169,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
-	mutex_init(&bo->lock);
 
 	return &bo->base.base;
 }
@@ -215,7 +220,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
 {
 	struct drm_gem_shmem_object *shmem;
 	struct ivpu_bo *bo;
@@ -233,13 +238,16 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
 		return ERR_CAST(shmem);
 
 	bo = to_ivpu_bo(&shmem->base);
+	bo->ctx_id = ctx_id;
 	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
 	bo->flags = flags;
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
 	mutex_unlock(&vdev->bo_list_lock);
 
+	ivpu_dbg_bo(vdev, bo, "alloc");
+
 	return bo;
 }
 
@@ -277,10 +285,14 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	list_del(&bo->bo_list_node);
 	mutex_unlock(&vdev->bo_list_lock);
 
-	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+	drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+		    !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+	drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+	drm_WARN_ON(&vdev->drm, bo->base.vaddr);
 
 	ivpu_bo_unbind_locked(bo);
-	mutex_destroy(&bo->lock);
+	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+	drm_WARN_ON(&vdev->drm, bo->ctx);
 
 	drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
 	drm_gem_shmem_free(&bo->base);
@@ -314,15 +326,18 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	if (size == 0)
 		return -EINVAL;
 
-	bo = ivpu_bo_alloc(vdev, size, args->flags);
+	bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
 	if (IS_ERR(bo)) {
 		ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
 			 bo, file_priv->ctx.id, args->size, args->flags);
 		return PTR_ERR(bo);
 	}
 
 	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
-	if (!ret)
+	if (ret)
+		ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+			 bo, file_priv->ctx.id, args->size, args->flags);
+	else
 		args->vpu_addr = bo->vpu_addr;
 
 	drm_gem_object_put(&bo->base.base);
@@ -345,7 +360,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
 
-	bo = ivpu_bo_alloc(vdev, size, flags);
+	bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
 	if (IS_ERR(bo)) {
 		ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
 			 bo, range->start, size, flags);
@@ -361,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		goto err_put;
 
 	if (flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 
 		if (ret)
 			goto err_put;
@@ -386,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		drm_gem_shmem_vunmap_locked(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 	}
 
 	drm_gem_object_put(&bo->base.base);
@@ -407,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	bo = to_ivpu_bo(obj);
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 	args->flags = bo->flags;
 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 	args->vpu_addr = bo->vpu_addr;
 	args->size = obj->size;
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_gem_object_put(obj);
 	return ret;
@@ -449,10 +464,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
-		   bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
+		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
 		   bo->flags, kref_read(&bo->base.base.refcount));
 
 	if (bo->base.pages)
@@ -466,7 +481,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
 	drm_printf(p, "\n");
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
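
The diff above replaces the driver-private bo->lock mutex with the reservation lock (dma_resv) that every GEM object already embeds, exposed through the ivpu_bo_lock()/ivpu_bo_unlock() helpers, and records ctx_id at allocation time so debug output no longer dereferences bo->ctx. A minimal sketch of the pattern, assuming a simplified caller; ivpu_bo_example_update() is hypothetical and not part of the patch:

	/* Sketch only: BO state updates now serialize on the GEM object's dma_resv,
	 * the same lock other GEM and dma-buf users take, instead of a private mutex. */
	static void ivpu_bo_example_update(struct ivpu_bo *bo, u64 new_vpu_addr)
	{
		ivpu_bo_lock(bo);		/* dma_resv_lock(bo->base.base.resv, NULL) */
		bo->vpu_addr = new_vpu_addr;	/* protected by the reservation lock */
		ivpu_bo_unlock(bo);		/* dma_resv_unlock(bo->base.base.resv) */
	}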
0 commit comments