@@ -69,6 +69,8 @@ struct mtk_crtc {
69
69
/* lock for display hardware access */
70
70
struct mutex hw_lock ;
71
71
bool config_updating ;
72
+ /* lock for config_updating to cmd buffer */
73
+ spinlock_t config_lock ;
72
74
};
73
75
74
76
struct mtk_crtc_state {
@@ -106,59 +108,26 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
106
108
107
109
/*
 * Vblank handler for this CRTC: forward the vblank to DRM core and, if a
 * page flip is pending completion, finish it.
 *
 * config_lock guards the config_updating flag, which is set (under the
 * same lock) by mtk_crtc_update_config() while a new configuration /
 * command buffer is being committed; while it is set the pending flip
 * must not be completed from here.  The irqsave variant is used because
 * the other takers of config_lock (e.g. the cmdq mailbox callback) run
 * in atomic context.
 * NOTE(review): presumably invoked from the display vblank IRQ path —
 * confirm against the caller outside this view.
 */
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
	unsigned long flags;

	drm_crtc_handle_vblank(&mtk_crtc->base);

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	/* Only complete the flip when no reconfiguration is in flight. */
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}
115
122
116
- #if IS_REACHABLE (CONFIG_MTK_CMDQ )
117
- static int mtk_drm_cmdq_pkt_create (struct cmdq_client * client , struct cmdq_pkt * pkt ,
118
- size_t size )
119
- {
120
- struct device * dev ;
121
- dma_addr_t dma_addr ;
122
-
123
- pkt -> va_base = kzalloc (size , GFP_KERNEL );
124
- if (!pkt -> va_base )
125
- return - ENOMEM ;
126
-
127
- pkt -> buf_size = size ;
128
- pkt -> cl = (void * )client ;
129
-
130
- dev = client -> chan -> mbox -> dev ;
131
- dma_addr = dma_map_single (dev , pkt -> va_base , pkt -> buf_size ,
132
- DMA_TO_DEVICE );
133
- if (dma_mapping_error (dev , dma_addr )) {
134
- dev_err (dev , "dma map failed, size=%u\n" , (u32 )(u64 )size );
135
- kfree (pkt -> va_base );
136
- return - ENOMEM ;
137
- }
138
-
139
- pkt -> pa_base = dma_addr ;
140
-
141
- return 0 ;
142
- }
143
-
144
- static void mtk_drm_cmdq_pkt_destroy (struct cmdq_pkt * pkt )
145
- {
146
- struct cmdq_client * client = (struct cmdq_client * )pkt -> cl ;
147
-
148
- dma_unmap_single (client -> chan -> mbox -> dev , pkt -> pa_base , pkt -> buf_size ,
149
- DMA_TO_DEVICE );
150
- kfree (pkt -> va_base );
151
- }
152
- #endif
153
-
154
123
static void mtk_crtc_destroy (struct drm_crtc * crtc )
155
124
{
156
125
struct mtk_crtc * mtk_crtc = to_mtk_crtc (crtc );
157
126
int i ;
158
127
159
128
mtk_mutex_put (mtk_crtc -> mutex );
160
129
#if IS_REACHABLE (CONFIG_MTK_CMDQ )
161
- mtk_drm_cmdq_pkt_destroy ( & mtk_crtc -> cmdq_handle );
130
+ cmdq_pkt_destroy ( & mtk_crtc -> cmdq_client , & mtk_crtc -> cmdq_handle );
162
131
163
132
if (mtk_crtc -> cmdq_client .chan ) {
164
133
mbox_free_channel (mtk_crtc -> cmdq_client .chan );
@@ -308,12 +277,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
308
277
struct mtk_crtc * mtk_crtc = container_of (cmdq_cl , struct mtk_crtc , cmdq_client );
309
278
struct mtk_crtc_state * state ;
310
279
unsigned int i ;
280
+ unsigned long flags ;
311
281
312
282
if (data -> sta < 0 )
313
283
return ;
314
284
315
285
state = to_mtk_crtc_state (mtk_crtc -> base .state );
316
286
287
+ spin_lock_irqsave (& mtk_crtc -> config_lock , flags );
288
+ if (mtk_crtc -> config_updating ) {
289
+ spin_unlock_irqrestore (& mtk_crtc -> config_lock , flags );
290
+ goto ddp_cmdq_cb_out ;
291
+ }
292
+
317
293
state -> pending_config = false;
318
294
319
295
if (mtk_crtc -> pending_planes ) {
@@ -340,6 +316,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
340
316
mtk_crtc -> pending_async_planes = false;
341
317
}
342
318
319
+ spin_unlock_irqrestore (& mtk_crtc -> config_lock , flags );
320
+
321
+ ddp_cmdq_cb_out :
322
+
343
323
mtk_crtc -> cmdq_vblank_cnt = 0 ;
344
324
wake_up (& mtk_crtc -> cb_blocking_queue );
345
325
}
@@ -449,6 +429,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
449
429
{
450
430
struct drm_device * drm = mtk_crtc -> base .dev ;
451
431
struct drm_crtc * crtc = & mtk_crtc -> base ;
432
+ unsigned long flags ;
452
433
int i ;
453
434
454
435
for (i = 0 ; i < mtk_crtc -> ddp_comp_nr ; i ++ ) {
@@ -480,10 +461,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
480
461
pm_runtime_put (drm -> dev );
481
462
482
463
if (crtc -> state -> event && !crtc -> state -> active ) {
483
- spin_lock_irq (& crtc -> dev -> event_lock );
464
+ spin_lock_irqsave (& crtc -> dev -> event_lock , flags );
484
465
drm_crtc_send_vblank_event (crtc , crtc -> state -> event );
485
466
crtc -> state -> event = NULL ;
486
- spin_unlock_irq (& crtc -> dev -> event_lock );
467
+ spin_unlock_irqrestore (& crtc -> dev -> event_lock , flags );
487
468
}
488
469
}
489
470
@@ -569,9 +550,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
569
550
struct mtk_drm_private * priv = crtc -> dev -> dev_private ;
570
551
unsigned int pending_planes = 0 , pending_async_planes = 0 ;
571
552
int i ;
553
+ unsigned long flags ;
572
554
573
555
mutex_lock (& mtk_crtc -> hw_lock );
556
+
557
+ spin_lock_irqsave (& mtk_crtc -> config_lock , flags );
574
558
mtk_crtc -> config_updating = true;
559
+ spin_unlock_irqrestore (& mtk_crtc -> config_lock , flags );
560
+
575
561
if (needs_vblank )
576
562
mtk_crtc -> pending_needs_vblank = true;
577
563
@@ -607,7 +593,7 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
607
593
cmdq_pkt_clear_event (cmdq_handle , mtk_crtc -> cmdq_event );
608
594
cmdq_pkt_wfe (cmdq_handle , mtk_crtc -> cmdq_event , false);
609
595
mtk_crtc_ddp_config (crtc , cmdq_handle );
610
- cmdq_pkt_finalize (cmdq_handle );
596
+ cmdq_pkt_eoc (cmdq_handle );
611
597
dma_sync_single_for_device (mtk_crtc -> cmdq_client .chan -> mbox -> dev ,
612
598
cmdq_handle -> pa_base ,
613
599
cmdq_handle -> cmd_buf_size ,
@@ -625,7 +611,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
625
611
mbox_client_txdone (mtk_crtc -> cmdq_client .chan , 0 );
626
612
}
627
613
#endif
614
+ spin_lock_irqsave (& mtk_crtc -> config_lock , flags );
628
615
mtk_crtc -> config_updating = false;
616
+ spin_unlock_irqrestore (& mtk_crtc -> config_lock , flags );
617
+
629
618
mutex_unlock (& mtk_crtc -> hw_lock );
630
619
}
631
620
@@ -925,7 +914,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
925
914
mtk_crtc_plane_type (mtk_crtc -> layer_nr , num_planes ),
926
915
mtk_ddp_comp_supported_rotations (comp ),
927
916
mtk_ddp_comp_get_formats (comp ),
928
- mtk_ddp_comp_get_num_formats (comp ));
917
+ mtk_ddp_comp_get_num_formats (comp ), i );
929
918
if (ret )
930
919
return ret ;
931
920
@@ -1068,6 +1057,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
1068
1057
drm_mode_crtc_set_gamma_size (& mtk_crtc -> base , gamma_lut_size );
1069
1058
drm_crtc_enable_color_mgmt (& mtk_crtc -> base , 0 , has_ctm , gamma_lut_size );
1070
1059
mutex_init (& mtk_crtc -> hw_lock );
1060
+ spin_lock_init (& mtk_crtc -> config_lock );
1071
1061
1072
1062
#if IS_REACHABLE (CONFIG_MTK_CMDQ )
1073
1063
i = priv -> mbox_index ++ ;
@@ -1094,9 +1084,9 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
1094
1084
mbox_free_channel (mtk_crtc -> cmdq_client .chan );
1095
1085
mtk_crtc -> cmdq_client .chan = NULL ;
1096
1086
} else {
1097
- ret = mtk_drm_cmdq_pkt_create (& mtk_crtc -> cmdq_client ,
1098
- & mtk_crtc -> cmdq_handle ,
1099
- PAGE_SIZE );
1087
+ ret = cmdq_pkt_create (& mtk_crtc -> cmdq_client ,
1088
+ & mtk_crtc -> cmdq_handle ,
1089
+ PAGE_SIZE );
1100
1090
if (ret ) {
1101
1091
dev_dbg (dev , "mtk_crtc %d failed to create cmdq packet\n" ,
1102
1092
drm_crtc_index (& mtk_crtc -> base ));
0 commit comments