@@ -4,6 +4,8 @@
  */
 
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -50,8 +52,10 @@ struct mtk_drm_crtc {
 	bool				pending_async_planes;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_client		*cmdq_client;
+	struct cmdq_client		cmdq_client;
+	struct cmdq_pkt			cmdq_handle;
 	u32				cmdq_event;
+	u32				cmdq_vblank_cnt;
 #endif
 
 	struct device			*mmsys_dev;
@@ -104,12 +108,57 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
 	}
 }
 
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+				   size_t size)
+{
+	struct device *dev;
+	dma_addr_t dma_addr;
+
+	pkt->va_base = kzalloc(size, GFP_KERNEL);
+	if (!pkt->va_base)
+		return -ENOMEM;
+
+	pkt->buf_size = size;
+	pkt->cl = (void *)client;
+
+	dev = client->chan->mbox->dev;
+	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+		kfree(pkt->va_base);
+		return -ENOMEM;
+	}
+
+	pkt->pa_base = dma_addr;
+
+	return 0;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+			 DMA_TO_DEVICE);
+	kfree(pkt->va_base);
+}
+#endif
+
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 
 	mtk_mutex_put(mtk_crtc->mutex);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
 
+	if (mtk_crtc->cmdq_client.chan) {
+		mbox_free_channel(mtk_crtc->cmdq_client.chan);
+		mtk_crtc->cmdq_client.chan = NULL;
+	}
+#endif
 	drm_crtc_cleanup(crtc);
 }
 
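These two helpers replace the cmdq_pkt_create()/cmdq_pkt_destroy() pair from the CMDQ mailbox helper code. The key difference is that the packet itself is now embedded in struct mtk_drm_crtc, so only the command buffer is allocated and freed here. The buffer is mapped with dma_map_single() against the GCE mailbox controller's device because GCE fetches its command stream from memory by DMA: the CPU fills the buffer, then ownership is handed to the device before every send. A rough sketch of the reused packet's lifetime under this patch (annotation only, not part of the diff):

	/*
	 * mtk_drm_crtc_create()
	 *   -> mtk_drm_cmdq_pkt_create(): kzalloc() buffer + dma_map_single()
	 *
	 * each atomic flush, in mtk_drm_crtc_update_config():
	 *   cmdq_handle->cmd_buf_size = 0;     // rewind the write offset
	 *   cmdq_pkt_*(...);                   // CPU appends commands
	 *   dma_sync_single_for_device(...);   // hand the buffer to GCE
	 *   mbox_send_message(...);
	 *
	 * mtk_drm_crtc_destroy()
	 *   -> mtk_drm_cmdq_pkt_destroy(): dma_unmap_single() + kfree() buffer
	 */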
@@ -222,9 +271,46 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
-	cmdq_pkt_destroy(data.data);
+	struct cmdq_cb_data *data = mssg;
+	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
+	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+	struct mtk_crtc_state *state;
+	unsigned int i;
+
+	if (data->sta < 0)
+		return;
+
+	state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+	state->pending_config = false;
+
+	if (mtk_crtc->pending_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			plane_state->pending.config = false;
+		}
+		mtk_crtc->pending_planes = false;
+	}
+
+	if (mtk_crtc->pending_async_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			plane_state->pending.async_config = false;
+		}
+		mtk_crtc->pending_async_planes = false;
+	}
+
+	mtk_crtc->cmdq_vblank_cnt = 0;
 }
 #endif
 
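ddp_cmdq_cb() is now a raw mailbox rx_callback rather than a cmdq_task_cb, so it receives a struct mbox_client and recovers the owning CRTC with two container_of() steps. That chain works because struct cmdq_client embeds the mbox_client and mtk_drm_crtc now embeds the cmdq_client (see the struct change above). For reference, the definition in include/linux/soc/mediatek/mtk-cmdq.h around the time of this series looks roughly like:

	struct cmdq_client {
		struct mbox_client client;	/* recovered via container_of() */
		struct mbox_chan *chan;
	};

On success the callback also takes over the state clearing that mtk_crtc_ddp_config() used to do, and zeroes cmdq_vblank_cnt to disarm the timeout check in the vblank IRQ.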
@@ -378,7 +464,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 					   state->pending_vrefresh, 0,
 					   cmdq_handle);
 
-		state->pending_config = false;
+		if (!cmdq_handle)
+			state->pending_config = false;
 	}
 
 	if (mtk_crtc->pending_planes) {
@@ -398,9 +485,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 				mtk_ddp_comp_layer_config(comp, local_layer,
 							  plane_state,
 							  cmdq_handle);
-			plane_state->pending.config = false;
+			if (!cmdq_handle)
+				plane_state->pending.config = false;
 		}
-		mtk_crtc->pending_planes = false;
+
+		if (!cmdq_handle)
+			mtk_crtc->pending_planes = false;
 	}
 
 	if (mtk_crtc->pending_async_planes) {
@@ -420,17 +510,20 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 				mtk_ddp_comp_layer_config(comp, local_layer,
 							  plane_state,
 							  cmdq_handle);
-			plane_state->pending.async_config = false;
+			if (!cmdq_handle)
+				plane_state->pending.async_config = false;
 		}
-		mtk_crtc->pending_async_planes = false;
+
+		if (!cmdq_handle)
+			mtk_crtc->pending_async_planes = false;
 	}
 }
 
 static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 				       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_pkt *cmdq_handle;
+	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
 #endif
 	struct drm_crtc *crtc = &mtk_crtc->base;
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
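The three hunks above share one idea: when the configuration is written through a cmdq_handle, the pending flags must stay set until GCE has actually executed the commands, so mtk_crtc_ddp_config() now clears pending_config and the per-plane pending.config/pending.async_config flags only on the CPU path (cmdq_handle == NULL). On the CMDQ path the clearing is deferred to ddp_cmdq_cb() above; clearing at queue time, as before, would mark the configuration as applied before the hardware consumed it. The last hunk also switches cmdq_handle from a per-flush allocation to a pointer at the packet embedded in mtk_drm_crtc.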
@@ -468,14 +561,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		mtk_mutex_release(mtk_crtc->mutex);
 	}
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (mtk_crtc->cmdq_client) {
-		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
-		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+	if (mtk_crtc->cmdq_client.chan) {
+		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
+		cmdq_handle->cmd_buf_size = 0;
 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
 		cmdq_pkt_finalize(cmdq_handle);
-		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
+					   cmdq_handle->pa_base,
+					   cmdq_handle->cmd_buf_size,
+					   DMA_TO_DEVICE);
+		/*
+		 * The CMDQ command should execute within the next 3 vblanks:
+		 * one vblank interrupt may fire before the message is sent
+		 * (occasionally) and one fires after the CMDQ work is done,
+		 * so declare a timeout once 3 vblank interrupts have passed
+		 * without the command completing.
+		 */
+		mtk_crtc->cmdq_vblank_cnt = 3;
+
+		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
 	}
 #endif
 	mtk_crtc->config_updating = false;
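With cmdq_pkt_flush_async() gone, the submit path drives the mailbox directly: flush any stale message, rewind the reused buffer by resetting cmd_buf_size, rebuild the command stream (the cmdq_pkt_clear_event()/cmdq_pkt_wfe() pair makes GCE wait for the CRTC's event, mtk_crtc->cmdq_event, before performing the register writes), sync the buffer for the device, and send. Because the client registers with knows_txdone = true, mbox_client_txdone() is called immediately after mbox_send_message() to release the channel; actual completion is reported asynchronously through the rx_callback.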
@@ -489,12 +596,15 @@ static void mtk_crtc_ddp_irq(void *data)
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
+		mtk_crtc_ddp_config(crtc, NULL);
+	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+			  drm_crtc_index(&mtk_crtc->base));
 #else
 	if (!priv->data->shadow_register)
-#endif
 		mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
 	mtk_drm_finish_page_flip(mtk_crtc);
 }
 
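Timeout detection is a counter handshake between three places: mtk_drm_crtc_update_config() arms cmdq_vblank_cnt to 3 when it sends a packet, ddp_cmdq_cb() zeroes it on completion, and this vblank IRQ decrements it while it is non-zero. If the IRQ itself brings the counter down to 0, three vblanks have passed without a completion callback and the driver reports the CMDQ timeout; the `> 0` guard keeps an idle CRTC from underflowing the counter.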
@@ -829,25 +939,39 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	mutex_init(&mtk_crtc->hw_lock);
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	mtk_crtc->cmdq_client =
-			cmdq_mbox_create(mtk_crtc->mmsys_dev,
-					 drm_crtc_index(&mtk_crtc->base));
-	if (IS_ERR(mtk_crtc->cmdq_client)) {
+	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
+	mtk_crtc->cmdq_client.client.tx_block = false;
+	mtk_crtc->cmdq_client.client.knows_txdone = true;
+	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
+	mtk_crtc->cmdq_client.chan =
+			mbox_request_channel(&mtk_crtc->cmdq_client.client,
+					     drm_crtc_index(&mtk_crtc->base));
+	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
 		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
 			drm_crtc_index(&mtk_crtc->base));
-		mtk_crtc->cmdq_client = NULL;
+		mtk_crtc->cmdq_client.chan = NULL;
 	}
 
-	if (mtk_crtc->cmdq_client) {
+	if (mtk_crtc->cmdq_client.chan) {
 		ret = of_property_read_u32_index(priv->mutex_node,
 						 "mediatek,gce-events",
 						 drm_crtc_index(&mtk_crtc->base),
 						 &mtk_crtc->cmdq_event);
 		if (ret) {
 			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 				drm_crtc_index(&mtk_crtc->base));
-			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
-			mtk_crtc->cmdq_client = NULL;
+			mbox_free_channel(mtk_crtc->cmdq_client.chan);
+			mtk_crtc->cmdq_client.chan = NULL;
+		} else {
+			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
+						      &mtk_crtc->cmdq_handle,
+						      PAGE_SIZE);
+			if (ret) {
+				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+					drm_crtc_index(&mtk_crtc->base));
+				mbox_free_channel(mtk_crtc->cmdq_client.chan);
+				mtk_crtc->cmdq_client.chan = NULL;
+			}
 		}
 	}
 #endif
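Finally, channel setup replaces cmdq_mbox_create()/cmdq_mbox_destroy() with plain mailbox client registration: tx_block = false and knows_txdone = true match the txdone handling in the submit path, and ddp_cmdq_cb is installed as the rx_callback. Every failure path (no channel, missing mediatek,gce-events property, packet creation failure) leaves cmdq_client.chan at NULL, which the rest of the driver uniformly treats as "no CMDQ, write registers from the CPU".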