@@ -75,8 +75,22 @@ struct cmdq {
75
75
struct cmdq_thread * thread ;
76
76
struct clk * clock ;
77
77
bool suspended ;
78
+ u8 shift_pa ;
78
79
};
79
80
81
+ struct gce_plat {
82
+ u32 thread_nr ;
83
+ u8 shift ;
84
+ };
85
+
86
+ u8 cmdq_get_shift_pa (struct mbox_chan * chan )
87
+ {
88
+ struct cmdq * cmdq = container_of (chan -> mbox , struct cmdq , mbox );
89
+
90
+ return cmdq -> shift_pa ;
91
+ }
92
+ EXPORT_SYMBOL (cmdq_get_shift_pa );
93
+
80
94
static int cmdq_thread_suspend (struct cmdq * cmdq , struct cmdq_thread * thread )
81
95
{
82
96
u32 status ;
@@ -183,13 +197,15 @@ static void cmdq_task_handle_error(struct cmdq_task *task)
183
197
{
184
198
struct cmdq_thread * thread = task -> thread ;
185
199
struct cmdq_task * next_task ;
200
+ struct cmdq * cmdq = task -> cmdq ;
186
201
187
- dev_err (task -> cmdq -> mbox .dev , "task 0x%p error\n" , task );
188
- WARN_ON (cmdq_thread_suspend (task -> cmdq , thread ) < 0 );
202
+ dev_err (cmdq -> mbox .dev , "task 0x%p error\n" , task );
203
+ WARN_ON (cmdq_thread_suspend (cmdq , thread ) < 0 );
189
204
next_task = list_first_entry_or_null (& thread -> task_busy_list ,
190
205
struct cmdq_task , list_entry );
191
206
if (next_task )
192
- writel (next_task -> pa_base , thread -> base + CMDQ_THR_CURR_ADDR );
207
+ writel (next_task -> pa_base >> cmdq -> shift_pa ,
208
+ thread -> base + CMDQ_THR_CURR_ADDR );
193
209
cmdq_thread_resume (thread );
194
210
}
195
211
@@ -219,7 +235,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
219
235
else
220
236
return ;
221
237
222
- curr_pa = readl (thread -> base + CMDQ_THR_CURR_ADDR );
238
+ curr_pa = readl (thread -> base + CMDQ_THR_CURR_ADDR ) << cmdq -> shift_pa ;
223
239
224
240
list_for_each_entry_safe (task , tmp , & thread -> task_busy_list ,
225
241
list_entry ) {
@@ -333,29 +349,39 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
333
349
334
350
if (list_empty (& thread -> task_busy_list )) {
335
351
WARN_ON (clk_enable (cmdq -> clock ) < 0 );
352
+ /*
353
+ * The thread reset will clear thread related register to 0,
354
+ * including pc, end, priority, irq, suspend and enable. Thus
355
+ * set CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will enable
356
+ * thread and make it running.
357
+ */
336
358
WARN_ON (cmdq_thread_reset (cmdq , thread ) < 0 );
337
359
338
- writel (task -> pa_base , thread -> base + CMDQ_THR_CURR_ADDR );
339
- writel (task -> pa_base + pkt -> cmd_buf_size ,
360
+ writel (task -> pa_base >> cmdq -> shift_pa ,
361
+ thread -> base + CMDQ_THR_CURR_ADDR );
362
+ writel ((task -> pa_base + pkt -> cmd_buf_size ) >> cmdq -> shift_pa ,
340
363
thread -> base + CMDQ_THR_END_ADDR );
364
+
341
365
writel (thread -> priority , thread -> base + CMDQ_THR_PRIORITY );
342
366
writel (CMDQ_THR_IRQ_EN , thread -> base + CMDQ_THR_IRQ_ENABLE );
343
367
writel (CMDQ_THR_ENABLED , thread -> base + CMDQ_THR_ENABLE_TASK );
344
368
} else {
345
369
WARN_ON (cmdq_thread_suspend (cmdq , thread ) < 0 );
346
- curr_pa = readl (thread -> base + CMDQ_THR_CURR_ADDR );
347
- end_pa = readl (thread -> base + CMDQ_THR_END_ADDR );
370
+ curr_pa = readl (thread -> base + CMDQ_THR_CURR_ADDR ) <<
371
+ cmdq -> shift_pa ;
372
+ end_pa = readl (thread -> base + CMDQ_THR_END_ADDR ) <<
373
+ cmdq -> shift_pa ;
348
374
/* check boundary */
349
375
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
350
376
curr_pa == end_pa ) {
351
377
/* set to this task directly */
352
- writel (task -> pa_base ,
378
+ writel (task -> pa_base >> cmdq -> shift_pa ,
353
379
thread -> base + CMDQ_THR_CURR_ADDR );
354
380
} else {
355
381
cmdq_task_insert_into_thread (task );
356
382
smp_mb (); /* modify jump before enable thread */
357
383
}
358
- writel (task -> pa_base + pkt -> cmd_buf_size ,
384
+ writel (( task -> pa_base + pkt -> cmd_buf_size ) >> cmdq -> shift_pa ,
359
385
thread -> base + CMDQ_THR_END_ADDR );
360
386
cmdq_thread_resume (thread );
361
387
}
@@ -371,6 +397,38 @@ static int cmdq_mbox_startup(struct mbox_chan *chan)
371
397
372
398
/*
 * Tear down a mailbox channel: under the channel lock, suspend the thread,
 * flush completed tasks via the IRQ handler so they get success callbacks,
 * fail any tasks still pending with CMDQ_CB_ERROR, then disable the thread
 * and its clock.  If the busy list is already empty the thread was never
 * enabled (or already torn down) and only the unlock is needed.
 */
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure executed tasks have success callback */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	/* fail whatever is still queued; tasks are freed here, not by the HW */
	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, CMDQ_CB_ERROR);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);
	clk_disable(cmdq->clock);
done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled.  cmdq_mbox_send_data() always resets the thread (which
	 * clears the disable and suspend state) when the first packet is sent
	 * on a channel, so nothing else is needed here: just unlock and leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);
}
375
433
376
434
static int cmdq_mbox_flush (struct mbox_chan * chan , unsigned long timeout )
@@ -453,6 +511,7 @@ static int cmdq_probe(struct platform_device *pdev)
453
511
struct resource * res ;
454
512
struct cmdq * cmdq ;
455
513
int err , i ;
514
+ struct gce_plat * plat_data ;
456
515
457
516
cmdq = devm_kzalloc (dev , sizeof (* cmdq ), GFP_KERNEL );
458
517
if (!cmdq )
@@ -471,7 +530,14 @@ static int cmdq_probe(struct platform_device *pdev)
471
530
return - EINVAL ;
472
531
}
473
532
474
- cmdq -> thread_nr = (u32 )(unsigned long )of_device_get_match_data (dev );
533
+ plat_data = (struct gce_plat * )of_device_get_match_data (dev );
534
+ if (!plat_data ) {
535
+ dev_err (dev , "failed to get match data\n" );
536
+ return - EINVAL ;
537
+ }
538
+
539
+ cmdq -> thread_nr = plat_data -> thread_nr ;
540
+ cmdq -> shift_pa = plat_data -> shift ;
475
541
cmdq -> irq_mask = GENMASK (cmdq -> thread_nr - 1 , 0 );
476
542
err = devm_request_irq (dev , cmdq -> irq , cmdq_irq_handler , IRQF_SHARED ,
477
543
"mtk_cmdq" , cmdq );
@@ -534,9 +600,14 @@ static const struct dev_pm_ops cmdq_pm_ops = {
534
600
.resume = cmdq_resume ,
535
601
};
536
602
603
+ static const struct gce_plat gce_plat_v2 = {.thread_nr = 16 };
604
+ static const struct gce_plat gce_plat_v3 = {.thread_nr = 24 };
605
+ static const struct gce_plat gce_plat_v4 = {.thread_nr = 24 , .shift = 3 };
606
+
537
607
static const struct of_device_id cmdq_of_ids [] = {
538
- {.compatible = "mediatek,mt8173-gce" , .data = (void * )16 },
539
- {.compatible = "mediatek,mt8183-gce" , .data = (void * )24 },
608
+ {.compatible = "mediatek,mt8173-gce" , .data = (void * )& gce_plat_v2 },
609
+ {.compatible = "mediatek,mt8183-gce" , .data = (void * )& gce_plat_v3 },
610
+ {.compatible = "mediatek,mt6779-gce" , .data = (void * )& gce_plat_v4 },
540
611
{}
541
612
};
542
613
0 commit comments