@@ -386,6 +386,69 @@ static int acp_memory_init(struct snd_sof_dev *sdev)
 	return 0;
 }
 
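+/* Resume the SoundWire manager(s) that signalled a wake event and clear the per-link flags */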
+static void amd_sof_handle_acp70_sdw_wake_event(struct acp_dev_data *adata)
+{
+	struct amd_sdw_manager *amd_manager;
+
+	if (adata->acp70_sdw0_wake_event) {
+		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
+		if (amd_manager)
+			pm_request_resume(amd_manager->dev);
+		adata->acp70_sdw0_wake_event = 0;
+	}
+
+	if (adata->acp70_sdw1_wake_event) {
+		amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
+		if (amd_manager)
+			pm_request_resume(amd_manager->dev);
+		adata->acp70_sdw1_wake_event = 0;
+	}
+}
+
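+/*
+ * Check the ACP7.0 external interrupt status for SoundWire host wake and PME
+ * events, acknowledge any that are pending, and resume the affected manager.
+ * Returns 1 when a wake interrupt was handled, 0 otherwise.
+ */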
+static int amd_sof_check_and_handle_acp70_sdw_wake_irq(struct snd_sof_dev *sdev)
+{
+	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+	u32 ext_intr_stat1;
+	int irq_flag = 0;
+	bool sdw_wake_irq = false;
+
+	ext_intr_stat1 = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
+	if (ext_intr_stat1 & ACP70_SDW0_HOST_WAKE_STAT) {
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
+				  ACP70_SDW0_HOST_WAKE_STAT);
+		adata->acp70_sdw0_wake_event = true;
+		sdw_wake_irq = true;
+	}
+
+	if (ext_intr_stat1 & ACP70_SDW1_HOST_WAKE_STAT) {
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
+				  ACP70_SDW1_HOST_WAKE_STAT);
+		adata->acp70_sdw1_wake_event = true;
+		sdw_wake_irq = true;
+	}
+
+	if (ext_intr_stat1 & ACP70_SDW0_PME_STAT) {
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_SW0_WAKE_EN, 0);
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1, ACP70_SDW0_PME_STAT);
+		adata->acp70_sdw0_wake_event = true;
+		sdw_wake_irq = true;
+	}
+
+	if (ext_intr_stat1 & ACP70_SDW1_PME_STAT) {
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_SW1_WAKE_EN, 0);
+		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1, ACP70_SDW1_PME_STAT);
+		adata->acp70_sdw1_wake_event = true;
+		sdw_wake_irq = true;
+	}
+
+	if (sdw_wake_irq) {
+		amd_sof_handle_acp70_sdw_wake_event(adata);
+		irq_flag = 1;
+	}
+	return irq_flag;
+}
+
 static irqreturn_t acp_irq_thread(int irq, void *context)
 {
 	struct snd_sof_dev *sdev = context;
@@ -418,7 +481,7 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
 	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
 	unsigned int base = desc->dsp_intr_base;
 	unsigned int val;
-	int irq_flag = 0;
+	int irq_flag = 0, wake_irq_flag = 0;
 
 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
 	if (val & ACP_DSP_TO_HOST_IRQ) {
@@ -456,8 +519,14 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
 				schedule_work(&amd_manager->amd_sdw_irq_thread);
 			irq_flag = 1;
 		}
+		switch (adata->pci_rev) {
+		case ACP70_PCI_ID:
+		case ACP71_PCI_ID:
+			wake_irq_flag = amd_sof_check_and_handle_acp70_sdw_wake_irq(sdev);
+			break;
+		}
 	}
-	if (irq_flag)
+	if (irq_flag || wake_irq_flag)
 		return IRQ_HANDLED;
 	else
 		return IRQ_NONE;
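
For orientation only, not part of the patch above: a minimal, self-contained C sketch of the acknowledge-then-dispatch pattern that amd_sof_check_and_handle_acp70_sdw_wake_irq() follows. The register backing store, bit values, and helper names below are invented for illustration; the real driver reads and acknowledges the status bits through snd_sof_dsp_read()/snd_sof_dsp_write() on ACP MMIO and wakes the manager with pm_request_resume().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the per-link wake status bits (values are illustrative only). */
#define SDW0_WAKE_STAT	(1u << 0)
#define SDW1_WAKE_STAT	(1u << 1)

/* Fake "register" backing store; the driver accesses real ACP MMIO instead. */
static uint32_t fake_ext_intr_stat1 = SDW1_WAKE_STAT;

static uint32_t reg_read(void)
{
	return fake_ext_intr_stat1;
}

/* Write-one-to-clear semantics: writing a status bit back acknowledges it. */
static void reg_write(uint32_t val)
{
	fake_ext_intr_stat1 &= ~val;
}

/* Stand-in for waking the SoundWire manager (pm_request_resume() in the driver). */
static void resume_manager(int link)
{
	printf("resume requested for SoundWire link %d\n", link);
}

/* Same shape as the new helper: snapshot status, ack each pending source, then dispatch. */
static int check_and_handle_wake(void)
{
	uint32_t stat = reg_read();
	bool wake_pending[2] = { false, false };
	int handled = 0;

	if (stat & SDW0_WAKE_STAT) {
		reg_write(SDW0_WAKE_STAT);	/* acknowledge link 0 wake */
		wake_pending[0] = true;
	}
	if (stat & SDW1_WAKE_STAT) {
		reg_write(SDW1_WAKE_STAT);	/* acknowledge link 1 wake */
		wake_pending[1] = true;
	}

	for (int link = 0; link < 2; link++) {
		if (wake_pending[link]) {
			resume_manager(link);
			handled = 1;
		}
	}
	return handled;
}

int main(void)
{
	int handled = check_and_handle_wake();

	printf("handled=%d, stat now 0x%x\n", handled, fake_ext_intr_stat1);
	return 0;
}

Note that in the patch the status bits are acknowledged directly in the interrupt handler, while the actual wake-up is deferred: pm_request_resume() only queues an asynchronous runtime-resume request for the manager device.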