@@ -89,6 +89,8 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
  */
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
+	spin_lock_init(&ct->ctbs.send.lock);
+	spin_lock_init(&ct->ctbs.recv.lock);
 	spin_lock_init(&ct->requests.lock);
 	INIT_LIST_HEAD(&ct->requests.pending);
 	INIT_LIST_HEAD(&ct->requests.incoming);
@@ -473,17 +475,22 @@ static int ct_send(struct intel_guc_ct *ct,
 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
 	GEM_BUG_ON(!response_buf && response_buf_size);
 
+	spin_lock_irqsave(&ct->ctbs.send.lock, flags);
+
 	fence = ct_get_next_fence(ct);
 	request.fence = fence;
 	request.status = 0;
 	request.response_len = response_buf_size;
 	request.response_buf = response_buf;
 
-	spin_lock_irqsave(&ct->requests.lock, flags);
+	spin_lock(&ct->requests.lock);
 	list_add_tail(&request.link, &ct->requests.pending);
-	spin_unlock_irqrestore(&ct->requests.lock, flags);
+	spin_unlock(&ct->requests.lock);
 
 	err = ct_write(ct, action, len, fence);
+
+	spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
+
 	if (unlikely(err))
 		goto unlink;
 
@@ -819,6 +826,7 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 {
 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	unsigned long flags;
 	int err = 0;
 
 	if (unlikely(!ct->enabled)) {
@@ -827,7 +835,9 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 	}
 
 	do {
+		spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
 		err = ct_read(ct, msg);
+		spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
 		if (err)
 			break;
 
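
For readers skimming the hunks above: the patch puts ct_send() under a per-CTB send lock taken with spin_lock_irqsave(), which is why the nested requests.lock drops to plain spin_lock()/spin_unlock() (interrupts are already disabled by the outer lock), and it wraps ct_read() in the event handler with the new recv lock. Below is a minimal kernel-style sketch of that send-path lock nesting; the names ctb_sketch, ct_sketch, ct_sketch_init_early and ct_sketch_send are simplified stand-ins for illustration, not the driver's real intel_guc_ct layout, and the sketch only builds inside a kernel tree.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Simplified stand-ins for the CT structures touched by the patch. */
struct ctb_sketch {
	spinlock_t lock;			/* serializes access to one CT buffer */
};

struct ct_sketch {
	struct ctb_sketch send;
	struct ctb_sketch recv;
	struct {
		spinlock_t lock;		/* protects the pending list */
		struct list_head pending;
	} requests;
};

static void ct_sketch_init_early(struct ct_sketch *ct)
{
	spin_lock_init(&ct->send.lock);
	spin_lock_init(&ct->recv.lock);
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
}

static void ct_sketch_send(struct ct_sketch *ct, struct list_head *link)
{
	unsigned long flags;

	/* Outer lock: taken irqsave so the send path is safe against IRQ context. */
	spin_lock_irqsave(&ct->send.lock, flags);

	/* Nested lock: plain spin_lock() is enough, interrupts are already off. */
	spin_lock(&ct->requests.lock);
	list_add_tail(link, &ct->requests.pending);
	spin_unlock(&ct->requests.lock);

	/* ... the actual CT buffer write (ct_write() in the driver) goes here ... */

	spin_unlock_irqrestore(&ct->send.lock, flags);
}

The receive side follows the same pattern with the recv lock held irqsave around the read, as the last hunk shows.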