@@ -72,8 +72,9 @@ struct ct_request {
 	u32 *response_buf;
 };
 
-struct ct_incoming_request {
+struct ct_incoming_msg {
 	struct list_head link;
+	u32 size;
 	u32 msg[];
 };
 
@@ -597,7 +598,26 @@ static inline bool ct_header_is_response(u32 header)
 	return !!(header & GUC_CT_MSG_IS_RESPONSE);
 }
 
-static int ct_read(struct intel_guc_ct *ct, u32 *data)
+static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
+{
+	struct ct_incoming_msg *msg;
+
+	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+	if (msg)
+		msg->size = num_dwords;
+	return msg;
+}
+
+static void ct_free_msg(struct ct_incoming_msg *msg)
+{
+	kfree(msg);
+}
+
+/*
+ * Return: number available remaining dwords to read (0 if empty)
+ *         or a negative error code on failure
+ */
+static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 {
 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
 	struct guc_ct_buffer_desc *desc = ctb->desc;
@@ -608,6 +628,7 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 	s32 available;
 	unsigned int len;
 	unsigned int i;
+	u32 header;
 
 	if (unlikely(desc->is_in_error))
 		return -EPIPE;
@@ -623,35 +644,50 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 
 	/* tail == head condition indicates empty */
 	available = tail - head;
-	if (unlikely(available == 0))
-		return -ENODATA;
+	if (unlikely(available == 0)) {
+		*msg = NULL;
+		return 0;
+	}
 
 	/* beware of buffer wrap case */
 	if (unlikely(available < 0))
 		available += size;
 	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
 	GEM_BUG_ON(available < 0);
 
-	data[0] = cmds[head];
+	header = cmds[head];
 	head = (head + 1) % size;
 
 	/* message len with header */
-	len = ct_header_get_len(data[0]) + 1;
+	len = ct_header_get_len(header) + 1;
 	if (unlikely(len > (u32)available)) {
 		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
-			 4, data,
+			 4, &header,
 			 4 * (head + available - 1 > size ?
 			      size - head : available - 1), &cmds[head],
 			 4 * (head + available - 1 > size ?
 			      available - 1 - size + head : 0), &cmds[0]);
 		goto corrupted;
 	}
 
+	*msg = ct_alloc_msg(len);
+	if (!*msg) {
+		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
+			 4, &header,
+			 4 * (head + available - 1 > size ?
+			      size - head : available - 1), &cmds[head],
+			 4 * (head + available - 1 > size ?
+			      available - 1 - size + head : 0), &cmds[0]);
+		return available;
+	}
+
+	(*msg)->msg[0] = header;
+
 	for (i = 1; i < len; i++) {
-		data[i] = cmds[head];
+		(*msg)->msg[i] = cmds[head];
 		head = (head + 1) % size;
 	}
-	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
+	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
 
 	desc->head = head * 4;
 	return available - len;
@@ -681,33 +717,33 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 * ^-----------------------len-----------------------^
 */
 
-static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
 {
-	u32 header = msg[0];
+	u32 header = response->msg[0];
 	u32 len = ct_header_get_len(header);
-	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
 	u32 fence;
 	u32 status;
 	u32 datalen;
 	struct ct_request *req;
 	unsigned long flags;
 	bool found = false;
+	int err = 0;
 
 	GEM_BUG_ON(!ct_header_is_response(header));
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
-		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+		CT_ERROR(ct, "Corrupted response (len %u)\n", len);
 		return -EPROTO;
 	}
 
-	fence = msg[1];
-	status = msg[2];
+	fence = response->msg[1];
+	status = response->msg[2];
 	datalen = len - 2;
 
 	/* Format of the status follows RESPONSE message */
 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
-		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+		CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
 		return -EPROTO;
 	}
 
@@ -721,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 			continue;
 		}
 		if (unlikely(datalen > req->response_len)) {
-			CT_ERROR(ct, "Response for %u is too long %*ph\n",
-				 req->fence, msgsize, msg);
-			datalen = 0;
+			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
+				 req->fence, datalen, req->response_len);
+			datalen = min(datalen, req->response_len);
+			err = -EMSGSIZE;
 		}
 		if (datalen)
-			memcpy(req->response_buf, msg + 3, 4 * datalen);
+			memcpy(req->response_buf, response->msg + 3, 4 * datalen);
 		req->response_len = datalen;
 		WRITE_ONCE(req->status, status);
 		found = true;
 		break;
 	}
 	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-	if (!found)
-		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
+	if (!found) {
+		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
+		return -ENOKEY;
+	}
+
+	if (unlikely(err))
+		return err;
+
+	ct_free_msg(response);
 	return 0;
 }
 
-static void ct_process_request(struct intel_guc_ct *ct,
-			       u32 action, u32 len, const u32 *payload)
+static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
+	u32 header, action, len;
+	const u32 *payload;
 	int ret;
 
+	header = request->msg[0];
+	payload = &request->msg[1];
+	action = ct_header_get_action(header);
+	len = ct_header_get_len(header);
+
 	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
 
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
 		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
-		if (unlikely(ret))
-			goto fail_unexpected;
 		break;
-
 	default:
-fail_unexpected:
-		CT_ERROR(ct, "Unexpected request %x %*ph\n",
-			 action, 4 * len, payload);
+		ret = -EOPNOTSUPP;
 		break;
 	}
+
+	if (unlikely(ret)) {
+		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
+			 action, ERR_PTR(ret));
+		return ret;
+	}
+
+	ct_free_msg(request);
+	return 0;
 }
 
 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 {
 	unsigned long flags;
-	struct ct_incoming_request *request;
-	u32 header;
-	u32 *payload;
+	struct ct_incoming_msg *request;
 	bool done;
+	int err;
 
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	request = list_first_entry_or_null(&ct->requests.incoming,
-					   struct ct_incoming_request, link);
+					   struct ct_incoming_msg, link);
 	if (request)
 		list_del(&request->link);
 	done = !!list_empty(&ct->requests.incoming);
@@ -781,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 	if (!request)
 		return true;
 
-	header = request->msg[0];
-	payload = &request->msg[1];
-	ct_process_request(ct,
-			   ct_header_get_action(header),
-			   ct_header_get_len(header),
-			   payload);
+	err = ct_process_request(ct, request);
+	if (unlikely(err)) {
+		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+			 ERR_PTR(err), 4 * request->size, request->msg);
+		ct_free_msg(request);
+	}
 
-	kfree(request);
 	return done;
 }
 
@@ -821,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
 * ^-----------------------len-----------------------^
 */
 
-static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
-	u32 header = msg[0];
-	u32 len = ct_header_get_len(header);
-	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
-	struct ct_incoming_request *request;
 	unsigned long flags;
 
-	GEM_BUG_ON(ct_header_is_response(header));
-
-	request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
-	if (unlikely(!request)) {
-		CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
-		return 0; /* XXX: -ENOMEM ? */
-	}
-	memcpy(request->msg, msg, msgsize);
+	GEM_BUG_ON(ct_header_is_response(request->msg[0]));
 
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_add_tail(&request->link, &ct->requests.incoming);
@@ -846,22 +887,41 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 	return 0;
 }
 
+static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+	u32 header = msg->msg[0];
+	int err;
+
+	if (ct_header_is_response(header))
+		err = ct_handle_response(ct, msg);
+	else
+		err = ct_handle_request(ct, msg);
+
+	if (unlikely(err)) {
+		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+			 ERR_PTR(err), 4 * msg->size, msg->msg);
+		ct_free_msg(msg);
+	}
+}
+
+/*
+ * Return: number available remaining dwords to read (0 if empty)
+ *         or a negative error code on failure
+ */
 static int ct_receive(struct intel_guc_ct *ct)
 {
-	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	struct ct_incoming_msg *msg = NULL;
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
-	ret = ct_read(ct, msg);
+	ret = ct_read(ct, &msg);
 	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
 	if (ret < 0)
 		return ret;
 
-	if (ct_header_is_response(msg[0]))
-		ct_handle_response(ct, msg);
-	else
-		ct_handle_request(ct, msg);
+	if (msg)
+		ct_handle_msg(ct, msg);
 
 	return ret;
 }
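Note (not part of the patch): below is a minimal, standalone sketch of the ownership model this change introduces, where the receive path allocates one sized, heap-backed message, exactly one handler consumes and frees it, and the dispatcher frees it on handler failure so no path leaks. All names here (demo_msg, demo_alloc_msg, demo_handle, demo_dispatch) are hypothetical stand-ins, not i915 symbols; the real code uses ct_incoming_msg with ct_alloc_msg()/ct_free_msg() under GFP_ATOMIC and defers request handling to a worker.

/*
 * Standalone sketch of the "allocate once, single owner frees" pattern.
 * Builds with a plain C compiler; no kernel headers involved.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct demo_msg {
	uint32_t size;		/* number of dwords in msg[] */
	uint32_t msg[];		/* header + payload, flexible array member */
};

static struct demo_msg *demo_alloc_msg(uint32_t num_dwords)
{
	struct demo_msg *m = malloc(sizeof(*m) + sizeof(uint32_t) * num_dwords);

	if (m)
		m->size = num_dwords;
	return m;
}

static void demo_free_msg(struct demo_msg *m)
{
	free(m);
}

/* Handler consumes the message on success and frees it itself. */
static int demo_handle(struct demo_msg *m)
{
	if (m->msg[0] == 0xdead)	/* pretend this action is unsupported */
		return -1;

	printf("handled message: %u dwords, header %#x\n", m->size, m->msg[0]);
	demo_free_msg(m);		/* success path owns the allocation */
	return 0;
}

/* Dispatcher reclaims the message only when handling failed. */
static void demo_dispatch(struct demo_msg *m)
{
	if (demo_handle(m)) {
		fprintf(stderr, "failed to handle header %#x\n", m->msg[0]);
		demo_free_msg(m);	/* failure path frees it instead */
	}
}

int main(void)
{
	uint32_t good[] = { 0x1001, 42 };
	uint32_t bad[] = { 0xdead };
	struct demo_msg *m;

	m = demo_alloc_msg(2);
	if (m) {
		memcpy(m->msg, good, sizeof(good));
		demo_dispatch(m);
	}

	m = demo_alloc_msg(1);
	if (m) {
		memcpy(m->msg, bad, sizeof(bad));
		demo_dispatch(m);
	}
	return 0;
}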