@@ -674,6 +674,105 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
 	schedule_work(&entry->work);
 }
 
+/* Room for 8 entries */
+#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
+static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
+		    CXL_CPER_PROT_ERR_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
+static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
+struct work_struct *cxl_cper_prot_err_work;
+
+static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+				   int severity)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+	struct cxl_cper_prot_err_work_data wd;
+	u8 *dvsec_start, *cap_start;
+
+	if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+		pr_err_ratelimited("CXL CPER invalid agent address\n");
+		return;
+	}
+
+	if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+		pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+		return;
+	}
+
+	if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+		pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+				   prot_err->err_len);
+		return;
+	}
+
+	if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+		pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+	guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+	if (!cxl_cper_prot_err_work)
+		return;
+
+	switch (prot_err->agent_type) {
+	case RCD:
+	case DEVICE:
+	case LD:
+	case FMLD:
+	case RP:
+	case DSP:
+	case USP:
+		memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
+
+		dvsec_start = (u8 *)(prot_err + 1);
+		cap_start = dvsec_start + prot_err->dvsec_len;
+
+		memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
+		wd.severity = cper_severity_to_aer(severity);
+		break;
+	default:
+		pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
+				   prot_err->agent_type);
+		return;
+	}
+
+	if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
+		pr_err_ratelimited("CXL CPER kfifo overflow\n");
+		return;
+	}
+
+	schedule_work(cxl_cper_prot_err_work);
+#endif
+}
+
+int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+	if (cxl_cper_prot_err_work)
+		return -EINVAL;
+
+	guard(spinlock)(&cxl_cper_prot_err_work_lock);
+	cxl_cper_prot_err_work = work;
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
+
+int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+	if (cxl_cper_prot_err_work != work)
+		return -EINVAL;
+
+	guard(spinlock)(&cxl_cper_prot_err_work_lock);
+	cxl_cper_prot_err_work = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
+
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+	return kfifo_get(&cxl_cper_prot_err_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
+
 /* Room for 8 entries for each of the 4 event log queues */
 #define CXL_CPER_FIFO_DEPTH 32
 DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
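
This hunk is only the producer half of the handoff: GHES may decode CPER records from atomic context, so it validates the record, snapshots it into a fixed-size kfifo entry, and schedules whatever work_struct the CXL side registered. For orientation, here is a minimal sketch of the matching consumer, assuming the declarations land in include/cxl/event.h next to the other cxl_cper_* hooks; the real consumer lives in the CXL driver, and every example_* name below is hypothetical:

/*
 * Hypothetical consumer sketch. Only the cxl_cper_* calls come from the
 * patch above; the module boilerplate is illustrative.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <cxl/event.h>		/* assumed home of the cxl_cper_* hooks */

static void example_prot_err_work_fn(struct work_struct *work)
{
	struct cxl_cper_prot_err_work_data wd;

	/* Drain fully: one schedule_work() may cover several kfifo_put()s. */
	while (cxl_cper_prot_err_kfifo_get(&wd)) {
		/*
		 * wd.prot_err, wd.ras_cap and wd.severity are a stable
		 * copy, safe to decode outside the GHES error path.
		 */
	}
}
static DECLARE_WORK(example_prot_err_work, example_prot_err_work_fn);

static int __init example_init(void)
{
	/* Returns -EINVAL if another handler is already registered. */
	return cxl_cper_register_prot_err_work(&example_prot_err_work);
}
module_init(example_init);

static void __exit example_exit(void)
{
	cxl_cper_unregister_prot_err_work(&example_prot_err_work);
	cancel_work_sync(&example_prot_err_work);
}
module_exit(example_exit);

MODULE_IMPORT_NS("CXL");	/* the exports above live in the CXL namespace */
MODULE_LICENSE("GPL");

Note the ordering contract the sketch leans on: kfifo_put() happens before schedule_work(), and unregistration NULLs the pointer under the same lock, so an already-queued handler can still drain the fifo safely after unregistering.
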
@@ -777,6 +876,10 @@ static bool ghes_do_proc(struct ghes *ghes,
 		}
 		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
 			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+		} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+			struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+			cxl_cper_post_prot_err(prot_err, gdata->error_severity);
 		} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
 			struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
 
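
The new branch matches the CXL protocol error section GUID and forwards the raw payload together with the CPER severity; cxl_cper_post_prot_err() above folds that severity into an AER one via cper_severity_to_aer() before queueing. The pointer arithmetic there assumes the CPER layout for this section type: the fixed header, then dvsec_len bytes of DVSEC snapshot, then the RAS capability dump whose length was checked against err_len. Restated as a standalone walk, with a hypothetical helper name and the struct definitions assumed visible via include/cxl/event.h:

/*
 * Hypothetical helper restating the layout cxl_cper_post_prot_err()
 * depends on; the struct names are the kernel's, the function is not.
 */
#include <cxl/event.h>

static struct cxl_ras_capability_regs *
example_ras_cap(struct cxl_cper_sec_prot_err *prot_err)
{
	/* The DVSEC snapshot starts immediately after the fixed header... */
	u8 *dvsec_start = (u8 *)(prot_err + 1);

	/* ...and the RAS capability registers follow the DVSEC; err_len
	 * was already validated against sizeof(*regs) by the caller. */
	return (struct cxl_ras_capability_regs *)(dvsec_start +
						  prot_err->dvsec_len);
}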