|
26 | 26 | #include <linux/interrupt.h>
|
27 | 27 | #include <linux/timer.h>
|
28 | 28 | #include <linux/cper.h>
|
| 29 | +#include <linux/cleanup.h> |
| 30 | +#include <linux/cxl-event.h> |
29 | 31 | #include <linux/platform_device.h>
|
30 | 32 | #include <linux/mutex.h>
|
31 | 33 | #include <linux/ratelimit.h>
|
32 | 34 | #include <linux/vmalloc.h>
|
33 | 35 | #include <linux/irq_work.h>
|
34 | 36 | #include <linux/llist.h>
|
35 | 37 | #include <linux/genalloc.h>
|
| 38 | +#include <linux/kfifo.h> |
36 | 39 | #include <linux/pci.h>
|
37 | 40 | #include <linux/pfn.h>
|
38 | 41 | #include <linux/aer.h>
|
@@ -673,6 +676,101 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
|
673 | 676 | schedule_work(&entry->work);
|
674 | 677 | }
|
675 | 678 |
|
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */

/*
 * General Media Event Record
 * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
 */
#define CPER_SEC_CXL_GEN_MEDIA_GUID					\
	GUID_INIT(0xfbcd0a77, 0xc260, 0x417f,				\
		  0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)

/*
 * DRAM Event Record
 * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
 */
#define CPER_SEC_CXL_DRAM_GUID						\
	GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,				\
		  0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)

/*
 * Memory Module Event Record
 * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
 */
#define CPER_SEC_CXL_MEM_MODULE_GUID					\
	GUID_INIT(0xfe927475, 0xdd59, 0x4339,				\
		  0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
| 705 | +/* Room for 8 entries for each of the 4 event log queues */ |
| 706 | +#define CXL_CPER_FIFO_DEPTH 32 |
| 707 | +DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH); |
| 708 | + |
| 709 | +/* Synchronize schedule_work() with cxl_cper_work changes */ |
| 710 | +static DEFINE_SPINLOCK(cxl_cper_work_lock); |
| 711 | +struct work_struct *cxl_cper_work; |
| 712 | + |
/*
 * Validate a CXL CPER section and hand it to the registered consumer.
 * Copies the record into the fifo and kicks the consumer's work item.
 * Events are silently dropped (aside from log messages) when malformed,
 * when no consumer is registered, or when the fifo is full.
 */
static void cxl_cper_post_event(enum cxl_event_type event_type,
				struct cxl_cper_event_rec *rec)
{
	struct cxl_cper_work_data wd;

	/*
	 * Reject records whose stated length cannot hold more than the
	 * header, or exceeds the structure we are about to copy from.
	 */
	if (rec->hdr.length <= sizeof(rec->hdr) ||
	    rec->hdr.length > sizeof(*rec)) {
		pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
		       rec->hdr.length);
		return;
	}

	/* The component event log fields are only meaningful when valid */
	if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
		pr_err(FW_WARN "CXL CPER invalid event\n");
		return;
	}

	/*
	 * irqsave variant: presumably this can be reached from interrupt
	 * context via GHES notification paths — the registration side uses
	 * the plain spinlock. Held across the fifo put and schedule_work()
	 * so an unregistering consumer cannot be scheduled after it is gone.
	 */
	guard(spinlock_irqsave)(&cxl_cper_work_lock);

	/* No consumer registered: drop the event */
	if (!cxl_cper_work)
		return;

	wd.event_type = event_type;
	memcpy(&wd.rec, rec, sizeof(wd.rec));

	/* kfifo_put() copies by value; failure means the fifo is full */
	if (!kfifo_put(&cxl_cper_fifo, wd)) {
		pr_err_ratelimited("CXL CPER kfifo overflow\n");
		return;
	}

	schedule_work(cxl_cper_work);
}
| 745 | + |
| 746 | +int cxl_cper_register_work(struct work_struct *work) |
| 747 | +{ |
| 748 | + if (cxl_cper_work) |
| 749 | + return -EINVAL; |
| 750 | + |
| 751 | + guard(spinlock)(&cxl_cper_work_lock); |
| 752 | + cxl_cper_work = work; |
| 753 | + return 0; |
| 754 | +} |
| 755 | +EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL); |
| 756 | + |
| 757 | +int cxl_cper_unregister_work(struct work_struct *work) |
| 758 | +{ |
| 759 | + if (cxl_cper_work != work) |
| 760 | + return -EINVAL; |
| 761 | + |
| 762 | + guard(spinlock)(&cxl_cper_work_lock); |
| 763 | + cxl_cper_work = NULL; |
| 764 | + return 0; |
| 765 | +} |
| 766 | +EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL); |
| 767 | + |
/**
 * cxl_cper_kfifo_get() - Fetch the next queued CXL CPER record
 * @wd: filled with the dequeued work data on success
 *
 * Intended to be called from the registered work item to drain the fifo.
 *
 * Return: 1 if an entry was copied into @wd, 0 if the fifo was empty.
 */
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
	return kfifo_get(&cxl_cper_fifo, wd);
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
| 773 | + |
676 | 774 | static bool ghes_do_proc(struct ghes *ghes,
|
677 | 775 | const struct acpi_hest_generic_status *estatus)
|
678 | 776 | {
|
@@ -707,6 +805,18 @@ static bool ghes_do_proc(struct ghes *ghes,
|
707 | 805 | }
|
708 | 806 | else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
|
709 | 807 | queued = ghes_handle_arm_hw_error(gdata, sev, sync);
|
| 808 | + } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { |
| 809 | + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); |
| 810 | + |
| 811 | + cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); |
| 812 | + } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { |
| 813 | + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); |
| 814 | + |
| 815 | + cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); |
| 816 | + } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { |
| 817 | + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); |
| 818 | + |
| 819 | + cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); |
710 | 820 | } else {
|
711 | 821 | void *err = acpi_hest_get_payload(gdata);
|
712 | 822 |
|
|
0 commit comments