@@ -531,6 +531,268 @@ struct kvm_pmu_event_filter {
 #define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
 #define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
 
+/* for KVM_CAP_MCE */
+struct kvm_x86_mce {
+	__u64 status;
+	__u64 addr;
+	__u64 misc;
+	__u64 mcg_status;
+	__u8 bank;
+	__u8 pad1[7];
+	__u64 pad2[3];
+};
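struct kvm_x86_mce is the payload for machine-check injection from userspace via the KVM_X86_SET_MCE vcpu ioctl (the ioctl itself is defined elsewhere in the UAPI, not in this hunk). A minimal sketch of injecting an uncorrected memory error; the MCi_STATUS/MCG_STATUS bit definitions and helper name below are assumptions of this example, taken from the architectural MCA MSR layout:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Architectural MCi_STATUS / MCG_STATUS bits (assumed here, not part of this hunk). */
#define MCI_STATUS_VAL   (1ULL << 63)   /* error record valid */
#define MCI_STATUS_UC    (1ULL << 61)   /* uncorrected error */
#define MCI_STATUS_EN    (1ULL << 60)   /* error reporting enabled */
#define MCI_STATUS_ADDRV (1ULL << 58)   /* addr field valid */
#define MCG_STATUS_RIPV  (1ULL << 0)    /* restart IP valid */
#define MCG_STATUS_MCIP  (1ULL << 2)    /* machine check in progress */

/* Inject an uncorrected memory error at guest_paddr into MCA bank 9.
 * Assumes KVM_X86_SETUP_MCE was already issued on this vCPU. */
static int inject_uc_mce(int vcpu_fd, __u64 guest_paddr)
{
	struct kvm_x86_mce mce = {
		.status     = MCI_STATUS_VAL | MCI_STATUS_UC |
			      MCI_STATUS_EN | MCI_STATUS_ADDRV,
		.addr       = guest_paddr,
		.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
		.bank       = 9,
	};

	return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}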
+
+/* for KVM_CAP_XEN_HVM */
+#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR	(1 << 0)
+#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL	(1 << 1)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO		(1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE		(1 << 3)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL	(1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND		(1 << 5)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG	(1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE	(1 << 7)
+
+struct kvm_xen_hvm_config {
+	__u32 flags;
+	__u32 msr;
+	__u64 blob_addr_32;
+	__u64 blob_addr_64;
+	__u8 blob_size_32;
+	__u8 blob_size_64;
+	__u8 pad2[30];
+};
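These flags are reported by KVM_CHECK_EXTENSION(KVM_CAP_XEN_HVM), and struct kvm_xen_hvm_config is the argument of the KVM_XEN_HVM_CONFIG VM ioctl (not shown in this hunk). A rough sketch of enabling in-kernel Xen hypercall handling; the MSR index 0x40000000 is the conventional Xen hypercall-page MSR and, like the helper name, is an assumption of this example:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Ask KVM to fill the hypercall page and intercept Xen hypercalls itself,
 * instead of userspace supplying a hypercall blob. */
static int enable_xen_hypercalls(int vm_fd)
{
	struct kvm_xen_hvm_config cfg = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr   = 0x40000000,	/* assumed hypercall-page MSR index */
	};

	return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}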
+
+struct kvm_xen_hvm_attr {
+	__u16 type;
+	__u16 pad[3];
+	union {
+		__u8 long_mode;
+		__u8 vector;
+		__u8 runstate_update_flag;
+		struct {
+			__u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
+		} shared_info;
+		struct {
+			__u32 send_port;
+			__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+			__u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN		(1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE		(1 << 1)
+#define KVM_XEN_EVTCHN_RESET		(1 << 2)
+			/*
+			 * Events sent by the guest are either looped back to
+			 * the guest itself (potentially on a different port#)
+			 * or signalled via an eventfd.
+			 */
+			union {
+				struct {
+					__u32 port;
+					__u32 vcpu;
+					__u32 priority;
+				} port;
+				struct {
+					__u32 port; /* Zero for eventfd */
+					__s32 fd;
+				} eventfd;
+				__u32 padding[4];
+			} deliver;
+		} evtchn;
+		__u32 xen_version;
+		__u64 pad[8];
+	} u;
+};
+
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_ATTR_TYPE_LONG_MODE		0x0
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO		0x1
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR		0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN		0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION		0x4
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
+#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG	0x5
+
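struct kvm_xen_hvm_attr and the KVM_XEN_ATTR_TYPE_* values are consumed by the KVM_XEN_HVM_SET_ATTR / KVM_XEN_HVM_GET_ATTR VM ioctls (defined elsewhere in the UAPI). A minimal sketch that tells KVM where the guest's Xen shared_info page lives; the helper name and error handling are illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Record that the guest runs in long mode (affects struct layouts) and
 * point KVM at the guest frame holding the Xen shared_info page. */
static int set_xen_shared_info(int vm_fd, __u64 shared_info_gfn)
{
	struct kvm_xen_hvm_attr lm = {
		.type = KVM_XEN_ATTR_TYPE_LONG_MODE,
		.u.long_mode = 1,
	};
	struct kvm_xen_hvm_attr si = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = shared_info_gfn,
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &lm) < 0)
		return -1;
	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &si);
}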
+struct kvm_xen_vcpu_attr {
+	__u16 type;
+	__u16 pad[3];
+	union {
+		__u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
+		__u64 pad[8];
+		struct {
+			__u64 state;
+			__u64 state_entry_time;
+			__u64 time_running;
+			__u64 time_runnable;
+			__u64 time_blocked;
+			__u64 time_offline;
+		} runstate;
+		__u32 vcpu_id;
+		struct {
+			__u32 port;
+			__u32 priority;
+			__u64 expires_ns;
+		} timer;
+		__u8 vector;
+	} u;
+};
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO	0x0
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO	0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR	0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT	0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA	0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST	0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID		0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER		0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR	0x8
+
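The per-vCPU counterpart is set with the KVM_XEN_VCPU_SET_ATTR vcpu ioctl (again defined elsewhere in the UAPI). A sketch that registers the guest physical address of a vCPU's vcpu_info structure; the helper name is illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Register the guest physical address of this vCPU's Xen vcpu_info. */
static int set_xen_vcpu_info(int vcpu_fd, __u64 vcpu_info_gpa)
{
	struct kvm_xen_vcpu_attr attr = {
		.type  = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
		.u.gpa = vcpu_info_gpa,
	};

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}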
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+	/* Guest initialization commands */
+	KVM_SEV_INIT = 0,
+	KVM_SEV_ES_INIT,
+	/* Guest launch commands */
+	KVM_SEV_LAUNCH_START,
+	KVM_SEV_LAUNCH_UPDATE_DATA,
+	KVM_SEV_LAUNCH_UPDATE_VMSA,
+	KVM_SEV_LAUNCH_SECRET,
+	KVM_SEV_LAUNCH_MEASURE,
+	KVM_SEV_LAUNCH_FINISH,
+	/* Guest migration commands (outgoing) */
+	KVM_SEV_SEND_START,
+	KVM_SEV_SEND_UPDATE_DATA,
+	KVM_SEV_SEND_UPDATE_VMSA,
+	KVM_SEV_SEND_FINISH,
+	/* Guest migration commands (incoming) */
+	KVM_SEV_RECEIVE_START,
+	KVM_SEV_RECEIVE_UPDATE_DATA,
+	KVM_SEV_RECEIVE_UPDATE_VMSA,
+	KVM_SEV_RECEIVE_FINISH,
+	/* Guest status and debug commands */
+	KVM_SEV_GUEST_STATUS,
+	KVM_SEV_DBG_DECRYPT,
+	KVM_SEV_DBG_ENCRYPT,
+	/* Guest certificates commands */
+	KVM_SEV_CERT_EXPORT,
+	/* Attestation report */
+	KVM_SEV_GET_ATTESTATION_REPORT,
+	/* Guest Migration Extension */
+	KVM_SEV_SEND_CANCEL,
+
+	KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+	__u32 id;
+	__u64 data;
+	__u32 error;
+	__u32 sev_fd;
+};
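Every SEV command is wrapped in struct kvm_sev_cmd and issued through the KVM_MEMORY_ENCRYPT_OP VM ioctl, with sev_fd referring to an open handle on the platform's /dev/sev device (the ioctl and device path are not part of this hunk). A minimal sketch of the very first step; the helper name and error handling are illustrative:

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Initialize SEV state for a VM. On failure, cmd.error additionally
 * holds the SEV firmware error code. The sev_fd is kept open because
 * later commands are issued through the same handle. */
static int sev_init(int vm_fd)
{
	struct kvm_sev_cmd cmd;
	int sev_fd = open("/dev/sev", O_RDWR);

	if (sev_fd < 0)
		return -1;

	cmd = (struct kvm_sev_cmd) {
		.id     = KVM_SEV_INIT,
		.data   = 0,		/* KVM_SEV_INIT takes no payload */
		.sev_fd = sev_fd,
	};
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}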
+
+struct kvm_sev_launch_start {
+	__u32 handle;
+	__u32 policy;
+	__u64 dh_uaddr;
+	__u32 dh_len;
+	__u64 session_uaddr;
+	__u32 session_len;
+};
+
+struct kvm_sev_launch_update_data {
+	__u64 uaddr;
+	__u32 len;
+};
+
+
+struct kvm_sev_launch_secret {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 guest_uaddr;
+	__u32 guest_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
+struct kvm_sev_launch_measure {
+	__u64 uaddr;
+	__u32 len;
+};
+
+struct kvm_sev_guest_status {
+	__u32 handle;
+	__u32 policy;
+	__u32 state;
+};
+
+struct kvm_sev_dbg {
+	__u64 src_uaddr;
+	__u64 dst_uaddr;
+	__u32 len;
+};
+
+struct kvm_sev_attestation_report {
+	__u8 mnonce[16];
+	__u64 uaddr;
+	__u32 len;
+};
+
+struct kvm_sev_send_start {
+	__u32 policy;
+	__u64 pdh_cert_uaddr;
+	__u32 pdh_cert_len;
+	__u64 plat_certs_uaddr;
+	__u32 plat_certs_len;
+	__u64 amd_certs_uaddr;
+	__u32 amd_certs_len;
+	__u64 session_uaddr;
+	__u32 session_len;
+};
+
+struct kvm_sev_send_update_data {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 guest_uaddr;
+	__u32 guest_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
+struct kvm_sev_receive_start {
+	__u32 handle;
+	__u32 policy;
+	__u64 pdh_uaddr;
+	__u32 pdh_len;
+	__u64 session_uaddr;
+	__u32 session_len;
+};
+
+struct kvm_sev_receive_update_data {
+	__u64 hdr_uaddr;
+	__u32 hdr_len;
+	__u64 guest_uaddr;
+	__u32 guest_len;
+	__u64 trans_uaddr;
+	__u32 trans_len;
+};
+
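These structures are the per-command payloads; a pointer to the relevant one goes into kvm_sev_cmd.data. As a sketch of how the launch flow begins (the certificate/session buffers, helper name, and error handling are illustrative, not from this hunk):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Begin the SEV launch flow; on success the firmware-assigned guest
 * handle is written back into start.handle. */
static int sev_launch_start(int vm_fd, int sev_fd, __u32 policy,
			    void *godh, __u32 godh_len,
			    void *session, __u32 session_len)
{
	struct kvm_sev_launch_start start = {
		.policy        = policy,
		.dh_uaddr      = (uintptr_t)godh,	/* guest owner's DH cert */
		.dh_len        = godh_len,
		.session_uaddr = (uintptr_t)session,
		.session_len   = session_len,
	};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_LAUNCH_START,
		.data   = (uintptr_t)&start,
		.sev_fd = sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}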
+#define KVM_X2APIC_API_USE_32BIT_IDS		(1ULL << 0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK	(1ULL << 1)
+
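These two bits are the valid arguments when enabling KVM_CAP_X2APIC_API with KVM_ENABLE_CAP on the VM fd (the capability constant and struct kvm_enable_cap come from the same UAPI header, not from this hunk). A minimal sketch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Switch the in-kernel APIC interfaces to 32-bit x2APIC IDs and stop
 * treating APIC ID 0xff as a broadcast destination. */
static int enable_x2apic_api(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap     = KVM_CAP_X2APIC_API,
		.args[0] = KVM_X2APIC_API_USE_32BIT_IDS |
			   KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}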
+struct kvm_hyperv_eventfd {
+	__u32 conn_id;
+	__s32 fd;
+	__u32 flags;
+	__u32 padding[3];
+};
+
+#define KVM_HYPERV_CONN_ID_MASK		0x00ffffff
+#define KVM_HYPERV_EVENTFD_DEASSIGN	(1 << 0)
+
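struct kvm_hyperv_eventfd binds a Hyper-V connection ID to an eventfd via the KVM_HYPERV_EVENTFD VM ioctl, so guest HvSignalEvent hypercalls for that connection are signalled without an exit to userspace (the ioctl is defined elsewhere in the UAPI; the sketch below assumes the caller already has an eventfd):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Route guest HvSignalEvent calls for conn_id to an existing eventfd.
 * Pass KVM_HYPERV_EVENTFD_DEASSIGN in flags to undo the binding. */
static int hyperv_bind_eventfd(int vm_fd, __u32 conn_id, int efd)
{
	struct kvm_hyperv_eventfd hve = {
		.conn_id = conn_id & KVM_HYPERV_CONN_ID_MASK,
		.fd      = efd,
		.flags   = 0,
	};

	return ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hve);
}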
 /*
  * Masked event layout.
  * Bits   Description