@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
 	return index;
 }
 
+struct kvm_sysreg_masks;
+
+enum fgt_group_id {
+	__NO_FGT_GROUP__,
+	HFGxTR_GROUP,
+	HDFGRTR_GROUP,
+	HDFGWTR_GROUP = HDFGRTR_GROUP,
+	HFGITR_GROUP,
+	HAFGRTR_GROUP,
+
+	/* Must be last */
+	__NR_FGT_GROUP_IDS__
+};
+
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;
 
+	/*
+	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
+	 * architecture. We track them globally, as we present the
+	 * same feature-set to all vcpus.
+	 *
+	 * Index 0 is currently spare.
+	 */
+	u64 fgu[__NR_FGT_GROUP_IDS__];
+
 	/* Interrupt controller */
 	struct vgic_dist vgic;
 
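The fgu[] array gives each fgt_group_id a 64-bit mask of register bits to treat as Fine-Grained UNDEF, and it lives in kvm_arch rather than the vcpu because every vcpu is presented the same feature set. A rough sketch of how a configuration path could populate it (not from this patch: compute_fgu_sketch and HFGITR_EL2_SOME_OP are illustrative names, and kvm_has_feat() is the helper added at the bottom of this header):

	/* Sketch: make an instruction UNDEF VM-wide when its feature is absent */
	static void compute_fgu_sketch(struct kvm *kvm)
	{
		if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
			kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_SOME_OP; /* hypothetical bit */
	}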
@@ -274,6 +297,8 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE	6
 	/* Initial ID reg values loaded */
 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED	7
+	/* Fine-Grained UNDEF initialised */
+#define KVM_ARCH_FLAG_FGU_INITIALIZED		8
 	unsigned long flags;
 
 	/* VM-wide vCPU feature set */
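KVM_ARCH_FLAG_FGU_INITIALIZED is a bit index into the flags word declared just below, so one-time FGU setup can use the usual test_bit()/set_bit() pattern. A minimal sketch, assuming the mask computation itself lives elsewhere:

	/* Sketch: compute the FGU masks at most once per VM */
	static void init_fgu_once_sketch(struct kvm *kvm)
	{
		if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
			return;

		/* ... fill kvm->arch.fgu[] here ... */

		set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
	}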
@@ -294,6 +319,9 @@ struct kvm_arch {
 	/* PMCR_EL0.N value for the guest */
 	u8 pmcr_n;
 
+	/* Iterator for idreg debugfs */
+	u8 idreg_debugfs_iter;
+
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 	struct maple_tree smccc_filter;
@@ -312,6 +340,9 @@ struct kvm_arch {
 #define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
 	u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+	/* Masks for VNCR-backed sysregs */
+	struct kvm_sysreg_masks	*sysreg_masks;
+
 	/*
 	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
 	 * the associated pKVM instance in the hypervisor.
@@ -474,6 +505,13 @@ enum vcpu_sysreg {
 	NR_SYS_REGS	/* Nothing after this line! */
 };
 
+struct kvm_sysreg_masks {
+	struct {
+		u64	res0;
+		u64	res1;
+	} mask[NR_SYS_REGS - __VNCR_START__];
+};
+
 struct kvm_cpu_context {
 	struct user_pt_regs regs;	/* sp = sp_el0 */
 
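Note that mask[] only spans the VNCR-backed tail of the sysreg file, so entries are indexed by (reg - __VNCR_START__). A setter along these lines keeps the offset arithmetic in one place (illustrative only; any real helper lives outside this header):

	/* Sketch: record the RES0/RES1 behaviour of one VNCR-backed register */
	static void set_sysreg_masks_sketch(struct kvm *kvm, int sr, u64 res0, u64 res1)
	{
		int i = sr - __VNCR_START__;

		kvm->arch.sysreg_masks->mask[i].res0 = res0;
		kvm->arch.sysreg_masks->mask[i].res1 = res1;
	}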
@@ -549,6 +587,7 @@ struct kvm_vcpu_arch {
 
 	/* Values of trap registers for the guest. */
 	u64 hcr_el2;
+	u64 hcrx_el2;
 	u64 mdcr_el2;
 	u64 cptr_el2;
 
@@ -868,7 +907,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
-#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r)						\
+	(*({								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
+		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
+			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
+		__r;							\
+	}))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
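The rewritten macro keeps its lvalue behaviour: the statement expression evaluates to a u64 pointer that is then dereferenced, so existing __vcpu_sys_reg(vcpu, reg) = val assignments compile unchanged, while any access to a VNCR-backed register of a nested-virt guest first rewrites the backing store via kvm_vcpu_sanitise_vncr_reg(). That helper presumably applies the per-VM masks, clearing res0 bits and forcing res1 bits; a sketch under that assumption:

	/* Sketch: sanitise a VNCR-backed value using the masks declared above */
	u64 kvm_vcpu_sanitise_vncr_reg_sketch(const struct kvm_vcpu *vcpu,
					      enum vcpu_sysreg sr)
	{
		u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
		struct kvm_sysreg_masks *masks = vcpu->kvm->arch.sysreg_masks;

		if (masks) {
			v &= ~masks->mask[sr - __VNCR_START__].res0;
			v |= masks->mask[sr - __VNCR_START__].res1;
		}
		return v;
	}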
@@ -1055,14 +1102,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
 
+void kvm_sys_regs_create_debugfs(struct kvm *kvm);
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
 
 int __init kvm_sys_reg_table_init(void);
+struct sys_reg_desc;
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+				  unsigned int idx);
 int __init populate_nv_trap_config(void);
 
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
+void kvm_init_sysreg(struct kvm_vcpu *);
+
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1233,4 +1286,48 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+#define __expand_field_sign_unsigned(id, fld, val)		\
+	((u64)SYS_FIELD_VALUE(id, fld, val))
+
+#define __expand_field_sign_signed(id, fld, val)		\
+	({							\
+		u64 __val = SYS_FIELD_VALUE(id, fld, val);	\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);	\
+	})
+
+#define expand_field_sign(id, fld, val)				\
+	(id##_##fld##_SIGNED ?					\
+	 __expand_field_sign_signed(id, fld, val) :		\
+	 __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld)			\
+	({							\
+		u64 __val = IDREG((kvm), SYS_##id);		\
+		FIELD_GET(id##_##fld##_MASK, __val);		\
+	})
+
+#define get_idreg_field_signed(kvm, id, fld)			\
+	({							\
+		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);	\
+	})
+
+#define get_idreg_field_enum(kvm, id, fld)			\
+	get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld)				\
+	(id##_##fld##_SIGNED ?					\
+	 get_idreg_field_signed(kvm, id, fld) :			\
+	 get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit)			\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val)			\
+	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+
+#define kvm_has_feat_range(kvm, id, fld, min, max)		\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) &&	\
+	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
 #endif /* __ARM64_KVM_HOST_H__ */
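The final hunk gives callers a signedness-aware way to compare ID register fields: fields flagged id##_##fld##_SIGNED in the generated sysreg definitions are sign-extended on both sides, so a plain >= comparison handles negative encodings (such as IMP_DEF) correctly. Illustrative uses, with field names taken from the generated definitions (the surrounding functions are examples, not code from this commit):

	/* Sketch: ">=" check on a feature field */
	static inline bool vm_has_el2_sketch(struct kvm *kvm)
	{
		return kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP);
	}

	/* Sketch: exact-match check on an enumeration-style field */
	static inline bool vm_has_os_only_tlbi_sketch(struct kvm *kvm)
	{
		return kvm_has_feat_enum(kvm, ID_AA64ISAR0_EL1, TLB, OS);
	}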