@@ -205,6 +205,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (is_protected_kvm_enabled())
 		pkvm_destroy_hyp_vm(kvm);
 
+	kfree(kvm->arch.mpidr_data);
 	kvm_destroy_vcpus(kvm);
 
 	kvm_unshare_hyp(kvm, kvm + 1);
@@ -437,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
-	if (*last_ran != vcpu->vcpu_id) {
+	if (*last_ran != vcpu->vcpu_idx) {
 		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
-		*last_ran = vcpu->vcpu_id;
+		*last_ran = vcpu->vcpu_idx;
 	}
 
 	vcpu->cpu = cpu;
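
A note on the hunk above: vcpu->vcpu_id is the identifier userspace picks at KVM_CREATE_VCPU time, while vcpu->vcpu_idx is the kernel-assigned position in the vCPU array. The per-CPU last_ran slot only needs a stable, kernel-controlled tag for "which vCPU context ran here last", and unlike the IDs, the indices are guaranteed to be small and dense, which makes vcpu_idx the safer key.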
@@ -577,6 +578,57 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
 }
 
+static void kvm_init_mpidr_data(struct kvm *kvm)
+{
+	struct kvm_mpidr_data *data = NULL;
+	unsigned long c, mask, nr_entries;
+	u64 aff_set = 0, aff_clr = ~0UL;
+	struct kvm_vcpu *vcpu;
+
+	mutex_lock(&kvm->arch.config_lock);
+
+	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+		goto out;
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+		aff_set |= aff;
+		aff_clr &= aff;
+	}
+
+	/*
+	 * A significant bit can be either 0 or 1, and will only appear in
+	 * aff_set. Use aff_clr to weed out the useless stuff.
+	 */
+	mask = aff_set ^ aff_clr;
+	nr_entries = BIT_ULL(hweight_long(mask));
+
+	/*
+	 * Don't let userspace fool us. If we need more than a single page
+	 * to describe the compressed MPIDR array, just fall back to the
+	 * iterative method. Single vcpu VMs do not need this either.
+	 */
+	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
+		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
+			       GFP_KERNEL_ACCOUNT);
+
+	if (!data)
+		goto out;
+
+	data->mpidr_mask = mask;
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+		u16 index = kvm_mpidr_index(data, aff);
+
+		data->cmpidr_to_idx[index] = c;
+	}
+
+	kvm->arch.mpidr_data = data;
+out:
+	mutex_unlock(&kvm->arch.config_lock);
+}
+
 /*
  * Handle both the initialisation that is being done when the vcpu is
  * run for the first time, as well as the updates that must be
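
To make the aff_set/aff_clr arithmetic in kvm_init_mpidr_data() concrete, here is a minimal user-space sketch. The toy MPIDR values are invented, and compress() is only a stand-in for kvm_mpidr_index(), which is defined elsewhere in the series; the sketch merely assumes it performs this kind of bit-gather:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Software bit-gather: collect the bits of @val selected by @mask into a
 * dense low-order index. Illustrative stand-in for kvm_mpidr_index().
 */
static unsigned compress(uint64_t val, uint64_t mask)
{
	unsigned idx = 0, out = 0;

	for (int bit = 0; bit < 64; bit++) {
		if (!(mask & (1ULL << bit)))
			continue;
		if (val & (1ULL << bit))
			idx |= 1u << out;
		out++;
	}
	return idx;
}

int main(void)
{
	/* Toy guest: two clusters of two vCPUs (Aff1:Aff0) */
	uint64_t mpidr[] = { 0x000, 0x001, 0x100, 0x101 };
	uint64_t aff_set = 0, aff_clr = ~0ULL;

	for (int i = 0; i < 4; i++) {
		aff_set |= mpidr[i];	/* bits set in at least one MPIDR */
		aff_clr &= mpidr[i];	/* bits set in every MPIDR */
	}

	/* Bits that actually vary across vCPUs */
	uint64_t mask = aff_set ^ aff_clr;		/* 0x101 here */
	unsigned nr = 1u << __builtin_popcountll(mask);	/* 4 table slots */

	printf("mask=0x%llx nr_entries=%u\n", (unsigned long long)mask, nr);
	for (int i = 0; i < 4; i++)
		printf("mpidr 0x%03llx -> index %u\n",
		       (unsigned long long)mpidr[i],
		       compress(mpidr[i], mask));
	return 0;
}
```

Running it prints mask=0x101 and nr_entries=4, mapping the four MPIDRs to the dense indices 0..3; that dense range is exactly what sizes the cmpidr_to_idx table the patch allocates.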
@@ -600,6 +652,8 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
+	kvm_init_mpidr_data(kvm);
+
 	kvm_arm_vcpu_init_debug(vcpu);
 
 	if (likely(irqchip_in_kernel(kvm))) {
@@ -1136,27 +1190,23 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 			  bool line_status)
 {
 	u32 irq = irq_level->irq;
-	unsigned int irq_type, vcpu_idx, irq_num;
-	int nrcpus = atomic_read(&kvm->online_vcpus);
+	unsigned int irq_type, vcpu_id, irq_num;
 	struct kvm_vcpu *vcpu = NULL;
 	bool level = irq_level->level;
 
 	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
-	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
-	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
+	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
 	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
 
-	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
 
 	switch (irq_type) {
 	case KVM_ARM_IRQ_TYPE_CPU:
 		if (irqchip_in_kernel(kvm))
 			return -ENXIO;
 
-		if (vcpu_idx >= nrcpus)
-			return -EINVAL;
-
-		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
 		if (!vcpu)
 			return -EINVAL;
 
@@ -1168,25 +1218,22 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 		if (!irqchip_in_kernel(kvm))
 			return -ENXIO;
 
-		if (vcpu_idx >= nrcpus)
-			return -EINVAL;
-
-		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
 		if (!vcpu)
 			return -EINVAL;
 
 		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
 			return -EINVAL;
 
-		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
+		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
 	case KVM_ARM_IRQ_TYPE_SPI:
 		if (!irqchip_in_kernel(kvm))
 			return -ENXIO;
 
 		if (irq_num < VGIC_NR_PRIVATE_IRQS)
 			return -EINVAL;
 
-		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
+		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
 	}
 
 	return -EINVAL;
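
For orientation, the irq word decoded above packs four fields; going by the arm64 uapi header (quoted from memory, so treat the exact values as an assumption): the type in bits [27:24], the low 8 bits of the vCPU ID in [23:16], the high 4 bits of the ID in [31:28], and the interrupt number in [15:0], which is why the two-part reassembly allows vCPU IDs up to 4095. A hypothetical encoding sketch:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical example: encode a line event for PPI 27 on the
	 * vCPU with ID 300. The shift values and the PPI type value (2)
	 * assume the usual arm64 uapi layout (KVM_ARM_IRQ_*), quoted
	 * from memory rather than from this diff.
	 */
	uint32_t vcpu_id = 300, irq_num = 27, type = 2 /* PPI */;
	uint32_t irq = (type << 24) |
		       ((vcpu_id & 0xff) << 16) |	/* KVM_ARM_IRQ_VCPU  */
		       ((vcpu_id >> 8) << 28) |		/* KVM_ARM_IRQ_VCPU2 */
		       irq_num;				/* KVM_ARM_IRQ_NUM   */

	printf("irq word: %#010x\n", irq);		/* 0x122c001b */
	return 0;
}
```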
@@ -2378,6 +2425,18 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 	unsigned long i;
 
 	mpidr &= MPIDR_HWID_BITMASK;
+
+	if (kvm->arch.mpidr_data) {
+		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+
+		vcpu = kvm_get_vcpu(kvm,
+				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
+			vcpu = NULL;
+
+		return vcpu;
+	}
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
 			return vcpu;
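
Two details make the verification line in the fast path above load-bearing: the table comes from kzalloc(), so any slot that no vCPU's compressed MPIDR occupies still holds 0 and would resolve to vCPU 0; and an arbitrary MPIDR can alias a populated slot once its non-significant bits are masked off. Comparing the candidate's real affinity via kvm_vcpu_get_mpidr_aff() catches both cases and turns a false hit into the NULL that callers expect.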