@@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
+					   struct its_collection *col)
+{
+	return kvm_get_vcpu_by_id(kvm, col->target_addr);
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
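The new helper hinges on the distinction between a vCPU's position in the vcpu array and its userspace-assigned vcpu_id: kvm_get_vcpu() indexes positionally, while kvm_get_vcpu_by_id() resolves the (possibly sparse) ID and returns NULL when no vCPU carries it, which is why the callers below grow NULL checks. A rough sketch of the ID-based lookup, paraphrased from memory of include/linux/kvm_host.h (details may differ across kernel versions):

/* Sketch only: approximates kvm_get_vcpu_by_id(), not verbatim. */
static struct kvm_vcpu *vcpu_by_id_sketch(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (id < 0)
		return NULL;

	/* Fast path: IDs often coincide with indices, so probe first. */
	if (id < KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, id);
		if (vcpu && vcpu->vcpu_id == id)
			return vcpu;
	}

	/* Slow path: scan all vCPUs for a matching vcpu_id. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;

	return NULL;	/* no such ID: callers must handle this */
}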
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
 	if (!its_is_collection_mapped(ite->collection))
 		return;
 
-	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+	vcpu = collection_to_vcpu(kvm, ite->collection);
 	update_affinity(ite->irq, vcpu);
 }
 
@@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 	if (!ite || !its_is_collection_mapped(ite->collection))
 		return E_ITS_INT_UNMAPPED_INTERRUPT;
 
-	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+	vcpu = collection_to_vcpu(kvm, ite->collection);
 	if (!vcpu)
 		return E_ITS_INT_UNMAPPED_INTERRUPT;
 
@@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 		return E_ITS_MOVI_UNMAPPED_COLLECTION;
 
 	ite->collection = collection;
-	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+	vcpu = collection_to_vcpu(kvm, collection);
 
 	vgic_its_invalidate_cache(kvm);
 
@@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	}
 
 	if (its_is_collection_mapped(collection))
-		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+		vcpu = collection_to_vcpu(kvm, collection);
 
 	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
 	if (IS_ERR(irq)) {
@@ -1242,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
 				    u64 *its_cmd)
 {
 	u16 coll_id;
-	u32 target_addr;
 	struct its_collection *collection;
 	bool valid;
 
 	valid = its_cmd_get_validbit(its_cmd);
 	coll_id = its_cmd_get_collection(its_cmd);
-	target_addr = its_cmd_get_target_addr(its_cmd);
-
-	if (target_addr >= atomic_read(&kvm->online_vcpus))
-		return E_ITS_MAPC_PROCNUM_OOR;
 
 	if (!valid) {
 		vgic_its_free_collection(its, coll_id);
 		vgic_its_invalidate_cache(kvm);
 	} else {
+		struct kvm_vcpu *vcpu;
+
+		vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+		if (!vcpu)
+			return E_ITS_MAPC_PROCNUM_OOR;
+
 		collection = find_collection(its, coll_id);
 
 		if (!collection) {
@@ -1270,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
 					     coll_id);
 			if (ret)
 				return ret;
-			collection->target_addr = target_addr;
+			collection->target_addr = vcpu->vcpu_id;
 		} else {
-			collection->target_addr = target_addr;
+			collection->target_addr = vcpu->vcpu_id;
 			update_affinity_collection(kvm, its, collection);
 		}
 	}
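Taken together, the two MAPC hunks reorder the handling: the target is decoded and validated only on the V=1 path, so an unmap (V=0) no longer depends on the RDbase field at all, and what gets stored is the canonical vcpu_id rather than the raw command field. A condensed sketch of the resulting flow (illustration only, eliding the find_collection()/allocation step shown in the patch):

/* Condensed MAPC flow after the patch; not verbatim kernel code. */
if (!valid) {
	/* V=0: unmap the collection; the target field is irrelevant. */
	vgic_its_free_collection(its, coll_id);
	vgic_its_invalidate_cache(kvm);
} else {
	struct kvm_vcpu *vcpu;

	/* V=1: the target must name an existing vCPU by ID. */
	vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
	if (!vcpu)
		return E_ITS_MAPC_PROCNUM_OOR;

	/* Store the canonical ID so later lookups stay ID-based. */
	collection->target_addr = vcpu->vcpu_id;
}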
@@ -1382,7 +1389,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 	if (!its_is_collection_mapped(collection))
 		return E_ITS_INVALL_UNMAPPED_COLLECTION;
 
-	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+	vcpu = collection_to_vcpu(kvm, collection);
 	vgic_its_invall(vcpu);
 
 	return 0;
@@ -1399,23 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 				      u64 *its_cmd)
 {
-	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
-	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
 	struct kvm_vcpu *vcpu1, *vcpu2;
 	struct vgic_irq *irq;
 	u32 *intids;
 	int irq_count, i;
 
-	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
-	    target2_addr >= atomic_read(&kvm->online_vcpus))
+	/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+	vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+	vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
+
+	if (!vcpu1 || !vcpu2)
 		return E_ITS_MOVALL_PROCNUM_OOR;
 
-	if (target1_addr == target2_addr)
+	if (vcpu1 == vcpu2)
 		return 0;
 
-	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
-	vcpu2 = kvm_get_vcpu(kvm, target2_addr);
-
 	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
 	if (irq_count < 0)
 		return irq_count;
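The new comment captures the architectural point: with GITS_TYPER.PTA==0, an ITS "target address" is a processor number rather than a physical redistributor address, and this implementation equates that number with the vcpu_id, so both MOVALL targets can be resolved directly via kvm_get_vcpu_by_id(). For reference, a sketch of the bit-field extraction that its_cmd_mask_field() performs on the 4-doubleword command (assumed form; the in-tree macro may differ):

/*
 * Sketch: pull `size` bits starting at `shift` out of doubleword
 * `word` of an ITS command. MOVALL's second target lives in
 * doubleword 3, bits [47:16], hence its_cmd_mask_field(cmd, 3, 16, 32).
 */
static u64 cmd_mask_field_sketch(u64 *cmd, int word, int shift, int size)
{
	return (cmd[word] >> shift) & (BIT_ULL(size) - 1);
}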
@@ -2258,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
 		return PTR_ERR(ite);
 
 	if (its_is_collection_mapped(collection))
-		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+		vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
 
 	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
 	if (IS_ERR(irq)) {
@@ -2573,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	coll_id = val & KVM_ITS_CTE_ICID_MASK;
 
 	if (target_addr != COLLECTION_NOT_MAPPED &&
-	    target_addr >= atomic_read(&kvm->online_vcpus))
+	    !kvm_get_vcpu_by_id(kvm, target_addr))
 		return -EINVAL;
 
 	collection = find_collection(its, coll_id);