@@ -189,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
189
189
/* RD frame of the current CPU's redistributor (per-CPU, via gic_data_rdist()). */
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
190
190
/* VLPI frame sits SZ_128K past the RD frame in the GICv4 redistributor layout. */
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
191
191
192
+ /*
193
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
194
+ * always have vSGIs mapped.
195
+ */
196
+ static bool require_its_list_vmovp (struct its_vm * vm , struct its_node * its )
197
+ {
198
+ return (gic_rdists -> has_rvpeid || vm -> vlpi_count [its -> list_nr ]);
199
+ }
200
+
192
201
static u16 get_its_list (struct its_vm * vm )
193
202
{
194
203
struct its_node * its ;
@@ -198,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
198
207
if (!is_v4 (its ))
199
208
continue ;
200
209
201
- if (vm -> vlpi_count [ its -> list_nr ] )
210
+ if (require_its_list_vmovp ( vm , its ) )
202
211
__set_bit (its -> list_nr , & its_list );
203
212
}
204
213
@@ -1295,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
1295
1304
if (!is_v4 (its ))
1296
1305
continue ;
1297
1306
1298
- if (!vpe -> its_vm -> vlpi_count [ its -> list_nr ] )
1307
+ if (!require_its_list_vmovp ( vpe -> its_vm , its ) )
1299
1308
continue ;
1300
1309
1301
1310
desc .its_vmovp_cmd .col = & its -> collections [col_id ];
@@ -1586,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
1586
1595
return 0 ;
1587
1596
}
1588
1597
1598
+ /*
1599
+ * Two favourable cases:
1600
+ *
1601
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1602
+ * for vSGI delivery
1603
+ *
1604
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1605
+ * and we're better off mapping all VPEs always
1606
+ *
1607
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
1608
+ *
1609
+ */
1610
+ static bool gic_requires_eager_mapping (void )
1611
+ {
1612
+ if (!its_list_map || gic_rdists -> has_rvpeid )
1613
+ return true;
1614
+
1615
+ return false;
1616
+ }
1617
+
1589
1618
static void its_map_vm (struct its_node * its , struct its_vm * vm )
1590
1619
{
1591
1620
unsigned long flags ;
1592
1621
1593
- /* Not using the ITS list? Everything is always mapped. */
1594
- if (!its_list_map )
1622
+ if (gic_requires_eager_mapping ())
1595
1623
return ;
1596
1624
1597
1625
raw_spin_lock_irqsave (& vmovp_lock , flags );
@@ -1625,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1625
1653
unsigned long flags ;
1626
1654
1627
1655
/* Not using the ITS list? Everything is always mapped. */
1628
- if (! its_list_map )
1656
+ if (gic_requires_eager_mapping () )
1629
1657
return ;
1630
1658
1631
1659
raw_spin_lock_irqsave (& vmovp_lock , flags );
@@ -4282,8 +4310,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4282
4310
struct its_vpe * vpe = irq_data_get_irq_chip_data (d );
4283
4311
struct its_node * its ;
4284
4312
4285
- /* If we use the list map, we issue VMAPP on demand... */
4286
- if (its_list_map )
4313
+ /*
4314
+ * If we use the list map, we issue VMAPP on demand... Unless
4315
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4316
+ * so that VSGIs can work.
4317
+ */
4318
+ if (!gic_requires_eager_mapping ())
4287
4319
return 0 ;
4288
4320
4289
4321
/* Map the VPE to the first possible CPU */
@@ -4309,10 +4341,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4309
4341
struct its_node * its ;
4310
4342
4311
4343
/*
4312
- * If we use the list map, we unmap the VPE once no VLPIs are
4313
- * associated with the VM.
4344
- * If we use the list map on GICv4.0, we unmap the VPE once no
4345
+ * VLPIs are associated with the VM.
4314
4346
*/
4315
- if (its_list_map )
4347
+ if (! gic_requires_eager_mapping () )
4316
4348
return ;
4317
4349
4318
4350
list_for_each_entry (its , & its_nodes , entry ) {
0 commit comments