43
43
#include <linux/pci-ecam.h>
44
44
#include <linux/delay.h>
45
45
#include <linux/semaphore.h>
46
- #include <linux/irqdomain.h>
47
- #include <asm/irqdomain.h>
48
- #include <asm/apic.h>
49
46
#include <linux/irq.h>
50
47
#include <linux/msi.h>
51
48
#include <linux/hyperv.h>
@@ -583,6 +580,42 @@ struct hv_pci_compl {
583
580
584
581
static void hv_pci_onchannelcallback (void * context );
585
582
583
#ifdef CONFIG_X86
/*
 * x86-specific glue for the Hyper-V PCI MSI irqchip.  On x86 the MSI
 * domain is stacked on the x86 vector domain and interrupts are
 * delivered in fixed mode with edge-triggered flow handling.
 */
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER	handle_edge_irq
#define FLOW_NAME	"edge"

/* No arch-specific setup is needed on x86; the vector domain exists. */
static int hv_pci_irqchip_init(void)
{
	return 0;
}

/* Parent domain that the Hyper-V MSI irq domain is stacked on. */
static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

/* Hardware interrupt vector the vector domain allocated for @data. */
static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

/* Translate a composed MSI message into Hyper-V's hv_msi_entry layout. */
static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
				       struct msi_desc *msi_desc)
{
	msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
	msi_entry->data.as_uint32 = msi_desc->msg.data;
}

/* Defer MSI allocation preparation to the generic x86 PCI-MSI hook. */
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
			  int nvec, msi_alloc_info_t *info)
{
	return pci_msi_prepare(domain, dev, nvec, info);
}
#endif /* CONFIG_X86 */
618
+
586
619
/**
587
620
* hv_pci_generic_compl() - Invoked for a completion packet
588
621
* @context: Set up by the sender of the packet.
@@ -1191,14 +1224,6 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1191
1224
put_pcichild (hpdev );
1192
1225
}
1193
1226
1194
- static int hv_set_affinity (struct irq_data * data , const struct cpumask * dest ,
1195
- bool force )
1196
- {
1197
- struct irq_data * parent = data -> parent_data ;
1198
-
1199
- return parent -> chip -> irq_set_affinity (parent , dest , force );
1200
- }
1201
-
1202
1227
static void hv_irq_mask (struct irq_data * data )
1203
1228
{
1204
1229
pci_msi_mask_irq (data );
@@ -1217,7 +1242,6 @@ static void hv_irq_mask(struct irq_data *data)
1217
1242
static void hv_irq_unmask (struct irq_data * data )
1218
1243
{
1219
1244
struct msi_desc * msi_desc = irq_data_get_msi_desc (data );
1220
- struct irq_cfg * cfg = irqd_cfg (data );
1221
1245
struct hv_retarget_device_interrupt * params ;
1222
1246
struct hv_pcibus_device * hbus ;
1223
1247
struct cpumask * dest ;
@@ -1246,7 +1270,7 @@ static void hv_irq_unmask(struct irq_data *data)
1246
1270
(hbus -> hdev -> dev_instance .b [7 ] << 8 ) |
1247
1271
(hbus -> hdev -> dev_instance .b [6 ] & 0xf8 ) |
1248
1272
PCI_FUNC (pdev -> devfn );
1249
- params -> int_target .vector = cfg -> vector ;
1273
+ params -> int_target .vector = hv_msi_get_int_vector ( data ) ;
1250
1274
1251
1275
/*
1252
1276
* Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
@@ -1347,7 +1371,7 @@ static u32 hv_compose_msi_req_v1(
1347
1371
int_pkt -> wslot .slot = slot ;
1348
1372
int_pkt -> int_desc .vector = vector ;
1349
1373
int_pkt -> int_desc .vector_count = 1 ;
1350
- int_pkt -> int_desc .delivery_mode = APIC_DELIVERY_MODE_FIXED ;
1374
+ int_pkt -> int_desc .delivery_mode = DELIVERY_MODE ;
1351
1375
1352
1376
/*
1353
1377
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1377,7 +1401,7 @@ static u32 hv_compose_msi_req_v2(
1377
1401
int_pkt -> wslot .slot = slot ;
1378
1402
int_pkt -> int_desc .vector = vector ;
1379
1403
int_pkt -> int_desc .vector_count = 1 ;
1380
- int_pkt -> int_desc .delivery_mode = APIC_DELIVERY_MODE_FIXED ;
1404
+ int_pkt -> int_desc .delivery_mode = DELIVERY_MODE ;
1381
1405
cpu = hv_compose_msi_req_get_cpu (affinity );
1382
1406
int_pkt -> int_desc .processor_array [0 ] =
1383
1407
hv_cpu_number_to_vp_number (cpu );
@@ -1397,7 +1421,7 @@ static u32 hv_compose_msi_req_v3(
1397
1421
int_pkt -> int_desc .vector = vector ;
1398
1422
int_pkt -> int_desc .reserved = 0 ;
1399
1423
int_pkt -> int_desc .vector_count = 1 ;
1400
- int_pkt -> int_desc .delivery_mode = APIC_DELIVERY_MODE_FIXED ;
1424
+ int_pkt -> int_desc .delivery_mode = DELIVERY_MODE ;
1401
1425
cpu = hv_compose_msi_req_get_cpu (affinity );
1402
1426
int_pkt -> int_desc .processor_array [0 ] =
1403
1427
hv_cpu_number_to_vp_number (cpu );
@@ -1419,7 +1443,6 @@ static u32 hv_compose_msi_req_v3(
1419
1443
*/
1420
1444
static void hv_compose_msi_msg (struct irq_data * data , struct msi_msg * msg )
1421
1445
{
1422
- struct irq_cfg * cfg = irqd_cfg (data );
1423
1446
struct hv_pcibus_device * hbus ;
1424
1447
struct vmbus_channel * channel ;
1425
1448
struct hv_pci_dev * hpdev ;
@@ -1470,22 +1493,22 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1470
1493
size = hv_compose_msi_req_v1 (& ctxt .int_pkts .v1 ,
1471
1494
dest ,
1472
1495
hpdev -> desc .win_slot .slot ,
1473
- cfg -> vector );
1496
+ hv_msi_get_int_vector ( data ) );
1474
1497
break ;
1475
1498
1476
1499
case PCI_PROTOCOL_VERSION_1_2 :
1477
1500
case PCI_PROTOCOL_VERSION_1_3 :
1478
1501
size = hv_compose_msi_req_v2 (& ctxt .int_pkts .v2 ,
1479
1502
dest ,
1480
1503
hpdev -> desc .win_slot .slot ,
1481
- cfg -> vector );
1504
+ hv_msi_get_int_vector ( data ) );
1482
1505
break ;
1483
1506
1484
1507
case PCI_PROTOCOL_VERSION_1_4 :
1485
1508
size = hv_compose_msi_req_v3 (& ctxt .int_pkts .v3 ,
1486
1509
dest ,
1487
1510
hpdev -> desc .win_slot .slot ,
1488
- cfg -> vector );
1511
+ hv_msi_get_int_vector ( data ) );
1489
1512
break ;
1490
1513
1491
1514
default :
@@ -1594,14 +1617,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1594
1617
static struct irq_chip hv_msi_irq_chip = {
1595
1618
.name = "Hyper-V PCIe MSI" ,
1596
1619
.irq_compose_msi_msg = hv_compose_msi_msg ,
1597
- .irq_set_affinity = hv_set_affinity ,
1620
+ .irq_set_affinity = irq_chip_set_affinity_parent ,
1598
1621
.irq_ack = irq_chip_ack_parent ,
1599
1622
.irq_mask = hv_irq_mask ,
1600
1623
.irq_unmask = hv_irq_unmask ,
1601
1624
};
1602
1625
1603
1626
static struct msi_domain_ops hv_msi_ops = {
1604
- .msi_prepare = pci_msi_prepare ,
1627
+ .msi_prepare = hv_msi_prepare ,
1605
1628
.msi_free = hv_msi_free ,
1606
1629
};
1607
1630
@@ -1625,12 +1648,12 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
1625
1648
hbus -> msi_info .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
1626
1649
MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
1627
1650
MSI_FLAG_PCI_MSIX );
1628
- hbus -> msi_info .handler = handle_edge_irq ;
1629
- hbus -> msi_info .handler_name = "edge" ;
1651
+ hbus -> msi_info .handler = FLOW_HANDLER ;
1652
+ hbus -> msi_info .handler_name = FLOW_NAME ;
1630
1653
hbus -> msi_info .data = hbus ;
1631
1654
hbus -> irq_domain = pci_msi_create_irq_domain (hbus -> fwnode ,
1632
1655
& hbus -> msi_info ,
1633
- x86_vector_domain );
1656
+ hv_pci_get_root_domain () );
1634
1657
if (!hbus -> irq_domain ) {
1635
1658
dev_err (& hbus -> hdev -> device ,
1636
1659
"Failed to build an MSI IRQ domain\n" );
@@ -3542,9 +3565,15 @@ static void __exit exit_hv_pci_drv(void)
3542
3565
3543
3566
static int __init init_hv_pci_drv (void )
3544
3567
{
3568
+ int ret ;
3569
+
3545
3570
if (!hv_is_hyperv_initialized ())
3546
3571
return - ENODEV ;
3547
3572
3573
+ ret = hv_pci_irqchip_init ();
3574
+ if (ret )
3575
+ return ret ;
3576
+
3548
3577
/* Set the invalid domain number's bit, so it will not be used */
3549
3578
set_bit (HVPCI_DOM_INVALID , hvpci_dom_map );
3550
3579
0 commit comments