Skip to content

Commit 94d924e

Browse files
sunilmutallenpais
authored and committed
PCI: hv: Add arm64 Hyper-V vPCI support
Add arm64 Hyper-V vPCI support by implementing the arch specific interfaces. Introduce an IRQ domain and chip specific to Hyper-v vPCI that is based on SPIs. The IRQ domain parents itself to the arch GIC IRQ domain for basic vector management. [bhelgaas: squash in fix from Yang Li <[email protected]>: https://lore.kernel.org/r/[email protected]] Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sunil Muthuswamy <[email protected]> Signed-off-by: Lorenzo Pieralisi <[email protected]> Signed-off-by: Bjorn Helgaas <[email protected]> Reviewed-by: Marc Zyngier <[email protected]> Reviewed-by: Michael Kelley <[email protected]> Signed-off-by: Tyler Hicks <[email protected]> Signed-off-by: Allen Pais <[email protected]>
1 parent e4ef737 commit 94d924e

File tree

4 files changed

+247
-32
lines changed

4 files changed

+247
-32
lines changed

arch/arm64/include/asm/hyperv-tlfs.h

Lines changed: 9 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -64,6 +64,15 @@
6464
#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
6565
#define HV_REGISTER_STIMER0_COUNT 0x000B0001
6666

67+
union hv_msi_entry {
68+
u64 as_uint64[2];
69+
struct {
70+
u64 address;
71+
u32 data;
72+
u32 reserved;
73+
} __packed;
74+
};
75+
6776
#include <asm-generic/hyperv-tlfs.h>
6877

6978
#endif

drivers/pci/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -184,7 +184,7 @@ config PCI_LABEL
184184

185185
config PCI_HYPERV
186186
tristate "Hyper-V PCI Frontend"
187-
depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
187+
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
188188
select PCI_HYPERV_INTERFACE
189189
help
190190
The PCI device frontend driver allows the kernel to import arbitrary

drivers/pci/controller/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -280,7 +280,7 @@ config PCIE_BRCMSTB
280280

281281
config PCI_HYPERV_INTERFACE
282282
tristate "Hyper-V PCI Interface"
283-
depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
283+
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN
284284
help
285285
The Hyper-V PCI Interface is a helper driver allows other drivers to
286286
have a common interface with the Hyper-V PCI frontend driver.

drivers/pci/controller/pci-hyperv.c

Lines changed: 236 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -47,6 +47,8 @@
4747
#include <linux/msi.h>
4848
#include <linux/hyperv.h>
4949
#include <linux/refcount.h>
50+
#include <linux/irqdomain.h>
51+
#include <linux/acpi.h>
5052
#include <asm/mshyperv.h>
5153

5254
/*
@@ -602,19 +604,237 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
602604
return cfg->vector;
603605
}
604606

605-
static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
606-
struct msi_desc *msi_desc)
607+
/*
 * x86 MSI-prepare hook: run the generic pci_msi_prepare() and then
 * relax the contiguous-vector requirement for multi-MSI, since the
 * hypervisor IOMMU's interrupt remapper makes it unnecessary.
 */
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
			  int nvec, msi_alloc_info_t *info)
{
	int ret;

	ret = pci_msi_prepare(domain, dev, nvec, info);

	/*
	 * By using the interrupt remapper in the hypervisor IOMMU, contiguous
	 * CPU vectors is not needed for multi-MSI
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	return ret;
}
611621

612-
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
613-
int nvec, msi_alloc_info_t *info)
622+
#elif defined(CONFIG_ARM64)
623+
/*
624+
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
625+
* of room at the start to allow for SPIs to be specified through ACPI and
626+
* starting with a power of two to satisfy power of 2 multi-MSI requirement.
627+
*/
628+
#define HV_PCI_MSI_SPI_START 64
629+
#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
630+
#define DELIVERY_MODE 0
631+
#define FLOW_HANDLER NULL
632+
#define FLOW_NAME NULL
633+
#define hv_msi_prepare NULL
634+
635+
struct hv_pci_chip_data {
636+
DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
637+
struct mutex map_lock;
638+
};
639+
640+
/* Hyper-V vPCI MSI GIC IRQ domain */
641+
static struct irq_domain *hv_msi_gic_irq_domain;
642+
643+
/* Hyper-V PCI MSI IRQ chip */
644+
static struct irq_chip hv_arm64_msi_irq_chip = {
645+
.name = "MSI",
646+
.irq_set_affinity = irq_chip_set_affinity_parent,
647+
.irq_eoi = irq_chip_eoi_parent,
648+
.irq_mask = irq_chip_mask_parent,
649+
.irq_unmask = irq_chip_unmask_parent
650+
};
651+
652+
static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
653+
{
654+
return irqd->parent_data->hwirq;
655+
}
656+
657+
/*
658+
* @nr_bm_irqs: Indicates the number of IRQs that were allocated from
659+
* the bitmap.
660+
* @nr_dom_irqs: Indicates the number of IRQs that were allocated from
661+
* the parent domain.
662+
*/
663+
static void hv_pci_vec_irq_free(struct irq_domain *domain,
664+
unsigned int virq,
665+
unsigned int nr_bm_irqs,
666+
unsigned int nr_dom_irqs)
667+
{
668+
struct hv_pci_chip_data *chip_data = domain->host_data;
669+
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
670+
int first = d->hwirq - HV_PCI_MSI_SPI_START;
671+
int i;
672+
673+
mutex_lock(&chip_data->map_lock);
674+
bitmap_release_region(chip_data->spi_map,
675+
first,
676+
get_count_order(nr_bm_irqs));
677+
mutex_unlock(&chip_data->map_lock);
678+
for (i = 0; i < nr_dom_irqs; i++) {
679+
if (i)
680+
d = irq_domain_get_irq_data(domain, virq + i);
681+
irq_domain_reset_irq_data(d);
682+
}
683+
684+
irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
685+
}
686+
687+
/* Domain .free callback: every IRQ came from both bitmap and parent. */
static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}
693+
694+
static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
695+
unsigned int nr_irqs,
696+
irq_hw_number_t *hwirq)
697+
{
698+
struct hv_pci_chip_data *chip_data = domain->host_data;
699+
int index;
700+
701+
/* Find and allocate region from the SPI bitmap */
702+
mutex_lock(&chip_data->map_lock);
703+
index = bitmap_find_free_region(chip_data->spi_map,
704+
HV_PCI_MSI_SPI_NR,
705+
get_count_order(nr_irqs));
706+
mutex_unlock(&chip_data->map_lock);
707+
if (index < 0)
708+
return -ENOSPC;
709+
710+
*hwirq = index + HV_PCI_MSI_SPI_START;
711+
712+
return 0;
713+
}
714+
715+
static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
716+
unsigned int virq,
717+
irq_hw_number_t hwirq)
718+
{
719+
struct irq_fwspec fwspec;
720+
struct irq_data *d;
721+
int ret;
722+
723+
fwspec.fwnode = domain->parent->fwnode;
724+
fwspec.param_count = 2;
725+
fwspec.param[0] = hwirq;
726+
fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
727+
728+
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
729+
if (ret)
730+
return ret;
731+
732+
/*
733+
* Since the interrupt specifier is not coming from ACPI or DT, the
734+
* trigger type will need to be set explicitly. Otherwise, it will be
735+
* set to whatever is in the GIC configuration.
736+
*/
737+
d = irq_domain_get_irq_data(domain->parent, virq);
738+
739+
return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
740+
}
741+
742+
static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
743+
unsigned int virq, unsigned int nr_irqs,
744+
void *args)
745+
{
746+
irq_hw_number_t hwirq;
747+
unsigned int i;
748+
int ret;
749+
750+
ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
751+
if (ret)
752+
return ret;
753+
754+
for (i = 0; i < nr_irqs; i++) {
755+
ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
756+
hwirq + i);
757+
if (ret) {
758+
hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
759+
return ret;
760+
}
761+
762+
irq_domain_set_hwirq_and_chip(domain, virq + i,
763+
hwirq + i,
764+
&hv_arm64_msi_irq_chip,
765+
domain->host_data);
766+
pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
767+
}
768+
769+
return 0;
770+
}
771+
772+
/*
773+
* Pick the first cpu as the irq affinity that can be temporarily used for
774+
* composing MSI from the hypervisor. GIC will eventually set the right
775+
* affinity for the irq and the 'unmask' will retarget the interrupt to that
776+
* cpu.
777+
*/
778+
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
779+
struct irq_data *irqd, bool reserve)
614780
{
615-
return pci_msi_prepare(domain, dev, nvec, info);
781+
int cpu = cpumask_first(cpu_present_mask);
782+
783+
irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
784+
785+
return 0;
616786
}
617-
#endif /* CONFIG_X86 */
787+
788+
static const struct irq_domain_ops hv_pci_domain_ops = {
789+
.alloc = hv_pci_vec_irq_domain_alloc,
790+
.free = hv_pci_vec_irq_domain_free,
791+
.activate = hv_pci_vec_irq_domain_activate,
792+
};
793+
794+
static int hv_pci_irqchip_init(void)
795+
{
796+
static struct hv_pci_chip_data *chip_data;
797+
struct fwnode_handle *fn = NULL;
798+
int ret = -ENOMEM;
799+
800+
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
801+
if (!chip_data)
802+
return ret;
803+
804+
mutex_init(&chip_data->map_lock);
805+
fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
806+
if (!fn)
807+
goto free_chip;
808+
809+
/*
810+
* IRQ domain once enabled, should not be removed since there is no
811+
* way to ensure that all the corresponding devices are also gone and
812+
* no interrupts will be generated.
813+
*/
814+
hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
815+
fn, &hv_pci_domain_ops,
816+
chip_data);
817+
818+
if (!hv_msi_gic_irq_domain) {
819+
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
820+
goto free_chip;
821+
}
822+
823+
return 0;
824+
825+
free_chip:
826+
kfree(chip_data);
827+
if (fn)
828+
irq_domain_free_fwnode(fn);
829+
830+
return ret;
831+
}
832+
833+
static struct irq_domain *hv_pci_get_root_domain(void)
834+
{
835+
return hv_msi_gic_irq_domain;
836+
}
837+
#endif /* CONFIG_ARM64 */
618838

619839
/**
620840
* hv_pci_generic_compl() - Invoked for a completion packet
@@ -1231,28 +1451,8 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
12311451
static void hv_irq_mask(struct irq_data *data)
12321452
{
12331453
pci_msi_mask_irq(data);
1234-
}
1235-
1236-
static unsigned int hv_msi_get_int_vector(struct irq_data *data)
1237-
{
1238-
struct irq_cfg *cfg = irqd_cfg(data);
1239-
1240-
return cfg->vector;
1241-
}
1242-
1243-
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
1244-
int nvec, msi_alloc_info_t *info)
1245-
{
1246-
int ret = pci_msi_prepare(domain, dev, nvec, info);
1247-
1248-
/*
1249-
* By using the interrupt remapper in the hypervisor IOMMU, contiguous
1250-
* CPU vectors is not needed for multi-MSI
1251-
*/
1252-
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
1253-
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
1254-
1255-
return ret;
1454+
if (data->parent_data->chip->irq_mask)
1455+
irq_chip_mask_parent(data);
12561456
}
12571457

12581458
/**
@@ -1372,6 +1572,8 @@ static void hv_irq_unmask(struct irq_data *data)
13721572
dev_err(&hbus->hdev->device,
13731573
"%s() failed: %#llx", __func__, res);
13741574

1575+
if (data->parent_data->chip->irq_unmask)
1576+
irq_chip_unmask_parent(data);
13751577
pci_msi_unmask_irq(data);
13761578
}
13771579

@@ -1685,7 +1887,11 @@ static struct irq_chip hv_msi_irq_chip = {
16851887
.name = "Hyper-V PCIe MSI",
16861888
.irq_compose_msi_msg = hv_compose_msi_msg,
16871889
.irq_set_affinity = irq_chip_set_affinity_parent,
1890+
#ifdef CONFIG_X86
16881891
.irq_ack = irq_chip_ack_parent,
1892+
#elif defined(CONFIG_ARM64)
1893+
.irq_eoi = irq_chip_eoi_parent,
1894+
#endif
16891895
.irq_mask = hv_irq_mask,
16901896
.irq_unmask = hv_irq_unmask,
16911897
};

0 commit comments

Comments
 (0)