|
47 | 47 | #include <linux/msi.h> |
48 | 48 | #include <linux/hyperv.h> |
49 | 49 | #include <linux/refcount.h> |
| 50 | +#include <linux/irqdomain.h> |
| 51 | +#include <linux/acpi.h> |
50 | 52 | #include <asm/mshyperv.h> |
51 | 53 |
|
52 | 54 | /* |
@@ -602,19 +604,237 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) |
602 | 604 | return cfg->vector; |
603 | 605 | } |
604 | 606 |
|
/*
 * Prepare MSI allocation info for a Hyper-V vPCI device (x86).
 *
 * Delegates to pci_msi_prepare() and then clears the contiguous-vector
 * requirement for multi-MSI, since the hypervisor's IOMMU interrupt
 * remapper removes the need for contiguous CPU vectors.
 */
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
			  int nvec, msi_alloc_info_t *info)
{
	int ret = pci_msi_prepare(domain, dev, nvec, info);

	/*
	 * By using the interrupt remapper in the hypervisor IOMMU, contiguous
	 * CPU vectors are not needed for multi-MSI.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	return ret;
}
611 | 621 |
|
612 | | -static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, |
613 | | - int nvec, msi_alloc_info_t *info) |
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
 * of room at the start to allow for SPIs to be specified through ACPI and
 * starting with a power of two to satisfy the power-of-2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
/* The x86-only hooks/constants have no arm64 equivalent */
#define DELIVERY_MODE		0
#define FLOW_HANDLER		NULL
#define FLOW_NAME		NULL
#define hv_msi_prepare		NULL

/*
 * Per-domain allocator state: a bitmap tracking which SPIs in
 * [HV_PCI_MSI_SPI_START, 1019] are in use, protected by map_lock.
 */
struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};
| 639 | + |
/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/*
 * Hyper-V PCI MSI IRQ chip: all operations simply forward to the parent
 * (GIC) chip in the domain hierarchy.
 */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};
| 651 | + |
/* Return the hwirq (the SPI number) assigned by the parent (GIC) domain. */
static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}
| 656 | + |
/*
 * Release the SPI bitmap region and the parent-domain IRQs for a vPCI
 * allocation.
 *
 * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
 *              the bitmap.
 * @nr_dom_irqs: Indicates the number of IRQs that were allocated from
 *               the parent domain.
 *
 * The counts can differ when unwinding a partially failed allocation:
 * the whole bitmap region was reserved up front, but only some of the
 * IRQs were successfully allocated from the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	/* Release the power-of-2 sized region, mirroring the allocation */
	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}
| 686 | + |
/* Domain .free callback: everything was fully allocated, so both counts match. */
static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}
| 693 | + |
/*
 * Reserve a power-of-2 aligned region of @nr_irqs SPIs from the bitmap
 * and report the first hardware IRQ number through @hwirq.
 *
 * Returns 0 on success, -ENOSPC when no free region is available.
 */
static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	/* Bitmap index 0 corresponds to the first usable SPI */
	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}
| 714 | + |
/*
 * Allocate one interrupt from the parent (GIC) domain for @hwirq and
 * force its trigger type to edge-rising.
 */
static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	/* Two-cell specifier for the parent: hwirq number and trigger type */
	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 2;
	fwspec.param[0] = hwirq;
	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}
| 741 | + |
/*
 * Domain .alloc callback: reserve @nr_irqs SPIs from the bitmap, then
 * allocate each one from the parent (GIC) domain and install the
 * Hyper-V arm64 MSI chip on it.
 */
static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			/*
			 * Unwind: the full bitmap region was reserved, but
			 * only i IRQs were allocated from the parent domain.
			 */
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}
| 771 | + |
/*
 * Pick the first cpu as the irq affinity that can be temporarily used for
 * composing MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}
617 | | -#endif /* CONFIG_X86 */ |
| 787 | + |
/* Operations for the Hyper-V vPCI MSI vector IRQ domain (arm64) */
static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};
| 793 | + |
| 794 | +static int hv_pci_irqchip_init(void) |
| 795 | +{ |
| 796 | + static struct hv_pci_chip_data *chip_data; |
| 797 | + struct fwnode_handle *fn = NULL; |
| 798 | + int ret = -ENOMEM; |
| 799 | + |
| 800 | + chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); |
| 801 | + if (!chip_data) |
| 802 | + return ret; |
| 803 | + |
| 804 | + mutex_init(&chip_data->map_lock); |
| 805 | + fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); |
| 806 | + if (!fn) |
| 807 | + goto free_chip; |
| 808 | + |
| 809 | + /* |
| 810 | + * IRQ domain once enabled, should not be removed since there is no |
| 811 | + * way to ensure that all the corresponding devices are also gone and |
| 812 | + * no interrupts will be generated. |
| 813 | + */ |
| 814 | + hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, |
| 815 | + fn, &hv_pci_domain_ops, |
| 816 | + chip_data); |
| 817 | + |
| 818 | + if (!hv_msi_gic_irq_domain) { |
| 819 | + pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); |
| 820 | + goto free_chip; |
| 821 | + } |
| 822 | + |
| 823 | + return 0; |
| 824 | + |
| 825 | +free_chip: |
| 826 | + kfree(chip_data); |
| 827 | + if (fn) |
| 828 | + irq_domain_free_fwnode(fn); |
| 829 | + |
| 830 | + return ret; |
| 831 | +} |
| 832 | + |
/* Return the root IRQ domain used for Hyper-V vPCI MSI allocations. */
static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}
#endif /* CONFIG_ARM64 */
618 | 838 |
|
619 | 839 | /** |
620 | 840 | * hv_pci_generic_compl() - Invoked for a completion packet |
@@ -1231,28 +1451,8 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, |
/*
 * Mask the interrupt at the PCI MSI level, and additionally at the
 * parent chip — but only when the parent implements irq_mask (the
 * parent chips differ between the architecture-specific hierarchies).
 */
static void hv_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	if (data->parent_data->chip->irq_mask)
		irq_chip_mask_parent(data);
}
1257 | 1457 |
|
1258 | 1458 | /** |
@@ -1372,6 +1572,8 @@ static void hv_irq_unmask(struct irq_data *data) |
1372 | 1572 | dev_err(&hbus->hdev->device, |
1373 | 1573 | "%s() failed: %#llx", __func__, res); |
1374 | 1574 |
|
| 1575 | + if (data->parent_data->chip->irq_unmask) |
| 1576 | + irq_chip_unmask_parent(data); |
1375 | 1577 | pci_msi_unmask_irq(data); |
1376 | 1578 | } |
1377 | 1579 |
|
@@ -1685,7 +1887,11 @@ static struct irq_chip hv_msi_irq_chip = { |
1685 | 1887 | .name = "Hyper-V PCIe MSI", |
1686 | 1888 | .irq_compose_msi_msg = hv_compose_msi_msg, |
1687 | 1889 | .irq_set_affinity = irq_chip_set_affinity_parent, |
| 1890 | +#ifdef CONFIG_X86 |
1688 | 1891 | .irq_ack = irq_chip_ack_parent, |
| 1892 | +#elif defined(CONFIG_ARM64) |
| 1893 | + .irq_eoi = irq_chip_eoi_parent, |
| 1894 | +#endif |
1689 | 1895 | .irq_mask = hv_irq_mask, |
1690 | 1896 | .irq_unmask = hv_irq_unmask, |
1691 | 1897 | }; |
|
0 commit comments