|
47 | 47 | #include <linux/msi.h>
|
48 | 48 | #include <linux/hyperv.h>
|
49 | 49 | #include <linux/refcount.h>
|
| 50 | +#include <linux/irqdomain.h> |
| 51 | +#include <linux/acpi.h> |
50 | 52 | #include <asm/mshyperv.h>
|
51 | 53 |
|
52 | 54 | /*
|
@@ -614,7 +616,230 @@ static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
|
614 | 616 | {
|
615 | 617 | return pci_msi_prepare(domain, dev, nvec, info);
|
616 | 618 | }
|
617 |
| -#endif /* CONFIG_X86 */ |
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
 * of room at the start to allow for SPIs to be specified through ACPI and
 * starting with a power of two to satisfy power of 2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START 64
#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
/*
 * x86-only notions stubbed out so the shared code paths compile on arm64:
 * there is no APIC delivery mode, no flow handler override, and no MSI
 * prepare hook needed here.
 */
#define DELIVERY_MODE 0
#define FLOW_HANDLER NULL
#define FLOW_NAME NULL
#define hv_msi_prepare NULL

/* Per-domain bookkeeping for which SPIs have been handed out to vPCI. */
struct hv_pci_chip_data {
	/* One bit per SPI in [HV_PCI_MSI_SPI_START, 1020). */
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	/* Serializes allocate/release of regions in spi_map. */
	struct mutex map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip; all operations delegate to the GIC parent. */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};
| 648 | + |
| 649 | +static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) |
| 650 | +{ |
| 651 | + return irqd->parent_data->hwirq; |
| 652 | +} |
| 653 | + |
| 654 | +static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, |
| 655 | + struct msi_desc *msi_desc) |
| 656 | +{ |
| 657 | + msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) | |
| 658 | + msi_desc->msg.address_lo; |
| 659 | + msi_entry->data = msi_desc->msg.data; |
| 660 | +} |
| 661 | + |
| 662 | +/* |
| 663 | + * @nr_bm_irqs: Indicates the number of IRQs that were allocated from |
| 664 | + * the bitmap. |
| 665 | + * @nr_dom_irqs: Indicates the number of IRQs that were allocated from |
| 666 | + * the parent domain. |
| 667 | + */ |
| 668 | +static void hv_pci_vec_irq_free(struct irq_domain *domain, |
| 669 | + unsigned int virq, |
| 670 | + unsigned int nr_bm_irqs, |
| 671 | + unsigned int nr_dom_irqs) |
| 672 | +{ |
| 673 | + struct hv_pci_chip_data *chip_data = domain->host_data; |
| 674 | + struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
| 675 | + int first = d->hwirq - HV_PCI_MSI_SPI_START; |
| 676 | + int i; |
| 677 | + |
| 678 | + mutex_lock(&chip_data->map_lock); |
| 679 | + bitmap_release_region(chip_data->spi_map, |
| 680 | + first, |
| 681 | + get_count_order(nr_bm_irqs)); |
| 682 | + mutex_unlock(&chip_data->map_lock); |
| 683 | + for (i = 0; i < nr_dom_irqs; i++) { |
| 684 | + if (i) |
| 685 | + d = irq_domain_get_irq_data(domain, virq + i); |
| 686 | + irq_domain_reset_irq_data(d); |
| 687 | + } |
| 688 | + |
| 689 | + irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs); |
| 690 | +} |
| 691 | + |
/*
 * irq_domain_ops::free callback: on this path every irq was allocated from
 * both the SPI bitmap and the parent domain, so the two counts are equal.
 */
static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	unsigned int count = nr_irqs;

	hv_pci_vec_irq_free(domain, virq, count, count);
}
| 698 | + |
| 699 | +static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain, |
| 700 | + unsigned int nr_irqs, |
| 701 | + irq_hw_number_t *hwirq) |
| 702 | +{ |
| 703 | + struct hv_pci_chip_data *chip_data = domain->host_data; |
| 704 | + int index; |
| 705 | + |
| 706 | + /* Find and allocate region from the SPI bitmap */ |
| 707 | + mutex_lock(&chip_data->map_lock); |
| 708 | + index = bitmap_find_free_region(chip_data->spi_map, |
| 709 | + HV_PCI_MSI_SPI_NR, |
| 710 | + get_count_order(nr_irqs)); |
| 711 | + mutex_unlock(&chip_data->map_lock); |
| 712 | + if (index < 0) |
| 713 | + return -ENOSPC; |
| 714 | + |
| 715 | + *hwirq = index + HV_PCI_MSI_SPI_START; |
| 716 | + |
| 717 | + return 0; |
| 718 | +} |
| 719 | + |
| 720 | +static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, |
| 721 | + unsigned int virq, |
| 722 | + irq_hw_number_t hwirq) |
| 723 | +{ |
| 724 | + struct irq_fwspec fwspec; |
| 725 | + struct irq_data *d; |
| 726 | + int ret; |
| 727 | + |
| 728 | + fwspec.fwnode = domain->parent->fwnode; |
| 729 | + fwspec.param_count = 2; |
| 730 | + fwspec.param[0] = hwirq; |
| 731 | + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; |
| 732 | + |
| 733 | + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
| 734 | + if (ret) |
| 735 | + return ret; |
| 736 | + |
| 737 | + /* |
| 738 | + * Since the interrupt specifier is not coming from ACPI or DT, the |
| 739 | + * trigger type will need to be set explicitly. Otherwise, it will be |
| 740 | + * set to whatever is in the GIC configuration. |
| 741 | + */ |
| 742 | + d = irq_domain_get_irq_data(domain->parent, virq); |
| 743 | + |
| 744 | + return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); |
| 745 | +} |
| 746 | + |
| 747 | +static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain, |
| 748 | + unsigned int virq, unsigned int nr_irqs, |
| 749 | + void *args) |
| 750 | +{ |
| 751 | + irq_hw_number_t hwirq; |
| 752 | + unsigned int i; |
| 753 | + int ret; |
| 754 | + |
| 755 | + ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq); |
| 756 | + if (ret) |
| 757 | + return ret; |
| 758 | + |
| 759 | + for (i = 0; i < nr_irqs; i++) { |
| 760 | + ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i, |
| 761 | + hwirq + i); |
| 762 | + if (ret) { |
| 763 | + hv_pci_vec_irq_free(domain, virq, nr_irqs, i); |
| 764 | + return ret; |
| 765 | + } |
| 766 | + |
| 767 | + irq_domain_set_hwirq_and_chip(domain, virq + i, |
| 768 | + hwirq + i, |
| 769 | + &hv_arm64_msi_irq_chip, |
| 770 | + domain->host_data); |
| 771 | + pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i); |
| 772 | + } |
| 773 | + |
| 774 | + return 0; |
| 775 | +} |
| 776 | + |
| 777 | +/* |
| 778 | + * Pick the first cpu as the irq affinity that can be temporarily used for |
| 779 | + * composing MSI from the hypervisor. GIC will eventually set the right |
| 780 | + * affinity for the irq and the 'unmask' will retarget the interrupt to that |
| 781 | + * cpu. |
| 782 | + */ |
| 783 | +static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain, |
| 784 | + struct irq_data *irqd, bool reserve) |
| 785 | +{ |
| 786 | + int cpu = cpumask_first(cpu_present_mask); |
| 787 | + |
| 788 | + irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); |
| 789 | + |
| 790 | + return 0; |
| 791 | +} |
| 792 | + |
/* Domain ops for the SPI-backed Hyper-V vPCI MSI irq domain (arm64). */
static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc = hv_pci_vec_irq_domain_alloc,
	.free = hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};
| 798 | + |
| 799 | +static int hv_pci_irqchip_init(void) |
| 800 | +{ |
| 801 | + static struct hv_pci_chip_data *chip_data; |
| 802 | + struct fwnode_handle *fn = NULL; |
| 803 | + int ret = -ENOMEM; |
| 804 | + |
| 805 | + chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); |
| 806 | + if (!chip_data) |
| 807 | + return ret; |
| 808 | + |
| 809 | + mutex_init(&chip_data->map_lock); |
| 810 | + fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); |
| 811 | + if (!fn) |
| 812 | + goto free_chip; |
| 813 | + |
| 814 | + /* |
| 815 | + * IRQ domain once enabled, should not be removed since there is no |
| 816 | + * way to ensure that all the corresponding devices are also gone and |
| 817 | + * no interrupts will be generated. |
| 818 | + */ |
| 819 | + hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, |
| 820 | + fn, &hv_pci_domain_ops, |
| 821 | + chip_data); |
| 822 | + |
| 823 | + if (!hv_msi_gic_irq_domain) { |
| 824 | + pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); |
| 825 | + goto free_chip; |
| 826 | + } |
| 827 | + |
| 828 | + return 0; |
| 829 | + |
| 830 | +free_chip: |
| 831 | + kfree(chip_data); |
| 832 | + if (fn) |
| 833 | + irq_domain_free_fwnode(fn); |
| 834 | + |
| 835 | + return ret; |
| 836 | +} |
| 837 | + |
/*
 * Return the root irq domain that vPCI MSI allocations parent to — the
 * GIC-backed domain created in hv_pci_irqchip_init().
 */
static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}
| 842 | +#endif /* CONFIG_ARM64 */ |
618 | 843 |
|
619 | 844 | /**
|
620 | 845 | * hv_pci_generic_compl() - Invoked for a completion packet
|
@@ -1227,6 +1452,8 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
|
1227 | 1452 | static void hv_irq_mask(struct irq_data *data)
|
1228 | 1453 | {
|
1229 | 1454 | pci_msi_mask_irq(data);
|
| 1455 | + if (data->parent_data->chip->irq_mask) |
| 1456 | + irq_chip_mask_parent(data); |
1230 | 1457 | }
|
1231 | 1458 |
|
1232 | 1459 | /**
|
@@ -1343,6 +1570,8 @@ static void hv_irq_unmask(struct irq_data *data)
|
1343 | 1570 | dev_err(&hbus->hdev->device,
|
1344 | 1571 | "%s() failed: %#llx", __func__, res);
|
1345 | 1572 |
|
| 1573 | + if (data->parent_data->chip->irq_unmask) |
| 1574 | + irq_chip_unmask_parent(data); |
1346 | 1575 | pci_msi_unmask_irq(data);
|
1347 | 1576 | }
|
1348 | 1577 |
|
@@ -1618,7 +1847,11 @@ static struct irq_chip hv_msi_irq_chip = {
|
1618 | 1847 | .name = "Hyper-V PCIe MSI",
|
1619 | 1848 | .irq_compose_msi_msg = hv_compose_msi_msg,
|
1620 | 1849 | .irq_set_affinity = irq_chip_set_affinity_parent,
|
| 1850 | +#ifdef CONFIG_X86 |
1621 | 1851 | .irq_ack = irq_chip_ack_parent,
|
| 1852 | +#elif defined(CONFIG_ARM64) |
| 1853 | + .irq_eoi = irq_chip_eoi_parent, |
| 1854 | +#endif |
1622 | 1855 | .irq_mask = hv_irq_mask,
|
1623 | 1856 | .irq_unmask = hv_irq_unmask,
|
1624 | 1857 | };
|
|
0 commit comments