|
16 | 16 |
|
17 | 17 | #include "dma-iommu.h"
|
18 | 18 |
|
19 |
| -static const struct iommu_ops s390_iommu_ops; |
| 19 | +static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops; |
20 | 20 |
|
21 | 21 | static struct kmem_cache *dma_region_table_cache;
|
22 | 22 | static struct kmem_cache *dma_page_table_cache;
|
@@ -432,9 +432,11 @@ static int blocking_domain_attach_device(struct iommu_domain *domain,
|
432 | 432 | return 0;
|
433 | 433 |
|
434 | 434 | s390_domain = to_s390_domain(zdev->s390_domain);
|
435 |
| - spin_lock_irqsave(&s390_domain->list_lock, flags); |
436 |
| - list_del_rcu(&zdev->iommu_list); |
437 |
| - spin_unlock_irqrestore(&s390_domain->list_lock, flags); |
| 435 | + if (zdev->dma_table) { |
| 436 | + spin_lock_irqsave(&s390_domain->list_lock, flags); |
| 437 | + list_del_rcu(&zdev->iommu_list); |
| 438 | + spin_unlock_irqrestore(&s390_domain->list_lock, flags); |
| 439 | + } |
438 | 440 |
|
439 | 441 | zpci_unregister_ioat(zdev, 0);
|
440 | 442 | zdev->dma_table = NULL;
|
@@ -762,7 +764,13 @@ int zpci_init_iommu(struct zpci_dev *zdev)
|
762 | 764 | if (rc)
|
763 | 765 | goto out_err;
|
764 | 766 |
|
765 |
| - rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL); |
| 767 | + if (zdev->rtr_avail) { |
| 768 | + rc = iommu_device_register(&zdev->iommu_dev, |
| 769 | + &s390_iommu_rtr_ops, NULL); |
| 770 | + } else { |
| 771 | + rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, |
| 772 | + NULL); |
| 773 | + } |
766 | 774 | if (rc)
|
767 | 775 | goto out_sysfs;
|
768 | 776 |
|
@@ -826,30 +834,71 @@ static int __init s390_iommu_init(void)
|
826 | 834 | }
|
827 | 835 | subsys_initcall(s390_iommu_init);
|
828 | 836 |
|
| 837 | +static int s390_attach_dev_identity(struct iommu_domain *domain, |
| 838 | + struct device *dev) |
| 839 | +{ |
| 840 | + struct zpci_dev *zdev = to_zpci_dev(dev); |
| 841 | + u8 status; |
| 842 | + int cc; |
| 843 | + |
| 844 | + blocking_domain_attach_device(&blocking_domain, dev); |
| 845 | + |
| 846 | + /* If we fail now DMA remains blocked via blocking domain */ |
| 847 | + cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); |
| 848 | + |
| 849 | + /* |
| 850 | + * If the device is undergoing error recovery the reset code |
| 851 | + * will re-establish the new domain. |
| 852 | + */ |
| 853 | + if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) |
| 854 | + return -EIO; |
| 855 | + |
| 856 | + zdev_s390_domain_update(zdev, domain); |
| 857 | + |
| 858 | + return 0; |
| 859 | +} |
| 860 | + |
/* Identity domain only needs an attach handler; there is nothing to map. */
static const struct iommu_domain_ops s390_identity_ops = {
	.attach_dev = s390_attach_dev_identity,
};

/*
 * Statically allocated identity (pass-through) domain, exposed via
 * s390_iommu_rtr_ops.identity_domain below.
 */
static struct iommu_domain s390_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &s390_identity_ops,
};
| 869 | + |
/*
 * Statically allocated blocking domain; attaching a device to it tears
 * down its current translation (see blocking_domain_attach_device) so
 * no DMA can pass.  Used as both blocked_domain and release_domain.
 */
static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev = blocking_domain_attach_device,
	}
};
|
835 | 876 |
|
836 |
| -static const struct iommu_ops s390_iommu_ops = { |
837 |
| - .blocked_domain = &blocking_domain, |
838 |
| - .release_domain = &blocking_domain, |
839 |
| - .capable = s390_iommu_capable, |
840 |
| - .domain_alloc_paging = s390_domain_alloc_paging, |
841 |
| - .probe_device = s390_iommu_probe_device, |
842 |
| - .device_group = generic_device_group, |
843 |
| - .pgsize_bitmap = SZ_4K, |
844 |
| - .get_resv_regions = s390_iommu_get_resv_regions, |
845 |
| - .default_domain_ops = &(const struct iommu_domain_ops) { |
846 |
| - .attach_dev = s390_iommu_attach_device, |
847 |
| - .map_pages = s390_iommu_map_pages, |
848 |
| - .unmap_pages = s390_iommu_unmap_pages, |
849 |
| - .flush_iotlb_all = s390_iommu_flush_iotlb_all, |
850 |
| - .iotlb_sync = s390_iommu_iotlb_sync, |
851 |
| - .iotlb_sync_map = s390_iommu_iotlb_sync_map, |
852 |
| - .iova_to_phys = s390_iommu_iova_to_phys, |
853 |
| - .free = s390_domain_free, |
/*
 * Initializers shared by s390_iommu_ops and s390_iommu_rtr_ops; the two
 * variants differ only in whether an identity domain is offered.  Note
 * the expansion deliberately ends with the closing brace of the
 * default_domain_ops compound literal.
 */
#define S390_IOMMU_COMMON_OPS() \
	.blocked_domain = &blocking_domain, \
	.release_domain = &blocking_domain, \
	.capable = s390_iommu_capable, \
	.domain_alloc_paging = s390_domain_alloc_paging, \
	.probe_device = s390_iommu_probe_device, \
	.device_group = generic_device_group, \
	.pgsize_bitmap = SZ_4K, \
	.get_resv_regions = s390_iommu_get_resv_regions, \
	.default_domain_ops = &(const struct iommu_domain_ops) { \
		.attach_dev = s390_iommu_attach_device, \
		.map_pages = s390_iommu_map_pages, \
		.unmap_pages = s390_iommu_unmap_pages, \
		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
		.iotlb_sync = s390_iommu_iotlb_sync, \
		.iotlb_sync_map = s390_iommu_iotlb_sync_map, \
		.iova_to_phys = s390_iommu_iova_to_phys, \
		.free = s390_domain_free, \
	}
|
| 896 | + |
/* Default ops, registered for devices without zdev->rtr_avail. */
static const struct iommu_ops s390_iommu_ops = {
	S390_IOMMU_COMMON_OPS()
};
| 900 | + |
/*
 * Ops registered when zdev->rtr_avail is set (see zpci_init_iommu):
 * identical to s390_iommu_ops but additionally offering an identity
 * (pass-through) domain.
 */
static const struct iommu_ops s390_iommu_rtr_ops = {
	.identity_domain = &s390_identity_domain,
	S390_IOMMU_COMMON_OPS()
};
|
0 commit comments