 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/iommufd.h>
+#include <linux/pci.h>
 #include <linux/poll.h>
 #include <linux/anon_inodes.h>
 #include <uapi/linux/iommufd.h>

 #include "../iommu-priv.h"
 #include "iommufd_private.h"

+static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+{
+        struct device *dev = idev->dev;
+        int ret;
+
+        /*
+         * Once we turn on PCI/PRI support for VF, the response failure code
+         * should not be forwarded to the hardware due to PRI being a shared
+         * resource between PF and VFs. There is no coordination for this
+         * shared capability. This waits for a vPRI reset to recover.
+         */
+        if (dev_is_pci(dev) && to_pci_dev(dev)->is_virtfn)
+                return -EINVAL;
+
+        mutex_lock(&idev->iopf_lock);
+        /* Device IOPF has already been enabled. */
+        if (++idev->iopf_enabled > 1) {
+                mutex_unlock(&idev->iopf_lock);
+                return 0;
+        }
+
+        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
+        if (ret)
+                --idev->iopf_enabled;
+        mutex_unlock(&idev->iopf_lock);
+
+        return ret;
+}
+
+static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+{
+        mutex_lock(&idev->iopf_lock);
+        if (!WARN_ON(idev->iopf_enabled == 0)) {
+                if (--idev->iopf_enabled == 0)
+                        iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
+        }
+        mutex_unlock(&idev->iopf_lock);
+}
+
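The pair above forms a per-device refcount: the first fault-capable attachment turns IOMMU_DEV_FEAT_IOPF on, later attachments only bump the count, and the last detach turns it off again. A minimal userspace model of that pattern, with a pthread mutex standing in for the kernel mutex and every name invented for illustration:

/* Userspace model of the refcounted enable/disable shape above.
 * Only the locking/counting structure mirrors the kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t feat_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int feat_users;

static int hw_feature_enable(void)   { puts("feature: on");  return 0; }
static void hw_feature_disable(void) { puts("feature: off"); }

static int feature_get(void)
{
        int ret = 0;

        pthread_mutex_lock(&feat_lock);
        if (++feat_users == 1) {                /* first user enables the hardware */
                ret = hw_feature_enable();
                if (ret)
                        --feat_users;           /* roll the count back on failure */
        }
        pthread_mutex_unlock(&feat_lock);
        return ret;
}

static void feature_put(void)
{
        pthread_mutex_lock(&feat_lock);
        if (feat_users && --feat_users == 0)    /* last user disables it */
                hw_feature_disable();
        pthread_mutex_unlock(&feat_lock);
}

int main(void)
{
        feature_get();  /* prints "feature: on" */
        feature_get();  /* refcount only */
        feature_put();  /* still on */
        feature_put();  /* prints "feature: off" */
        return 0;
}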
+static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
+                                     struct iommufd_device *idev)
+{
+        struct iommufd_attach_handle *handle;
+        int ret;
+
+        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+        if (!handle)
+                return -ENOMEM;
+
+        handle->idev = idev;
+        ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
+                                        &handle->handle);
+        if (ret)
+                kfree(handle);
+
+        return ret;
+}
+
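__fault_domain_attach_dev() embeds the core handle inside an iommufd-specific wrapper so a fault report can later be mapped back to the owning device. For context, the wrapper and its converter as this series defines them elsewhere in iommufd_private.h; quoted from my reading of the series, not part of this hunk:

/* iommufd-specific attach handle: the core handle is embedded first so
 * to_iommufd_handle() can recover the wrapper with container_of(). */
struct iommufd_attach_handle {
        struct iommu_attach_handle handle;
        struct iommufd_device *idev;
};

/* Convert an iommu attach handle to an iommufd handle. */
#define to_iommufd_handle(hdl)  container_of(hdl, struct iommufd_attach_handle, handle)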
+int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
+                                    struct iommufd_device *idev)
+{
+        int ret;
+
+        if (!hwpt->fault)
+                return -EINVAL;
+
+        ret = iommufd_fault_iopf_enable(idev);
+        if (ret)
+                return ret;
+
+        ret = __fault_domain_attach_dev(hwpt, idev);
+        if (ret)
+                iommufd_fault_iopf_disable(idev);
+
+        return ret;
+}
+
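iommufd_fault_domain_attach_dev() is the fault-capable twin of the plain attach path: IOPF is enabled before the attach so the handle is in place when faults can first arrive, and rolled back if the attach fails. The dispatch at the hwpt attach site looks roughly like the sketch below, simplified from my reading of the device.c side of this series, so treat it as illustrative rather than the literal caller:

        /* Illustrative dispatch, not the literal caller. */
        if (hwpt->fault)
                ret = iommufd_fault_domain_attach_dev(hwpt, idev);
        else
                ret = iommu_attach_group(hwpt->domain, idev->igroup->group);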
+static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+                                         struct iommufd_attach_handle *handle)
+{
+        struct iommufd_fault *fault = hwpt->fault;
+        struct iopf_group *group, *next;
+        unsigned long index;
+
+        if (!fault)
+                return;
+
+        mutex_lock(&fault->mutex);
+        list_for_each_entry_safe(group, next, &fault->deliver, node) {
+                if (group->attach_handle != &handle->handle)
+                        continue;
+                list_del(&group->node);
+                iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+                iopf_free_group(group);
+        }
+
+        xa_for_each(&fault->response, index, group) {
+                if (group->attach_handle != &handle->handle)
+                        continue;
+                xa_erase(&fault->response, index);
+                iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+                iopf_free_group(group);
+        }
+        mutex_unlock(&fault->mutex);
+}
+
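Auto-response has to sweep two holding areas: the deliver list (fault groups queued but not yet read by userspace) and the response xarray (groups already read but not yet answered). Both live in the fault object, whose layout, as defined elsewhere in this series and quoted here only for context, is roughly:

struct iommufd_fault {
        struct iommufd_object obj;
        struct iommufd_ctx *ictx;
        struct file *filep;

        /* Outstanding faults, protected by the mutex below. */
        struct mutex mutex;
        struct list_head deliver;       /* queued for read() by userspace */
        struct xarray response;         /* read, awaiting a write() response */

        struct wait_queue_head wait_queue;
};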
+static struct iommufd_attach_handle *
+iommufd_device_get_attach_handle(struct iommufd_device *idev)
+{
+        struct iommu_attach_handle *handle;
+
+        handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
+        if (!handle)
+                return NULL;
+
+        return to_iommufd_handle(handle);
+}
+
+void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
+                                     struct iommufd_device *idev)
+{
+        struct iommufd_attach_handle *handle;
+
+        handle = iommufd_device_get_attach_handle(idev);
+        /* Detach first so no new faults target this domain, then
+         * auto-respond anything still outstanding before dropping IOPF. */
+        iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+        iommufd_auto_response_faults(hwpt, handle);
+        iommufd_fault_iopf_disable(idev);
+        kfree(handle);
+}
+
+static int __fault_domain_replace_dev(struct iommufd_device *idev,
+                                      struct iommufd_hw_pagetable *hwpt,
+                                      struct iommufd_hw_pagetable *old)
+{
+        struct iommufd_attach_handle *handle, *curr = NULL;
+        int ret;
+
+        if (old->fault)
+                curr = iommufd_device_get_attach_handle(idev);
+
+        if (hwpt->fault) {
+                handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+                if (!handle)
+                        return -ENOMEM;
+
+                handle->handle.domain = hwpt->domain;
+                handle->idev = idev;
+                ret = iommu_replace_group_handle(idev->igroup->group,
+                                                 hwpt->domain, &handle->handle);
+        } else {
+                ret = iommu_replace_group_handle(idev->igroup->group,
+                                                 hwpt->domain, NULL);
+        }
+
+        if (!ret && curr) {
+                iommufd_auto_response_faults(old, curr);
+                kfree(curr);
+        }
+
+        return ret;
+}
+
+int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
+                                     struct iommufd_hw_pagetable *hwpt,
+                                     struct iommufd_hw_pagetable *old)
+{
+        bool iopf_off = !hwpt->fault && old->fault;
+        bool iopf_on = hwpt->fault && !old->fault;
+        int ret;
+
+        if (iopf_on) {
+                ret = iommufd_fault_iopf_enable(idev);
+                if (ret)
+                        return ret;
+        }
+
+        ret = __fault_domain_replace_dev(idev, hwpt, old);
+        if (ret) {
+                if (iopf_on)
+                        iommufd_fault_iopf_disable(idev);
+                return ret;
+        }
+
+        if (iopf_off)
+                iommufd_fault_iopf_disable(idev);
+
+        return 0;
+}
+
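The iopf_on/iopf_off booleans cover the four fault-capability transitions a replace can make. Summarizing the logic above (derived from the code, not text from the commit):

old->fault | hwpt->fault | IOPF handling around the replace
-----------|-------------|--------------------------------------------------
unset      | unset       | none
unset      | set         | enable before the replace, disable again on error
set        | unset       | disable only after the replace succeeds
set        | set         | none; the refcount is already held, and faults
           |             | pending on the old hwpt are auto-responded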
 void iommufd_fault_destroy(struct iommufd_object *obj)
 {
         struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);