@@ -115,6 +115,59 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
115115 return group ;
116116}
117117
118+ static struct iommu_attach_handle * find_fault_handler (struct device * dev ,
119+ struct iopf_fault * evt )
120+ {
121+ struct iommu_fault * fault = & evt -> fault ;
122+ struct iommu_attach_handle * attach_handle ;
123+
124+ if (fault -> prm .flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID ) {
125+ attach_handle = iommu_attach_handle_get (dev -> iommu_group ,
126+ fault -> prm .pasid , 0 );
127+ if (IS_ERR (attach_handle )) {
128+ const struct iommu_ops * ops = dev_iommu_ops (dev );
129+
130+ if (!ops -> user_pasid_table )
131+ return NULL ;
132+ /*
133+ * The iommu driver for this device supports user-
134+ * managed PASID table. Therefore page faults for
135+ * any PASID should go through the NESTING domain
136+ * attached to the device RID.
137+ */
138+ attach_handle = iommu_attach_handle_get (
139+ dev -> iommu_group , IOMMU_NO_PASID ,
140+ IOMMU_DOMAIN_NESTED );
141+ if (IS_ERR (attach_handle ))
142+ return NULL ;
143+ }
144+ } else {
145+ attach_handle = iommu_attach_handle_get (dev -> iommu_group ,
146+ IOMMU_NO_PASID , 0 );
147+
148+ if (IS_ERR (attach_handle ))
149+ return NULL ;
150+ }
151+
152+ if (!attach_handle -> domain -> iopf_handler )
153+ return NULL ;
154+
155+ return attach_handle ;
156+ }
157+
158+ static void iopf_error_response (struct device * dev , struct iopf_fault * evt )
159+ {
160+ const struct iommu_ops * ops = dev_iommu_ops (dev );
161+ struct iommu_fault * fault = & evt -> fault ;
162+ struct iommu_page_response resp = {
163+ .pasid = fault -> prm .pasid ,
164+ .grpid = fault -> prm .grpid ,
165+ .code = IOMMU_PAGE_RESP_INVALID
166+ };
167+
168+ ops -> page_response (dev , evt , & resp );
169+ }
170+
118171/**
119172 * iommu_report_device_fault() - Report fault event to device driver
120173 * @dev: the device
@@ -153,24 +206,39 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
153206 * handling framework should guarantee that the iommu domain could only be
154207 * freed after the device has stopped generating page faults (or the iommu
155208 * hardware has been set to block the page faults) and the pending page faults
156- * have been flushed.
209+ * have been flushed. In case no page fault handler is attached or no iopf
210+ * params are set up, ops->page_response() is called to complete the evt.
211+ *
212+ * Returns 0 on success, or an error in case of a bad/failed iopf setup.
157213 */
158- void iommu_report_device_fault (struct device * dev , struct iopf_fault * evt )
214+ int iommu_report_device_fault (struct device * dev , struct iopf_fault * evt )
159215{
216+ struct iommu_attach_handle * attach_handle ;
160217 struct iommu_fault * fault = & evt -> fault ;
161218 struct iommu_fault_param * iopf_param ;
162219 struct iopf_group abort_group = {};
163220 struct iopf_group * group ;
164221
222+ attach_handle = find_fault_handler (dev , evt );
223+ if (!attach_handle )
224+ goto err_bad_iopf ;
225+
226+ /*
227+ * Something has gone wrong if a fault-capable domain is attached but no
228+ * iopf_param has been set up.
229+ */
165230 iopf_param = iopf_get_dev_fault_param (dev );
166231 if (WARN_ON (!iopf_param ))
167- return ;
232+ goto err_bad_iopf ;
168233
169234 if (!(fault -> prm .flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE )) {
170- report_partial_fault (iopf_param , fault );
235+ int ret ;
236+
237+ ret = report_partial_fault (iopf_param , fault );
171238 iopf_put_dev_fault_param (iopf_param );
172239 /* A request that is not the last does not need to be ack'd */
173- return ;
240+
241+ return ret ;
174242 }
175243
176244 /*
@@ -185,38 +253,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
185253 if (group == & abort_group )
186254 goto err_abort ;
187255
188- if (fault -> prm .flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID ) {
189- group -> attach_handle = iommu_attach_handle_get (dev -> iommu_group ,
190- fault -> prm .pasid ,
191- 0 );
192- if (IS_ERR (group -> attach_handle )) {
193- const struct iommu_ops * ops = dev_iommu_ops (dev );
194-
195- if (!ops -> user_pasid_table )
196- goto err_abort ;
197-
198- /*
199- * The iommu driver for this device supports user-
200- * managed PASID table. Therefore page faults for
201- * any PASID should go through the NESTING domain
202- * attached to the device RID.
203- */
204- group -> attach_handle =
205- iommu_attach_handle_get (dev -> iommu_group ,
206- IOMMU_NO_PASID ,
207- IOMMU_DOMAIN_NESTED );
208- if (IS_ERR (group -> attach_handle ))
209- goto err_abort ;
210- }
211- } else {
212- group -> attach_handle =
213- iommu_attach_handle_get (dev -> iommu_group , IOMMU_NO_PASID , 0 );
214- if (IS_ERR (group -> attach_handle ))
215- goto err_abort ;
216- }
217-
218- if (!group -> attach_handle -> domain -> iopf_handler )
219- goto err_abort ;
256+ group -> attach_handle = attach_handle ;
220257
221258 /*
222259 * On success iopf_handler must call iopf_group_response() and
@@ -225,7 +262,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
225262 if (group -> attach_handle -> domain -> iopf_handler (group ))
226263 goto err_abort ;
227264
228- return ;
265+ return 0 ;
229266
230267err_abort :
231268 dev_warn_ratelimited (dev , "iopf with pasid %d aborted\n" ,
@@ -235,6 +272,14 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
235272 __iopf_free_group (group );
236273 else
237274 iopf_free_group (group );
275+
276+ return 0 ;
277+
278+ err_bad_iopf :
279+ if (fault -> type == IOMMU_FAULT_PAGE_REQ )
280+ iopf_error_response (dev , evt );
281+
282+ return - EINVAL ;
238283}
239284EXPORT_SYMBOL_GPL (iommu_report_device_fault );
240285
0 commit comments