@@ -204,79 +204,84 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 }

 /*
- * See Cycle bit rules. SW is the consumer for the event ring only.
- *
- * If we've just enqueued a TRB that is in the middle of a TD (meaning the
- * chain bit is set), then set the chain bit in all the following link TRBs.
- * If we've enqueued the last TRB in a TD, make sure the following link TRBs
- * have their chain bit cleared (so that each Link TRB is a separate TD).
- *
- * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set. This was
- * fixed in the 0.96 specification errata, but we have to assume that all 0.95
- * xHCI hardware can't handle the chain bit being cleared on a link TRB.
- *
- * @more_trbs_coming:	Will you enqueue more TRBs before calling
- *			prepare_transfer()?
+ * If enqueue points at a link TRB, follow links until an ordinary TRB is reached.
+ * Toggle the cycle bit of passed link TRBs and optionally chain them.
  */
-static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		    bool more_trbs_coming)
+static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
 {
-	u32 chain;
-	union xhci_trb *next;
 	unsigned int link_trb_count = 0;

-	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
-
-	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
-		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
-		return;
-	}
-
-	next = ++(ring->enqueue);
-
-	/* Update the dequeue pointer further if that was a link TRB */
-	while (trb_is_link(next)) {
+	while (trb_is_link(ring->enqueue)) {

 		/*
-		 * If the caller doesn't plan on enqueueing more TDs before
-		 * ringing the doorbell, then we don't want to give the link TRB
-		 * to the hardware just yet. We'll give the link TRB back in
-		 * prepare_ring() just before we enqueue the TD at the top of
-		 * the ring.
-		 */
-		if (!chain && !more_trbs_coming)
-			break;
-
-		/* If we're not dealing with 0.95 hardware or isoc rings on
-		 * AMD 0.96 host, carry over the chain bit of the previous TRB
-		 * (which may mean the chain bit is cleared).
+		 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+		 * set, but other sections talk about dealing with the chain bit set. This was
+		 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+		 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+		 *
+		 * On 0.95 and some 0.96 HCs the chain bit is set once at segment initialization
+		 * and never changed here. On all others, modify it as requested by the caller.
 		 */
 		if (!xhci_link_chain_quirk(xhci, ring->type)) {
-			next->link.control &= cpu_to_le32(~TRB_CHAIN);
-			next->link.control |= cpu_to_le32(chain);
+			ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
+			ring->enqueue->link.control |= cpu_to_le32(chain);
 		}
+
 		/* Give this link TRB to the hardware */
 		wmb();
-		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+		ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

 		/* Toggle the cycle bit after the last ring segment. */
-		if (link_trb_toggles_cycle(next))
+		if (link_trb_toggles_cycle(ring->enqueue))
 			ring->cycle_state ^= 1;

 		ring->enq_seg = ring->enq_seg->next;
 		ring->enqueue = ring->enq_seg->trbs;
-		next = ring->enqueue;

 		trace_xhci_inc_enq(ring);

 		if (link_trb_count++ > ring->num_segs) {
-			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
+			xhci_warn(xhci, "Link TRB loop at enqueue\n");
 			break;
 		}
 	}
 }
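
This loop is the heart of the producer-side cycle-bit protocol: flipping TRB_CYCLE on a link TRB is what publishes it to the controller, and a link TRB whose Toggle Cycle flag is set marks the wrap point where the producer's cycle state inverts. A minimal stand-alone sketch of the same walk, using toy types rather than the driver's union xhci_trb, DMA memory, and le32 accessors (all toy_* names and the flat control-word layout are illustrative assumptions):

#include <stdint.h>

#define TOY_SEG_TRBS	8
#define TOY_CYCLE	(1u << 0)	/* cycle bit: ownership handoff to hardware */
#define TOY_LINK	(1u << 1)	/* TRB is a link to the next segment */
#define TOY_TC		(1u << 2)	/* link's Toggle Cycle flag (last segment only) */

struct toy_seg {
	uint32_t control[TOY_SEG_TRBS];	/* control word of each TRB; the last one is the link */
	struct toy_seg *next;
};

struct toy_ring {
	struct toy_seg *enq_seg;	/* segment holding the enqueue pointer */
	unsigned int enq;		/* index of the enqueue TRB within enq_seg */
	unsigned int cycle_state;	/* producer's current cycle bit value */
};

/* Walk enqueue past any link TRBs, publishing each link by flipping its cycle bit */
static void toy_past_link(struct toy_ring *ring)
{
	while (ring->enq_seg->control[ring->enq] & TOY_LINK) {
		uint32_t *link = &ring->enq_seg->control[ring->enq];

		/* Flipping the cycle bit is what hands the link TRB to hardware */
		*link ^= TOY_CYCLE;

		/* The last segment's link wraps the ring; the producer cycle inverts */
		if (*link & TOY_TC)
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enq = 0;
	}
}

As in inc_enq_past_link(), the walk stops as soon as an ordinary TRB is reached, so after the call the enqueue pointer never rests on a link.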

+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		    bool more_trbs_coming)
+{
+	u32 chain;
+
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
+		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
+		return;
+	}
+
+	ring->enqueue++;
+
+	/*
+	 * If we are in the middle of a TD or the caller plans to enqueue more
+	 * TDs as one transfer (e.g. control), traverse any link TRBs right now.
+	 * Otherwise, enqueue can stay on a link until the next prepare_ring().
+	 * This avoids enqueue entering deq_seg and simplifies ring expansion.
+	 */
+	if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
+		inc_enq_past_link(xhci, ring, chain);
+}
+
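
inc_enq() runs once per queued TRB; more_trbs_coming tells it whether the transfer has further TRBs to submit before the doorbell rings. Below is a condensed sketch of the producer path in the spirit of the existing queue_trb() caller (the field layout matches the driver's struct xhci_generic_trb, but the helper name and signature here are assumptions). The TRB body is written first; the control word, carrying the cycle bit, is written last behind a barrier so the controller never sees a half-written TRB:

static void queue_trb_sketch(struct xhci_hcd *xhci, struct xhci_ring *ring,
			     bool more_trbs_coming,
			     u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb = &ring->enqueue->generic;

	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	wmb();	/* TRB contents must be visible before the cycle bit flips */
	trb->field[3] = cpu_to_le32(field4 | ring->cycle_state);

	inc_enq(xhci, ring, more_trbs_coming);
}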
 /*
  * If the suspect DMA address is a TRB in this TD, this function returns that
  * TRB's segment. Otherwise it returns 0.
@@ -3213,7 +3218,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
-	unsigned int link_trb_count = 0;
 	unsigned int new_segs = 0;

 	/* Make sure the endpoint has been added to xHC schedule */
@@ -3261,33 +3265,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		}
 	}

-	while (trb_is_link(ep_ring->enqueue)) {
-		/* If we're not dealing with 0.95 hardware or isoc rings
-		 * on AMD 0.96 host, clear the chain bit.
-		 */
-		if (!xhci_link_chain_quirk(xhci, ep_ring->type))
-			ep_ring->enqueue->link.control &=
-				cpu_to_le32(~TRB_CHAIN);
-		else
-			ep_ring->enqueue->link.control |=
-				cpu_to_le32(TRB_CHAIN);
-
-		wmb();
-		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
-
-		/* Toggle the cycle bit after the last ring segment. */
-		if (link_trb_toggles_cycle(ep_ring->enqueue))
-			ep_ring->cycle_state ^= 1;
-
-		ep_ring->enq_seg = ep_ring->enq_seg->next;
-		ep_ring->enqueue = ep_ring->enq_seg->trbs;
-
-		/* prevent infinite loop if all first trbs are link trbs */
-		if (link_trb_count++ > ep_ring->num_segs) {
-			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
-			return -EINVAL;
-		}
-	}
+	/* Ensure that new TRBs won't overwrite a link */
+	if (trb_is_link(ep_ring->enqueue))
+		inc_enq_past_link(xhci, ep_ring, 0);

 	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
 		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
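
The chain argument distinguishes the two callers of inc_enq_past_link(): inc_enq() forwards the chain bit of the TRB just queued, so a link in the middle of a TD stays chained to it, while prepare_ring() passes 0, so any link it hands over is unchained and forms a separate TD. Ignoring the 0.95/AMD quirk branch, the control-word update a passed link receives reduces to this toy helper (illustrative only; TRB_CHAIN and TRB_CYCLE as in the driver):

static u32 link_control_after_pass(u32 old_control, u32 chain)
{
	u32 c = old_control;

	c &= ~TRB_CHAIN;	/* drop the stale chain bit */
	c |= chain;		/* inc_enq(): current TD's chain bit; prepare_ring(): 0 */
	c ^= TRB_CYCLE;		/* flip cycle: hand the link TRB to hardware */
	return c;
}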