 #include <zephyr/logging/log.h>
 LOG_MODULE_REGISTER(uhc_vrt, CONFIG_UHC_DRIVER_LOG_LEVEL);
 
+#define FRAME_MAX_TRANSFERS 16
+
 struct uhc_vrt_config {
 };
 
+/* A slot binds one enqueued transfer into the schedule of the current frame */
+struct uhc_vrt_slot {
+	sys_dnode_t node;
+	struct uhc_transfer *xfer;
+};
+
+/* Transfers to be performed within the current frame */
+struct uhc_vrt_frame {
+	struct uhc_vrt_slot slots[FRAME_MAX_TRANSFERS];
+	/* Node of the slot currently being processed */
+	sys_dnode_t *ptr;
+	/* List of slots scheduled for this frame */
+	sys_dlist_t list;
+	/* Number of transfers started in this frame */
+	uint8_t count;
+};
+
 struct uhc_vrt_data {
 	const struct device *dev;
 	struct uvb_node *host_node;
 	struct k_work work;
 	struct k_fifo fifo;
 	struct uhc_transfer *last_xfer;
+	struct uhc_vrt_frame frame;
 	struct k_timer sof_timer;
-	bool busy;
 	uint8_t req;
 };
 
 enum uhc_vrt_event_type {
-	/* Trigger next transfer */
-	UHC_VRT_EVT_XFER,
 	/* SoF generator event */
 	UHC_VRT_EVT_SOF,
 	/* Request reply received */
@@ -94,7 +106,6 @@ static int vrt_xfer_control(const struct device *dev,
 		}
 
 		priv->req = UVB_REQUEST_SETUP;
-		priv->busy = true;
 
 		return uvb_advert_pkt(priv->host_node, uvb_pkt);
 	}
@@ -118,7 +129,6 @@ static int vrt_xfer_control(const struct device *dev,
 		}
 
 		priv->req = UVB_REQUEST_DATA;
-		priv->busy = true;
 
 		return uvb_advert_pkt(priv->host_node, uvb_pkt);
 	}
@@ -142,7 +152,6 @@ static int vrt_xfer_control(const struct device *dev,
 		}
 
 		priv->req = UVB_REQUEST_DATA;
-		priv->busy = true;
 
 		return uvb_advert_pkt(priv->host_node, uvb_pkt);
 	}
@@ -177,25 +186,87 @@ static int vrt_xfer_bulk(const struct device *dev,
 	return uvb_advert_pkt(priv->host_node, uvb_pkt);
 }
 
-static int vrt_schedule_xfer(const struct device *dev)
+static inline uint8_t get_xfer_ep_idx(const uint8_t ep)
 {
-	struct uhc_vrt_data *priv = uhc_get_private(dev);
+	/* We do not need to differentiate the direction for the control
+	 * transfers because they are handled as a whole.
+	 */
+	if (USB_EP_DIR_IS_OUT(ep) || USB_EP_GET_IDX(ep) == 0) {
+		return USB_EP_GET_IDX(ep & BIT_MASK(4));
+	}
+
+	return USB_EP_GET_IDX(ep & BIT_MASK(4)) + 16U;
+}
+
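+/* Illustrative mapping produced by get_xfer_ep_idx(): control and OUT
+ * endpoints map to indexes 0..15 (e.g. 0x01 -> 1), IN endpoints map to
+ * indexes 16..31 (e.g. 0x81 -> 17), so a single 32-bit bitmap can mark
+ * one scheduled transfer per endpoint and direction within a frame.
+ */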
+static void vrt_assemble_frame(const struct device *dev)
+{
+	struct uhc_vrt_data *const priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
+	struct uhc_data *const data = dev->data;
+	struct uhc_transfer *tmp;
+	unsigned int n = 0;
+	unsigned int key;
+	uint32_t bm = 0;
+
+	sys_dlist_init(&frame->list);
+	frame->ptr = NULL;
+	frame->count = 0;
+	key = irq_lock();
+
+	/* TODO: add periodic transfers up to 90% */
+	SYS_DLIST_FOR_EACH_CONTAINER(&data->ctrl_xfers, tmp, node) {
+		uint8_t idx = get_xfer_ep_idx(tmp->ep);
+
+		/* There could be multiple transfers queued for the same
+		 * endpoint, for now we only allow one to be scheduled per frame.
+		 */
+		if (bm & BIT(idx)) {
+			continue;
+		}
+
+		bm |= BIT(idx);
+		frame->slots[n].xfer = tmp;
+		sys_dlist_append(&frame->list, &frame->slots[n].node);
+		n++;
+
+		if (n >= FRAME_MAX_TRANSFERS) {
+			/* No more free slots */
+			break;
+		}
+	}
+
+	irq_unlock(key);
+}
+
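+/* Pick and start the next transfer from the frame list. The work handler
+ * calls this again after each reply and on every SoF, so the assembled
+ * frame is drained one transaction at a time.
+ */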
+static int vrt_schedule_frame(const struct device *dev)
+{
+	struct uhc_vrt_data *const priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
+	struct uhc_vrt_slot *slot;
 
 	if (priv->last_xfer == NULL) {
-		priv->last_xfer = uhc_xfer_get_next(dev);
-		if (priv->last_xfer == NULL) {
-			LOG_DBG("Nothing to transfer");
+		if (frame->count >= FRAME_MAX_TRANSFERS) {
+			LOG_DBG("Frame finished");
 			return 0;
 		}
 
-		LOG_DBG("Next transfer is %p", priv->last_xfer);
+		frame->ptr = sys_dlist_get(&frame->list);
+		slot = SYS_DLIST_CONTAINER(frame->ptr, slot, node);
+		if (slot == NULL) {
+			LOG_DBG("No more transfers for the frame");
+			return 0;
+		}
+
+		priv->last_xfer = slot->xfer;
+		frame->count++;
+		LOG_DBG("Next transfer is %p (count %u)",
+			(void *)priv->last_xfer, frame->count);
 	}
 
 	if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) {
 		return vrt_xfer_control(dev, priv->last_xfer);
 	}
 
-	/* TODO: Isochronous transfers */
 	return vrt_xfer_bulk(dev, priv->last_xfer);
 }
@@ -276,6 +347,7 @@ static int vrt_handle_reply(const struct device *dev,
 			    struct uvb_packet *const pkt)
 {
 	struct uhc_vrt_data *priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
 	struct uhc_transfer *const xfer = priv->last_xfer;
 	int ret = 0;
 
@@ -285,11 +357,12 @@ static int vrt_handle_reply(const struct device *dev,
 		goto handle_reply_err;
 	}
 
-	priv->busy = false;
-
 	switch (pkt->reply) {
 	case UVB_REPLY_NACK:
-		/* Restart last transaction */
+		/* Move the transfer back to the list. */
+		sys_dlist_append(&frame->list, frame->ptr);
+		priv->last_xfer = NULL;
+		LOG_DBG("NACK 0x%02x count %u", xfer->ep, frame->count);
 		break;
 	case UVB_REPLY_STALL:
 		vrt_xfer_drop_active(dev, -EPIPE);
@@ -315,7 +388,6 @@ static void vrt_xfer_cleanup_cancelled(const struct device *dev)
 	struct uhc_transfer *tmp;
 
 	if (priv->last_xfer != NULL && priv->last_xfer->err == -ECONNRESET) {
-		priv->busy = false;
 		vrt_xfer_drop_active(dev, -ECONNRESET);
 	}
 
@@ -337,6 +409,11 @@ static void xfer_work_handler(struct k_work *work)
 	int err;
 
 	switch (ev->type) {
+	case UHC_VRT_EVT_SOF:
+		vrt_xfer_cleanup_cancelled(dev);
+		vrt_assemble_frame(dev);
+		schedule = true;
+		break;
 	case UHC_VRT_EVT_REPLY:
 		err = vrt_handle_reply(dev, ev->pkt);
 		if (unlikely(err)) {
@@ -345,23 +422,16 @@ static void xfer_work_handler(struct k_work *work)
 
 		schedule = true;
 		break;
-	case UHC_VRT_EVT_XFER:
-		LOG_DBG("Transfer triggered for %p", dev);
-		schedule = true;
-		break;
-	case UHC_VRT_EVT_SOF:
-		break;
 	default:
 		break;
 	}
 
-	vrt_xfer_cleanup_cancelled(dev);
-
-	if (schedule && !priv->busy) {
-		err = vrt_schedule_xfer(dev);
+	if (schedule) {
+		err = vrt_schedule_frame(dev);
 		if (unlikely(err)) {
 			uhc_submit_event(dev, UHC_EVT_ERROR, err);
 		}
+
 	}
 
 	k_mem_slab_free(&uhc_vrt_slab, (void *)ev);
@@ -461,7 +531,6 @@ static int uhc_vrt_enqueue(const struct device *dev,
 			   struct uhc_transfer *const xfer)
 {
 	uhc_xfer_append(dev, xfer);
-	vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL);
 
 	return 0;
 }
@@ -482,7 +551,6 @@ static int uhc_vrt_dequeue(const struct device *dev,
 	}
 
 	irq_unlock(key);
-	vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL);
 
 	return 0;
 }