
Commit 7f53eca

jfischer-no authored and kartben committed

drivers: uhc_virtual: allow multiple transfers scheduled within a frame

With the current implementation, a NACK claims all resources and prevents
scheduling multiple transfers within a frame. Place the available transfers
in a list at the beginning of each frame and process them in a limited
number of slots.

Signed-off-by: Johann Fischer <[email protected]>
1 parent a91740e commit 7f53eca

File tree

1 file changed

+97 −29 lines changed

drivers/usb/uhc/uhc_virtual.c

Lines changed: 97 additions & 29 deletions
@@ -24,23 +24,35 @@
 #include <zephyr/logging/log.h>
 LOG_MODULE_REGISTER(uhc_vrt, CONFIG_UHC_DRIVER_LOG_LEVEL);
 
+#define FRAME_MAX_TRANSFERS 16
+
 struct uhc_vrt_config {
 };
 
+struct uhc_vrt_slot {
+	sys_dnode_t node;
+	struct uhc_transfer *xfer;
+};
+
+struct uhc_vrt_frame {
+	struct uhc_vrt_slot slots[FRAME_MAX_TRANSFERS];
+	sys_dnode_t *ptr;
+	sys_dlist_t list;
+	uint8_t count;
+};
+
 struct uhc_vrt_data {
 	const struct device *dev;
 	struct uvb_node *host_node;
 	struct k_work work;
 	struct k_fifo fifo;
 	struct uhc_transfer *last_xfer;
+	struct uhc_vrt_frame frame;
 	struct k_timer sof_timer;
-	bool busy;
 	uint8_t req;
 };
 
 enum uhc_vrt_event_type {
-	/* Trigger next transfer */
-	UHC_VRT_EVT_XFER,
 	/* SoF generator event */
 	UHC_VRT_EVT_SOF,
 	/* Request reply received */
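The two structures added above carry all per-frame state: the frame assembly code later in the diff fills the fixed `slots` array and links the occupied entries into `list`, and the scheduler then consumes the list one node at a time. As rough orientation, the intended usage pattern looks like the sketch below. It leans on Zephyr's intrusive `sys_dlist` API from `<zephyr/sys/dlist.h>`; `example_frame_usage()` and its `xfer` argument are hypothetical stand-ins, not code from this commit.

```c
/* Hypothetical sketch, not driver code: how the slot array and the
 * per-frame list cooperate. Assumes the uhc_vrt_slot and uhc_vrt_frame
 * definitions from the hunk above.
 */
#include <zephyr/drivers/usb/uhc.h>
#include <zephyr/sys/dlist.h>

static struct uhc_vrt_frame frame;

static void example_frame_usage(struct uhc_transfer *xfer)
{
	struct uhc_vrt_slot *slot;
	sys_dnode_t *node;

	/* At SoF: reset the list and claim a slot for a pending transfer. */
	sys_dlist_init(&frame.list);
	frame.count = 0;
	frame.slots[0].xfer = xfer;
	sys_dlist_append(&frame.list, &frame.slots[0].node);

	/* Between transfers: pop the next slot; NULL means the frame is done. */
	node = sys_dlist_get(&frame.list);
	slot = SYS_DLIST_CONTAINER(node, slot, node);
	if (slot != NULL) {
		frame.ptr = node;
		/* ...start slot->xfer here... */
	}
}
```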
@@ -94,7 +106,6 @@ static int vrt_xfer_control(const struct device *dev,
 	}
 
 	priv->req = UVB_REQUEST_SETUP;
-	priv->busy = true;
 
 	return uvb_advert_pkt(priv->host_node, uvb_pkt);
 }
@@ -118,7 +129,6 @@ static int vrt_xfer_control(const struct device *dev,
 	}
 
 	priv->req = UVB_REQUEST_DATA;
-	priv->busy = true;
 
 	return uvb_advert_pkt(priv->host_node, uvb_pkt);
 }
@@ -142,7 +152,6 @@ static int vrt_xfer_control(const struct device *dev,
 	}
 
 	priv->req = UVB_REQUEST_DATA;
-	priv->busy = true;
 
 	return uvb_advert_pkt(priv->host_node, uvb_pkt);
 }
@@ -177,25 +186,87 @@ static int vrt_xfer_bulk(const struct device *dev,
 	return uvb_advert_pkt(priv->host_node, uvb_pkt);
 }
 
-static int vrt_schedule_xfer(const struct device *dev)
+static inline uint8_t get_xfer_ep_idx(const uint8_t ep)
 {
-	struct uhc_vrt_data *priv = uhc_get_private(dev);
+	/* We do not need to differentiate the direction for the control
+	 * transfers because they are handled as a whole.
+	 */
+	if (USB_EP_DIR_IS_OUT(ep) || USB_EP_GET_IDX(ep) == 0) {
+		return USB_EP_GET_IDX(ep & BIT_MASK(4));
+	}
+
+	return USB_EP_GET_IDX(ep & BIT_MASK(4)) + 16U;
+}
+
+static void vrt_assemble_frame(const struct device *dev)
+{
+	struct uhc_vrt_data *const priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
+	struct uhc_data *const data = dev->data;
+	struct uhc_transfer *tmp;
+	unsigned int n = 0;
+	unsigned int key;
+	uint32_t bm = 0;
+
+	sys_dlist_init(&frame->list);
+	frame->ptr = NULL;
+	frame->count = 0;
+	key = irq_lock();
+
+	/* TODO: add periodic transfers up to 90% */
+	SYS_DLIST_FOR_EACH_CONTAINER(&data->ctrl_xfers, tmp, node) {
+		uint8_t idx = get_xfer_ep_idx(tmp->ep);
+
+		/* There could be multiple transfers queued for the same
+		 * endpoint, for now we only allow one to be scheduled per frame.
+		 */
+		if (bm & BIT(idx)) {
+			continue;
+		}
+
+		bm |= BIT(idx);
+		frame->slots[n].xfer = tmp;
+		sys_dlist_append(&frame->list, &frame->slots[n].node);
+		n++;
+
+		if (n >= FRAME_MAX_TRANSFERS) {
+			/* No more free slots */
+			break;
+		}
+	}
+
+	irq_unlock(key);
+}
+
+static int vrt_schedule_frame(const struct device *dev)
+{
+	struct uhc_vrt_data *const priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
+	struct uhc_vrt_slot *slot;
 
 	if (priv->last_xfer == NULL) {
-		priv->last_xfer = uhc_xfer_get_next(dev);
-		if (priv->last_xfer == NULL) {
-			LOG_DBG("Nothing to transfer");
+		if (frame->count >= FRAME_MAX_TRANSFERS) {
+			LOG_DBG("Frame finished");
 			return 0;
 		}
 
-		LOG_DBG("Next transfer is %p", priv->last_xfer);
+		frame->ptr = sys_dlist_get(&frame->list);
+		slot = SYS_DLIST_CONTAINER(frame->ptr, slot, node);
+		if (slot == NULL) {
+			LOG_DBG("No more transfers for the frame");
+			return 0;
+		}
+
+		priv->last_xfer = slot->xfer;
+		frame->count++;
+		LOG_DBG("Next transfer is %p (count %u)",
+			(void *)priv->last_xfer, frame->count);
 	}
 
 	if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) {
 		return vrt_xfer_control(dev, priv->last_xfer);
 	}
 
-	/* TODO: Isochronous transfers */
 	return vrt_xfer_bulk(dev, priv->last_xfer);
 }
 
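The `get_xfer_ep_idx()` helper maps every endpoint address to a unique bit: OUT endpoints and the control endpoint land in bits 0-15, IN endpoints in bits 16-31, which is what lets `vrt_assemble_frame()` filter duplicates with a single `uint32_t` bitmap. The standalone program below reproduces that mapping and the one-transfer-per-endpoint-per-frame filter; the `EP_DIR_IN`/`EP_IDX_MASK` macros and `ep_to_bit()` are local stand-ins for Zephyr's `USB_EP_DIR_IS_OUT()`, `USB_EP_GET_IDX()`, and `BIT()`, not the real definitions.

```c
/* Illustration only: host-side stand-in for the driver's bitmap filter. */
#include <stdint.h>
#include <stdio.h>

#define EP_DIR_IN   0x80U /* bit 7 of an endpoint address: 1 = IN */
#define EP_IDX_MASK 0x0FU /* bits 0..3: endpoint number */

/* Same mapping as get_xfer_ep_idx(): OUT/control -> 0..15, IN -> 16..31 */
static uint8_t ep_to_bit(const uint8_t ep)
{
	if ((ep & EP_DIR_IN) == 0U || (ep & EP_IDX_MASK) == 0U) {
		return ep & EP_IDX_MASK;
	}

	return (ep & EP_IDX_MASK) + 16U;
}

int main(void)
{
	const uint8_t eps[] = {0x00, 0x01, 0x81, 0x81, 0x02};
	uint32_t bm = 0;

	for (size_t i = 0; i < sizeof(eps); i++) {
		uint8_t idx = ep_to_bit(eps[i]);

		if (bm & (UINT32_C(1) << idx)) {
			/* Second transfer for 0x81 is skipped this frame. */
			printf("ep 0x%02x: already scheduled\n", (unsigned)eps[i]);
			continue;
		}

		bm |= UINT32_C(1) << idx;
		printf("ep 0x%02x -> bit %u\n", (unsigned)eps[i], (unsigned)idx);
	}

	return 0;
}
```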
@@ -276,6 +347,7 @@ static int vrt_handle_reply(const struct device *dev,
 			    struct uvb_packet *const pkt)
 {
 	struct uhc_vrt_data *priv = uhc_get_private(dev);
+	struct uhc_vrt_frame *const frame = &priv->frame;
 	struct uhc_transfer *const xfer = priv->last_xfer;
 	int ret = 0;
 
@@ -285,11 +357,12 @@ static int vrt_handle_reply(const struct device *dev,
 		goto handle_reply_err;
 	}
 
-	priv->busy = false;
-
 	switch (pkt->reply) {
 	case UVB_REPLY_NACK:
-		/* Restart last transaction */
+		/* Move the transfer back to the list. */
+		sys_dlist_append(&frame->list, frame->ptr);
+		priv->last_xfer = NULL;
+		LOG_DBG("NACK 0x%02x count %u", xfer->ep, frame->count);
 		break;
 	case UVB_REPLY_STALL:
 		vrt_xfer_drop_active(dev, -EPIPE);
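Note the fairness consequence of the new NACK branch: the old `busy` flag retried the same transaction while everything else waited, whereas appending the slot to the tail of `frame->list` lets the remaining slots of the frame run before the NACKed endpoint is tried again. The toy program below, which uses a plain index queue as a stand-in for the dlist, shows the resulting round-robin order; it is illustrative only, not driver code.

```c
/* Toy model of tail re-queueing on NACK, not driver code. */
#include <stdio.h>

int main(void)
{
	/* Index queue standing in for frame->list; slots 0..2 scheduled. */
	int queue[8] = {0, 1, 2};
	int head = 0, tail = 3;
	int nacks = 0;

	while (head < tail) {
		int slot = queue[head++];

		if (slot == 0 && nacks++ < 2) {
			/* Endpoint NACKed: re-append at the tail. */
			queue[tail++] = slot;
			printf("slot %d NACKed, requeued\n", slot);
			continue;
		}

		printf("slot %d completed\n", slot);
	}

	return 0;
}
```

Slots 1 and 2 complete between the retries of slot 0, instead of stalling behind it for the rest of the frame.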
@@ -315,7 +388,6 @@ static void vrt_xfer_cleanup_cancelled(const struct device *dev)
 	struct uhc_transfer *tmp;
 
 	if (priv->last_xfer != NULL && priv->last_xfer->err == -ECONNRESET) {
-		priv->busy = false;
 		vrt_xfer_drop_active(dev, -ECONNRESET);
 	}
 
@@ -337,6 +409,11 @@ static void xfer_work_handler(struct k_work *work)
 	int err;
 
 	switch (ev->type) {
+	case UHC_VRT_EVT_SOF:
+		vrt_xfer_cleanup_cancelled(dev);
+		vrt_assemble_frame(dev);
+		schedule = true;
+		break;
 	case UHC_VRT_EVT_REPLY:
 		err = vrt_handle_reply(dev, ev->pkt);
 		if (unlikely(err)) {
@@ -345,23 +422,16 @@ static void xfer_work_handler(struct k_work *work)
 
 		schedule = true;
 		break;
-	case UHC_VRT_EVT_XFER:
-		LOG_DBG("Transfer triggered for %p", dev);
-		schedule = true;
-		break;
-	case UHC_VRT_EVT_SOF:
-		break;
 	default:
 		break;
 	}
 
-	vrt_xfer_cleanup_cancelled(dev);
-
-	if (schedule && !priv->busy) {
-		err = vrt_schedule_xfer(dev);
+	if (schedule) {
+		err = vrt_schedule_frame(dev);
 		if (unlikely(err)) {
 			uhc_submit_event(dev, UHC_EVT_ERROR, err);
 		}
+
 	}
 
 	k_mem_slab_free(&uhc_vrt_slab, (void *)ev);
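With `UHC_VRT_EVT_XFER` gone, the SoF event is the only scheduling trigger left: each frame tick cleans up cancelled transfers, assembles a fresh frame, and starts the first slot, and each `UHC_VRT_EVT_REPLY` advances to the next slot. The sketch below shows one way such a 1 ms SoF cadence can be produced with Zephyr's `k_timer` API; the driver already owns a `sof_timer`, but the wiring shown here, including the names `start_sof()` and `sof_timer_handler()`, is an illustrative assumption, not code from this file.

```c
/* Illustrative SoF wiring, not taken from uhc_virtual.c. */
#include <zephyr/kernel.h>

static struct k_timer sof_timer;

static void sof_timer_handler(struct k_timer *timer)
{
	ARG_UNUSED(timer);
	/* Runs in timer (ISR) context: only hand off to the work queue,
	 * e.g. via vrt_event_submit(dev, UHC_VRT_EVT_SOF, NULL).
	 */
}

static void start_sof(void)
{
	k_timer_init(&sof_timer, sof_timer_handler, NULL);
	/* Full-speed USB frames are 1 ms apart. */
	k_timer_start(&sof_timer, K_MSEC(1), K_MSEC(1));
}
```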
@@ -461,7 +531,6 @@ static int uhc_vrt_enqueue(const struct device *dev,
 			   struct uhc_transfer *const xfer)
 {
 	uhc_xfer_append(dev, xfer);
-	vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL);
 
 	return 0;
 }
@@ -482,7 +551,6 @@ static int uhc_vrt_dequeue(const struct device *dev,
 	}
 
 	irq_unlock(key);
-	vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL);
 
 	return 0;
 }
