Skip to content

Commit a99da8d

Browse files
author
Alain Volmat
committed
samples: usb: uvc: add video encoder support
Allow creating a pipeline as follows: camera receiver -> encoder -> UVC. If the chosen zephyr,videoenc node is available, the sample pipes the camera receiver to the encoder and then to the UVC device, instead of connecting the camera receiver directly to the UVC device. The current implementation has several points hardcoded for the time being: 1. The intermediate pixel format between the camera receiver and the encoder is set to NV12. This shouldn't be hardcoded and should instead be discovered as a format commonly supported by both the encoder and the video device. 2. It is assumed that the encoder device does NOT perform any resolution change, and that the encoder output resolution is directly based on the camera receiver resolution. Thanks to this, the UVC exposed formats are the encoder output pixel format and the camera receiver resolutions. Signed-off-by: Alain Volmat <[email protected]>
1 parent 9c0851f commit a99da8d

File tree

1 file changed

+219
-9
lines changed
  • samples/subsys/usb/uvc/src

1 file changed

+219
-9
lines changed

samples/subsys/usb/uvc/src/main.c

Lines changed: 219 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,41 @@ const static struct device *const video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_cam
2323
/* Format capabilities of video_dev, used everywhere through the sample */
2424
static struct video_caps video_caps = {.type = VIDEO_BUF_TYPE_OUTPUT};
2525

26+
#if DT_HAS_CHOSEN(zephyr_videoenc)
27+
28+
#if CONFIG_VIDEO_BUFFER_POOL_NUM_MAX < 2
29+
#error CONFIG_VIDEO_BUFFER_POOL_NUM_MAX must be >=2 in order to use a zephyr,videoenc
30+
#endif
31+
32+
const static struct device *const videoenc_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_videoenc));
33+
34+
/* Format capabilities of videoenc_dev, used everywhere through the sample */
35+
static struct video_caps videoenc_out_caps = {.type = VIDEO_BUF_TYPE_OUTPUT};
36+
#else
37+
/*
38+
* When no zephyr,videoenc chosen is available, videoenc_dev pointer is kept to NULL
39+
* just to avoid having to insert #if DT_HAS_CHOSEN(zephyr_videoenc) all over the code
40+
* and thus let a maximum of code compile
41+
*/
42+
const static struct device *const videoenc_dev = NULL;
43+
#endif
44+
2645
static const struct device *app_uvc_source_dev(void)
2746
{
47+
#if DT_HAS_CHOSEN(zephyr_videoenc)
48+
return videoenc_dev;
49+
#else
2850
return video_dev;
51+
#endif
52+
}
53+
54+
static struct video_caps *app_uvc_source_caps(void)
55+
{
56+
#if DT_HAS_CHOSEN(zephyr_videoenc)
57+
return &videoenc_out_caps;
58+
#else
59+
return &video_caps;
60+
#endif
2961
}
3062

3163
/* Pixel formats present in one of the UVC 1.5 standard */
@@ -38,7 +70,7 @@ static bool app_is_supported_format(uint32_t pixfmt)
3870

3971
static bool app_has_supported_format(void)
4072
{
41-
const struct video_format_cap *fmts = video_caps.format_caps;
73+
const struct video_format_cap *fmts = app_uvc_source_caps()->format_caps;
4274

4375
for (int i = 0; fmts[i].pixelformat != 0; i++) {
4476
if (app_is_supported_format(fmts[i].pixelformat)) {
@@ -103,12 +135,14 @@ static struct video_resolution video_common_fmts[] = {
103135
static void app_add_filtered_formats(void)
104136
{
105137
const bool has_sup_fmts = app_has_supported_format();
138+
struct video_caps *uvc_src_caps = app_uvc_source_caps();
106139

107-
for (int i = 0; video_caps.format_caps[i].pixelformat != 0; i++) {
140+
for (int i = 0; uvc_src_caps->format_caps[i].pixelformat != 0; i++) {
141+
uint32_t pixelformat = uvc_src_caps->format_caps[i].pixelformat;
108142
const struct video_format_cap *vcap = &video_caps.format_caps[i];
109143
int count = 1;
110144

111-
app_add_format(vcap->pixelformat, vcap->width_min, vcap->height_min, has_sup_fmts);
145+
app_add_format(pixelformat, vcap->width_min, vcap->height_min, has_sup_fmts);
112146

113147
if (vcap->width_min != vcap->width_max || vcap->height_min != vcap->height_max) {
114148
app_add_format(vcap->pixelformat, vcap->width_max, vcap->height_max,
@@ -138,19 +172,131 @@ static void app_add_filtered_formats(void)
138172
continue;
139173
}
140174

141-
app_add_format(vcap->pixelformat, video_common_fmts[j].width,
175+
app_add_format(pixelformat, video_common_fmts[j].width,
142176
video_common_fmts[j].height, has_sup_fmts);
143177
count++;
144178
}
145179
}
146180
}
147181

182+
static int app_init_videoenc(const struct device *const dev)
183+
{
184+
int ret;
185+
186+
if (dev == NULL) {
187+
return -EINVAL;
188+
}
189+
190+
if (!device_is_ready(dev)) {
191+
LOG_ERR("video encoder %s failed to initialize", dev->name);
192+
return -ENODEV;
193+
}
194+
195+
ret = video_get_caps(dev, app_uvc_source_caps());
196+
if (ret != 0) {
197+
LOG_ERR("Unable to retrieve video encoder output capabilities");
198+
return ret;
199+
}
200+
201+
/*
202+
* FIXME - we should look carefully at both video capture output and encoder input
203+
* caps to detect intermediate format.
204+
* This is where we should define the format which is going to be used
205+
* between the camera and the encoder input
206+
*/
207+
208+
return 0;
209+
}
210+
211+
static int app_configure_videoenc(const struct device *const dev,
212+
uint32_t width, uint32_t height,
213+
uint32_t sink_pixelformat, uint32_t source_pixelformat,
214+
uint32_t nb_buffer)
215+
{
216+
struct video_format fmt = {
217+
.width = width,
218+
.height = height,
219+
};
220+
struct video_buffer *buf;
221+
int ret;
222+
223+
if (dev == NULL) {
224+
return -EINVAL;
225+
}
226+
227+
/*
228+
* Need to configure both input & output of the encoder
229+
* and allocate / enqueue buffers to the output of the
230+
* encoder
231+
*/
232+
fmt.type = VIDEO_BUF_TYPE_INPUT;
233+
fmt.pixelformat = sink_pixelformat;
234+
ret = video_set_compose_format(dev, &fmt);
235+
if (ret != 0) {
236+
LOG_ERR("Could not set the %s encoder input format", dev->name);
237+
return ret;
238+
}
239+
240+
fmt.type = VIDEO_BUF_TYPE_OUTPUT;
241+
fmt.pixelformat = source_pixelformat;
242+
ret = video_set_compose_format(dev, &fmt);
243+
if (ret != 0) {
244+
LOG_ERR("Could not set the %s encoder output format", dev->name);
245+
return ret;
246+
}
247+
248+
LOG_INF("Preparing %u buffers of %u bytes for encoder output", nb_buffer, fmt.size);
249+
250+
for (int i = 0; i < nb_buffer; i++) {
251+
buf = video_buffer_aligned_alloc(fmt.size, CONFIG_VIDEO_BUFFER_POOL_ALIGN,
252+
K_NO_WAIT);
253+
if (buf == NULL) {
254+
LOG_ERR("Could not allocate the encoder output buffer");
255+
return -ENOMEM;
256+
}
257+
258+
buf->type = VIDEO_BUF_TYPE_OUTPUT;
259+
260+
ret = video_enqueue(dev, buf);
261+
if (ret != 0) {
262+
LOG_ERR("Could not enqueue video buffer");
263+
return ret;
264+
}
265+
}
266+
267+
return 0;
268+
}
269+
270+
static int app_start_videoenc(const struct device *const dev)
271+
{
272+
int ret;
273+
274+
if (dev == NULL) {
275+
return -EINVAL;
276+
}
277+
278+
ret = video_stream_start(dev, VIDEO_BUF_TYPE_OUTPUT);
279+
if (ret != 0) {
280+
LOG_ERR("Failed to start %s output", dev->name);
281+
return ret;
282+
}
283+
284+
ret = video_stream_start(dev, VIDEO_BUF_TYPE_INPUT);
285+
if (ret != 0) {
286+
LOG_ERR("Failed to start %s input", dev->name);
287+
return ret;
288+
}
289+
290+
return 0;
291+
}
292+
148293
int main(void)
149294
{
150295
const struct device *uvc_src_dev = app_uvc_source_dev();
151296
struct usbd_context *sample_usbd;
152297
struct video_buffer *vbuf;
153298
struct video_format fmt = {0};
299+
uint32_t uvc_buf_count = CONFIG_VIDEO_BUFFER_POOL_NUM_MAX;
154300
struct video_frmival frmival = {0};
155301
struct k_poll_signal sig;
156302
struct k_poll_event evt[1];
@@ -168,6 +314,16 @@ int main(void)
168314
return 0;
169315
}
170316

317+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
318+
ret = app_init_videoenc(videoenc_dev);
319+
if (ret != 0) {
320+
return ret;
321+
}
322+
323+
/* When using encoder, we split the VIDEO_BUFFER_POOL_NUM_MAX in 2 */
324+
uvc_buf_count /= 2;
325+
}
326+
171327
/* Must be called before usb_enable() */
172328
uvc_set_video_dev(uvc_dev, uvc_src_dev);
173329

@@ -211,7 +367,27 @@ int main(void)
211367
VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height,
212368
frmival.numerator, frmival.denominator);
213369

370+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
371+
/*
372+
* FIXME - this is currently hardcoded in NV12 while it should be
373+
* a format that has been validated for both video dev and encoder
374+
*/
375+
ret = app_configure_videoenc(videoenc_dev, fmt.width, fmt.height,
376+
VIDEO_PIX_FMT_NV12, fmt.pixelformat,
377+
CONFIG_VIDEO_BUFFER_POOL_NUM_MAX - uvc_buf_count);
378+
if (ret != 0) {
379+
return ret;
380+
}
381+
}
382+
214383
fmt.type = VIDEO_BUF_TYPE_OUTPUT;
384+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
385+
/*
386+
* FIXME - this is currently hardcoded in NV12 while it should be
387+
* a format that has been validated for both video dev and encoder
388+
*/
389+
fmt.pixelformat = VIDEO_PIX_FMT_NV12;
390+
}
215391

216392
ret = video_set_compose_format(video_dev, &fmt);
217393
if (ret != 0) {
@@ -220,14 +396,19 @@ int main(void)
220396
fmt.width, fmt.height, fmt.size);
221397
}
222398

399+
/*
400+
* FIXME - shortcut here since current available encoders do not
401+
* have frmival support for the time being so this is done directly
402+
* at camera level
403+
*/
223404
ret = video_set_frmival(video_dev, &frmival);
224405
if (ret != 0) {
225406
LOG_WRN("Could not set the framerate of %s", video_dev->name);
226407
}
227408

228-
LOG_INF("Preparing %u buffers of %u bytes", CONFIG_VIDEO_BUFFER_POOL_NUM_MAX, fmt.size);
409+
LOG_INF("Preparing %u buffers of %u bytes", uvc_buf_count, fmt.size);
229410

230-
for (int i = 0; i < CONFIG_VIDEO_BUFFER_POOL_NUM_MAX; i++) {
411+
for (int i = 0; i < uvc_buf_count; i++) {
231412
vbuf = video_buffer_aligned_alloc(fmt.size, CONFIG_VIDEO_BUFFER_POOL_ALIGN,
232413
K_NO_WAIT);
233414
if (vbuf == NULL) {
@@ -244,14 +425,14 @@ int main(void)
244425
}
245426
}
246427

247-
LOG_DBG("Preparing signaling for %s input/output", video_dev->name);
428+
LOG_DBG("Preparing signaling for %s input/output", uvc_src_dev->name);
248429

249430
k_poll_signal_init(&sig);
250431
k_poll_event_init(&evt[0], K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig);
251432

252-
ret = video_set_signal(video_dev, &sig);
433+
ret = video_set_signal(uvc_src_dev, &sig);
253434
if (ret != 0) {
254-
LOG_WRN("Failed to setup the signal on %s output endpoint", video_dev->name);
435+
LOG_WRN("Failed to setup the signal on %s output endpoint", uvc_src_dev->name);
255436
timeout = K_MSEC(1);
256437
}
257438

@@ -263,6 +444,13 @@ int main(void)
263444

264445
LOG_INF("Starting the video transfer");
265446

447+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
448+
ret = app_start_videoenc(videoenc_dev);
449+
if (ret != 0) {
450+
return ret;
451+
}
452+
}
453+
266454
ret = video_stream_start(video_dev, VIDEO_BUF_TYPE_OUTPUT);
267455
if (ret != 0) {
268456
LOG_ERR("Failed to start %s", video_dev->name);
@@ -276,6 +464,17 @@ int main(void)
276464
return ret;
277465
}
278466

467+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
468+
ret = video_transfer_buffer(video_dev, uvc_src_dev,
469+
VIDEO_BUF_TYPE_OUTPUT, VIDEO_BUF_TYPE_INPUT,
470+
K_NO_WAIT);
471+
if (ret != 0 && ret != -EAGAIN) {
472+
LOG_ERR("Failed to transfer from %s to %s",
473+
video_dev->name, uvc_src_dev->name);
474+
return ret;
475+
}
476+
}
477+
279478
ret = video_transfer_buffer(uvc_src_dev, uvc_dev,
280479
VIDEO_BUF_TYPE_OUTPUT, VIDEO_BUF_TYPE_INPUT, K_NO_WAIT);
281480
if (ret != 0 && ret != -EAGAIN) {
@@ -284,6 +483,17 @@ int main(void)
284483
return ret;
285484
}
286485

486+
if (IS_ENABLED(DT_HAS_CHOSEN(zephyr_videoenc))) {
487+
ret = video_transfer_buffer(uvc_src_dev, video_dev,
488+
VIDEO_BUF_TYPE_INPUT, VIDEO_BUF_TYPE_OUTPUT,
489+
K_NO_WAIT);
490+
if (ret != 0 && ret != -EAGAIN) {
491+
LOG_ERR("Failed to transfer from %s to %s",
492+
uvc_src_dev->name, video_dev->name);
493+
return ret;
494+
}
495+
}
496+
287497
ret = video_transfer_buffer(uvc_dev, uvc_src_dev,
288498
VIDEO_BUF_TYPE_INPUT, VIDEO_BUF_TYPE_OUTPUT, K_NO_WAIT);
289499
if (ret != 0 && ret != -EAGAIN) {

0 commit comments

Comments
 (0)