[RFC] UVC Video encoder (zephyr,videoenc) support #97425
@@ -23,6 +23,19 @@ const static struct device *const video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_cam
 /* Format capabilities of video_dev, used everywhere through the sample */
 static struct video_caps video_caps = {.type = VIDEO_BUF_TYPE_OUTPUT};
 
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+const static struct device *const videoenc_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_videoenc));
+
+/* Format capabilities of videoenc_dev, used everywhere through the sample */
+static struct video_caps videoenc_in_caps = {.type = VIDEO_BUF_TYPE_INPUT};
+static struct video_caps videoenc_out_caps = {.type = VIDEO_BUF_TYPE_OUTPUT};
+const struct device *uvc_src_dev = videoenc_dev;
+struct video_caps *uvc_src_caps = &videoenc_out_caps;
+#else
+const struct device *uvc_src_dev = video_dev;
+struct video_caps *uvc_src_caps = &video_caps;
+#endif
+
 static bool app_is_standard_format(uint32_t pixfmt)
 {
 	return pixfmt == VIDEO_PIX_FMT_GREY || pixfmt == VIDEO_PIX_FMT_JPEG ||
@@ -32,7 +45,7 @@ static bool app_is_standard_format(uint32_t pixfmt)
 /* Check whether the video device supports one of the wisespread image sensor formats */
 static bool app_has_standard_formats(void)
 {
-	const struct video_format_cap *fmts = video_caps.format_caps;
+	const struct video_format_cap *fmts = uvc_src_caps->format_caps;
 
 	for (int i = 0; fmts[i].pixelformat != 0; i++) {
 		if (app_is_standard_format(fmts[i].pixelformat)) {
@@ -59,9 +72,9 @@ static void app_add_format(uint32_t pixfmt, uint16_t width, uint16_t height, boo
 	}
 
 	/* Set the format to get the pitch */
-	ret = video_set_compose_format(video_dev, &fmt);
+	ret = video_set_compose_format(uvc_src_dev, &fmt);
 	if (ret != 0) {
-		LOG_ERR("Could not set the format of %s", video_dev->name);
+		LOG_ERR("Could not set the format of %s", uvc_src_dev->name);
 		return;
 	}
 
@@ -95,14 +108,19 @@ static void app_add_filtered_formats(void)
 {
 	const bool has_std_fmts = app_has_standard_formats();
 
-	for (int i = 0; video_caps.format_caps[i].pixelformat != 0; i++) {
+	for (int i = 0; uvc_src_caps->format_caps[i].pixelformat != 0; i++) {
+		/*
+		 * FIXME - a shortcut is done here: we consider that encoders do not allow
+		 * compose, hence we use the encoder output format but the camera output resolution
+		 */
+		uint32_t pixelformat = uvc_src_caps->format_caps[i].pixelformat;
 		const struct video_format_cap *vcap = &video_caps.format_caps[i];
 		int range_count = 0;
 
-		app_add_format(vcap->pixelformat, vcap->width_min, vcap->height_min, has_std_fmts);
+		app_add_format(pixelformat, vcap->width_min, vcap->height_min, has_std_fmts);
 
 		if (vcap->width_min != vcap->width_max || vcap->height_min != vcap->height_max) {
-			app_add_format(vcap->pixelformat, vcap->width_max, vcap->height_max,
+			app_add_format(pixelformat, vcap->width_max, vcap->height_max,
 				       has_std_fmts);
 		}
 
@@ -126,7 +144,7 @@ static void app_add_filtered_formats(void)
 				continue;
 			}
 
-			app_add_format(vcap->pixelformat, video_common_fmts[j].width,
+			app_add_format(pixelformat, video_common_fmts[j].width,
 				       video_common_fmts[j].height, has_std_fmts);
 		}
 	}
@@ -137,6 +155,25 @@ int main(void)
 	struct usbd_context *sample_usbd;
 	struct video_buffer *vbuf;
 	struct video_format fmt = {0};
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+	struct video_format fmt_enc_in = {
+		/*
+		 * FIXME - this is hardcoded for the time being, just for debug,
+		 * until proper analysis of camera / encoder caps is done to
+		 * select the right common format
+		 */
+		.pixelformat = VIDEO_PIX_FMT_NV12,
+	};
+#if CONFIG_VIDEO_BUFFER_POOL_NUM_MAX < 2
+#error CONFIG_VIDEO_BUFFER_POOL_NUM_MAX must be >=2 in order to use a zephyr,videoenc
+#endif
+	struct video_buffer *vbuf_enc_in;
+	/* The buffer count is shared between the encoder and the camera */
+	uint32_t enc_in_buf_nb = CONFIG_VIDEO_BUFFER_POOL_NUM_MAX / 2;
+	uint32_t uvc_src_buf_nb = CONFIG_VIDEO_BUFFER_POOL_NUM_MAX / 2;
+#else
+	uint32_t uvc_src_buf_nb = CONFIG_VIDEO_BUFFER_POOL_NUM_MAX;
+#endif
 	struct video_frmival frmival = {0};
 	struct k_poll_signal sig;
 	struct k_poll_event evt[1];
@@ -154,8 +191,33 @@ int main(void)
 		return 0;
 	}
 
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+	if (!device_is_ready(videoenc_dev)) {
+		LOG_ERR("video encoder %s failed to initialize", videoenc_dev->name);
+		return -ENODEV;
+	}
+
+	ret = video_get_caps(videoenc_dev, &videoenc_in_caps);
+	if (ret != 0) {
+		LOG_ERR("Unable to retrieve video encoder input capabilities");
+		return 0;
+	}
+
+	ret = video_get_caps(videoenc_dev, &videoenc_out_caps);
+	if (ret != 0) {
+		LOG_ERR("Unable to retrieve video encoder output capabilities");
+		return 0;
+	}
+
+	/*
+	 * FIXME - we should look carefully at both caps to detect the intermediate format.
+	 * This is where we should define the format which is going to be used
+	 * between the camera and the encoder input.
+	 */
+#endif
+
 	/* Must be called before usb_enable() */
-	uvc_set_video_dev(uvc_dev, video_dev);
+	uvc_set_video_dev(uvc_dev, uvc_src_dev);
 
 	/* Must be called before uvc_set_video_dev() */
 	app_add_filtered_formats();
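As a possible direction for the FIXME above (and for the hardcoded VIDEO_PIX_FMT_NV12 earlier in main()), here is a minimal sketch of how a common intermediate format could be picked by intersecting the camera output caps with the encoder input caps. The helper name app_select_intermediate_format and its placement are illustrative only and not part of this PR; it relies solely on the video_caps structures the sample already queries:

	/* Illustrative only: pick the first pixel format that the camera can
	 * output and the encoder can accept as input, instead of hardcoding
	 * VIDEO_PIX_FMT_NV12.
	 */
	static int app_select_intermediate_format(uint32_t *pixfmt)
	{
		for (int i = 0; videoenc_in_caps.format_caps[i].pixelformat != 0; i++) {
			uint32_t candidate = videoenc_in_caps.format_caps[i].pixelformat;

			for (int j = 0; video_caps.format_caps[j].pixelformat != 0; j++) {
				if (video_caps.format_caps[j].pixelformat == candidate) {
					*pixfmt = candidate;
					return 0;
				}
			}
		}

		return -ENOTSUP;
	}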
@@ -197,21 +259,67 @@ int main(void)
 		VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height,
 		frmival.numerator, frmival.denominator);
 
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+	/*
+	 * If there is an encoder, it is necessary to also configure the
+	 * camera output format and the encoder input format
+	 */
+
+	fmt_enc_in.type = VIDEO_BUF_TYPE_OUTPUT;
+	fmt_enc_in.width = fmt.width;
+	fmt_enc_in.height = fmt.height;
+	ret = video_set_compose_format(video_dev, &fmt_enc_in);
+	if (ret != 0) {
+		LOG_WRN("Could not set the format of %s output", video_dev->name);
+	}
+
+	fmt_enc_in.type = VIDEO_BUF_TYPE_INPUT;
+	ret = video_set_compose_format(uvc_src_dev, &fmt_enc_in);
+	if (ret != 0) {
+		LOG_WRN("Could not set the format of %s input", uvc_src_dev->name);
+	}
+
+	LOG_INF("Preparing %u buffers of %u bytes for camera-encoder",
+		enc_in_buf_nb, fmt_enc_in.size);
+
+	for (int i = 0; i < enc_in_buf_nb; i++) {
+		vbuf_enc_in = video_buffer_aligned_alloc(fmt_enc_in.size,
+							 CONFIG_VIDEO_BUFFER_POOL_ALIGN, K_NO_WAIT);
+		if (vbuf_enc_in == NULL) {
+			LOG_ERR("Could not allocate the encoder input buffer");
+			return -ENOMEM;
+		}
+
+		vbuf_enc_in->type = VIDEO_BUF_TYPE_OUTPUT;
+
+		ret = video_enqueue(video_dev, vbuf_enc_in);
+		if (ret != 0) {
+			LOG_ERR("Could not enqueue video buffer");
+			return ret;
+		}
+	}
+#endif
+
 	fmt.type = VIDEO_BUF_TYPE_OUTPUT;
 
-	ret = video_set_compose_format(video_dev, &fmt);
+	ret = video_set_compose_format(uvc_src_dev, &fmt);
 	if (ret != 0) {
-		LOG_WRN("Could not set the format of %s", video_dev->name);
+		LOG_WRN("Could not set the format of %s output", uvc_src_dev->name);
 	}
 
+	/*
+	 * FIXME - shortcut here since currently available encoders do not
+	 * have frmival support for the time being, so this is done directly
+	 * at the camera level
+	 */
 	ret = video_set_frmival(video_dev, &frmival);
 	if (ret != 0) {
 		LOG_WRN("Could not set the framerate of %s", video_dev->name);
 	}
 
-	LOG_INF("Preparing %u buffers of %u bytes", CONFIG_VIDEO_BUFFER_POOL_NUM_MAX, fmt.size);
+	LOG_INF("Preparing %u buffers of %u bytes", uvc_src_buf_nb, fmt.size);
 
-	for (int i = 0; i < CONFIG_VIDEO_BUFFER_POOL_NUM_MAX; i++) {
+	for (int i = 0; i < uvc_src_buf_nb; i++) {
 		vbuf = video_buffer_aligned_alloc(fmt.size, CONFIG_VIDEO_BUFFER_POOL_ALIGN,
 						  K_NO_WAIT);
 		if (vbuf == NULL) {
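For the frmival FIXME just above, one hedged alternative (not part of this PR) would be to try the frame interval on the UVC source first and fall back to the camera only when that fails, so the shortcut naturally disappears once encoders gain frmival support:

	/* Sketch only: prefer the UVC source (the encoder, when present) and
	 * fall back to the camera while encoders lack frmival support.
	 */
	ret = video_set_frmival(uvc_src_dev, &frmival);
	if (ret != 0) {
		ret = video_set_frmival(video_dev, &frmival);
		if (ret != 0) {
			LOG_WRN("Could not set the framerate of %s", video_dev->name);
		}
	}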
@@ -221,21 +329,21 @@ int main(void)
 
 		vbuf->type = VIDEO_BUF_TYPE_OUTPUT;
 
-		ret = video_enqueue(video_dev, vbuf);
+		ret = video_enqueue(uvc_src_dev, vbuf);
 		if (ret != 0) {
 			LOG_ERR("Could not enqueue video buffer");
 			return ret;
 		}
 	}
 
-	LOG_DBG("Preparing signaling for %s input/output", video_dev->name);
+	LOG_DBG("Preparing signaling for %s input/output", uvc_src_dev->name);
 
 	k_poll_signal_init(&sig);
 	k_poll_event_init(&evt[0], K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig);
 
-	ret = video_set_signal(video_dev, &sig);
+	ret = video_set_signal(uvc_src_dev, &sig);
 	if (ret != 0) {
-		LOG_WRN("Failed to setup the signal on %s output endpoint", video_dev->name);
+		LOG_WRN("Failed to setup the signal on %s output endpoint", uvc_src_dev->name);
 		timeout = K_MSEC(1);
 	}
 
@@ -247,11 +355,24 @@
 
 	LOG_INF("Starting the video transfer");
 
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+	ret = video_stream_start(video_dev, VIDEO_BUF_TYPE_OUTPUT);
+	if (ret != 0) {
+		LOG_ERR("Failed to start %s", video_dev->name);
+		return ret;
+	}
+	ret = video_stream_start(uvc_src_dev, VIDEO_BUF_TYPE_INPUT);
+	if (ret != 0) {
+		LOG_ERR("Failed to start %s", uvc_src_dev->name);
+		return ret;
+	}
+#endif
+
-	ret = video_stream_start(video_dev, VIDEO_BUF_TYPE_OUTPUT);
+	ret = video_stream_start(uvc_src_dev, VIDEO_BUF_TYPE_OUTPUT);
 	if (ret != 0) {
 		LOG_ERR("Failed to start %s", video_dev->name);
 		return ret;
 	}
Comment on lines +371 to +375

How about using videoenc_dev for both here? Then if a videoscaler_dev is introduced, this becomes another #if DT_HAS_CHOSEN(zephyr_videoscaler) that uses the associated videoscaler_dev for both input/output?
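A rough sketch of that suggestion, assuming a hypothetical zephyr,videoscaler chosen node with a videoscaler_dev handle alongside the encoder (neither is part of this PR): each M2M stage starts both of its own queues inside its own block, and the camera keeps a single unconditional start:

	#if DT_HAS_CHOSEN(zephyr_videoenc)
		/* Illustrative: the encoder starts both of its own queues */
		ret = video_stream_start(videoenc_dev, VIDEO_BUF_TYPE_INPUT);
		if (ret != 0) {
			return ret;
		}
		ret = video_stream_start(videoenc_dev, VIDEO_BUF_TYPE_OUTPUT);
		if (ret != 0) {
			return ret;
		}
	#endif
	#if DT_HAS_CHOSEN(zephyr_videoscaler) /* hypothetical chosen node */
		ret = video_stream_start(videoscaler_dev, VIDEO_BUF_TYPE_INPUT);
		if (ret != 0) {
			return ret;
		}
		ret = video_stream_start(videoscaler_dev, VIDEO_BUF_TYPE_OUTPUT);
		if (ret != 0) {
			return ret;
		}
	#endif
		ret = video_stream_start(video_dev, VIDEO_BUF_TYPE_OUTPUT);
		if (ret != 0) {
			return ret;
		}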
 
 	while (true) {
 		ret = k_poll(evt, ARRAY_SIZE(evt), timeout);
@@ -260,9 +381,27 @@
 			return ret;
 		}
 
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+		vbuf_enc_in = &(struct video_buffer){.type = VIDEO_BUF_TYPE_OUTPUT};
+
+		if (video_dequeue(video_dev, &vbuf_enc_in, K_NO_WAIT) == 0) {
This works well for this sample but this is where things will start to become difficult. One way to solve this, maybe, is to have a configuration table at the top:

	video_dst = video_dev;
	prev = &video_dst;
	#if DT_HAS_CHOSEN(zephyr_videoisp)
	videoisp_src = *prev;
	*prev = videoisp_dev;
	prev = &videoisp_dst;
	#endif
	#if DT_HAS_CHOSEN(zephyr_videoscaler)
	videoscaler_src = *prev;
	*prev = videoscaler_dev;
	prev = &videoscaler_dst;
	#endif
	#if DT_HAS_CHOSEN(zephyr_videoenc)
	videoenc_src = *prev;
	*prev = videoenc_dev;
	prev = &videoenc_dst;
	#endif
	videousb_src = *prev;
	*prev = videousb_dev;

Then in each block, it is possible to just use the matching videoXXX_src / videoXXX_dst variables. Of course a component-based system is better, but this would be a different sample for libMP. :]

This is just for discussion and not necessarily to implement on this PR, though, as there is only one M2M device.
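Purely as an illustration of that table idea (not something this PR implements), each stage's block in the poll loop would then only refer to its own endpoints, e.g. for the encoder, using the videoenc_src variable from the table above:

	#if DT_HAS_CHOSEN(zephyr_videoenc)
		/* Illustrative: dequeue from whatever feeds the encoder, requeue into it */
		vbuf_enc_in = &(struct video_buffer){.type = VIDEO_BUF_TYPE_OUTPUT};

		if (video_dequeue(videoenc_src, &vbuf_enc_in, K_NO_WAIT) == 0) {
			vbuf_enc_in->type = VIDEO_BUF_TYPE_INPUT;

			ret = video_enqueue(videoenc_dev, vbuf_enc_in);
			if (ret != 0) {
				return ret;
			}
		}
	#endif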
+			LOG_DBG("Dequeued %p from %s, enqueueing to %s",
+				(void *)vbuf_enc_in, video_dev->name, uvc_src_dev->name);
+
+			vbuf_enc_in->type = VIDEO_BUF_TYPE_INPUT;
+
+			ret = video_enqueue(uvc_src_dev, vbuf_enc_in);
+			if (ret != 0) {
+				LOG_ERR("Could not enqueue video buffer to %s",
+					uvc_src_dev->name);
+				return ret;
+			}
+		}
+#endif
+
 		vbuf = &(struct video_buffer){.type = VIDEO_BUF_TYPE_OUTPUT};
 
-		if (video_dequeue(video_dev, &vbuf, K_NO_WAIT) == 0) {
+		if (video_dequeue(uvc_src_dev, &vbuf, K_NO_WAIT) == 0) {
 			LOG_DBG("Dequeued %p from %s, enqueueing to %s",
 				(void *)vbuf, video_dev->name, uvc_dev->name);
 
@@ -273,6 +412,24 @@
 				LOG_ERR("Could not enqueue video buffer to %s", uvc_dev->name);
 				return ret;
 			}
+
+#if DT_HAS_CHOSEN(zephyr_videoenc)
+			vbuf_enc_in = &(struct video_buffer){.type = VIDEO_BUF_TYPE_INPUT};
+
+			if (video_dequeue(uvc_src_dev, &vbuf_enc_in, K_NO_WAIT) == 0) {
+				LOG_DBG("Dequeued %p from %s, enqueueing to %s",
+					(void *)vbuf_enc_in, uvc_src_dev->name, video_dev->name);
+
+				vbuf_enc_in->type = VIDEO_BUF_TYPE_OUTPUT;
+
+				ret = video_enqueue(video_dev, vbuf_enc_in);
+				if (ret != 0) {
+					LOG_ERR("Could not enqueue video buffer to %s",
+						video_dev->name);
+					return ret;
+				}
+			}
+#endif
 		}
 
 		vbuf = &(struct video_buffer){.type = VIDEO_BUF_TYPE_INPUT};
@@ -283,9 +440,10 @@
 
 			vbuf->type = VIDEO_BUF_TYPE_OUTPUT;
 
-			ret = video_enqueue(video_dev, vbuf);
+			ret = video_enqueue(uvc_src_dev, vbuf);
 			if (ret != 0) {
-				LOG_ERR("Could not enqueue video buffer to %s", video_dev->name);
+				LOG_ERR("Could not enqueue video buffer to %s",
+					uvc_src_dev->name);
 				return ret;
 			}
 		}