@@ -901,7 +901,7 @@ void VideoDecoder::convertAVFrameToDecodedOutputOnCPU(
   int height = 0;
   int width = 0;
   std::tie(height, width) =
-      getHeightAndWidthFromOptionsOrAVFrame(streamInfo.options, frame);
+      getHeightAndWidthFromOptionsOrAVFrame(streamInfo.options, *frame);
   if (preAllocatedOutputTensor.has_value()) {
     tensor = preAllocatedOutputTensor.value();
     auto shape = tensor.sizes();
@@ -1318,7 +1318,7 @@ void VideoDecoder::convertFrameToBufferUsingSwsScale(
   int outputHeight = 0;
   int outputWidth = 0;
   std::tie(outputHeight, outputWidth) =
-      getHeightAndWidthFromOptionsOrAVFrame(activeStream.options, frame);
+      getHeightAndWidthFromOptionsOrAVFrame(activeStream.options, *frame);
   if (activeStream.swsContext.get() == nullptr) {
     SwsContext* swsContext = sws_getContext(
         frame->width,
@@ -1387,7 +1387,7 @@ torch::Tensor VideoDecoder::convertFrameToTensorUsingFilterGraph(
   int height = 0;
   int width = 0;
   std::tie(height, width) = getHeightAndWidthFromOptionsOrAVFrame(
-      streams_[streamIndex].options, filteredFrame.get());
+      streams_[streamIndex].options, *filteredFrame.get());
   std::vector<int64_t> shape = {height, width, 3};

   std::vector<int64_t> strides = {filteredFrame->linesize[0], 3, 1};
@@ -1423,10 +1423,10 @@ std::tuple<int, int> getHeightAndWidthFromOptionsOrMetadata(

 std::tuple<int, int> getHeightAndWidthFromOptionsOrAVFrame(
     const VideoDecoder::VideoStreamDecoderOptions& options,
-    AVFrame* avFrame) {
+    const AVFrame& avFrame) {
   return std::make_tuple(
       options.height.value_or(avFrame.height),
       options.width.value_or(avFrame.width));
 }

 torch::Tensor allocateEmptyHWCTensor(
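
Below is a minimal, self-contained sketch of the change above (not part of the PR; AVFrame and VideoStreamDecoderOptions here are simplified stand-ins for the FFmpeg and torchcodec types). It shows the effect of switching the helper from AVFrame* to const AVFrame&: the frame becomes a required, read-only input, call sites dereference their frame pointers (e.g. *frame), and std::optional::value_or falls back to the frame's own dimensions when no override is set.

// Minimal sketch, assuming simplified stand-in types; not the real decoder code.
#include <cassert>
#include <optional>
#include <tuple>

struct AVFrame { // stand-in for FFmpeg's AVFrame
  int width;
  int height;
};

struct VideoStreamDecoderOptions { // stand-in for the decoder options
  std::optional<int> height;
  std::optional<int> width;
};

// const AVFrame& instead of AVFrame*: the frame can no longer be null,
// and the signature documents that it is only read, never modified.
std::tuple<int, int> getHeightAndWidthFromOptionsOrAVFrame(
    const VideoStreamDecoderOptions& options,
    const AVFrame& avFrame) {
  return std::make_tuple(
      options.height.value_or(avFrame.height),
      options.width.value_or(avFrame.width));
}

int main() {
  AVFrame frame{/*width=*/1920, /*height=*/1080};
  VideoStreamDecoderOptions options{/*height=*/std::nullopt, /*width=*/1280};
  auto [height, width] = getHeightAndWidthFromOptionsOrAVFrame(options, frame);
  assert(height == 1080); // no height option: falls back to the frame
  assert(width == 1280); // width option overrides the frame's width
  return 0;
}

Passing by const reference also keeps ownership unambiguous at the call sites: a smart-pointer-held frame is dereferenced (as in *filteredFrame.get() above) rather than handing out its raw pointer.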