@@ -68,12 +68,12 @@ VideoDecoder::VideoDecoder(const std::string& videoFilePath, SeekMode seekMode)
   av_log_set_level(AV_LOG_QUIET);

   AVFormatContext* rawContext = nullptr;
-  int ffmpegStatus =
+  int status =
       avformat_open_input(&rawContext, videoFilePath.c_str(), nullptr, nullptr);
   TORCH_CHECK(
-      ffmpegStatus == 0,
+      status == 0,
       "Could not open input file: " + videoFilePath + " " +
-          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+          getFFMPEGErrorStringFromErrorCode(status));
   TORCH_CHECK(rawContext != nullptr);

   formatContext_.reset(rawContext);
@@ -97,14 +97,13 @@ VideoDecoder::VideoDecoder(const void* data, size_t length, SeekMode seekMode)
   TORCH_CHECK(rawContext != nullptr, "Unable to alloc avformat context");

   rawContext->pb = ioBytesContext_->getAVIO();
-  int ffmpegStatus =
-      avformat_open_input(&rawContext, nullptr, nullptr, nullptr);
-  if (ffmpegStatus != 0) {
+  int status = avformat_open_input(&rawContext, nullptr, nullptr, nullptr);
+  if (status != 0) {
     avformat_free_context(rawContext);
     TORCH_CHECK(
         false,
         "Failed to open input buffer: " +
-            getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+            getFFMPEGErrorStringFromErrorCode(status));
   }

   formatContext_.reset(rawContext);
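
Both constructors above convert a negative FFmpeg return code into a readable message via getFFMPEGErrorStringFromErrorCode(). That helper's body is not part of this diff; the sketch below shows what such a helper typically looks like, assuming it wraps av_strerror(). The name errorCodeToString is hypothetical.

extern "C" {
#include <libavutil/error.h>
}
#include <string>

// Hypothetical helper: convert a negative AVERROR status into a message.
std::string errorCodeToString(int status) {
  char errorBuffer[AV_ERROR_MAX_STRING_SIZE] = {0};
  // av_strerror() fills the buffer with a description of the error code.
  av_strerror(status, errorBuffer, AV_ERROR_MAX_STRING_SIZE);
  return std::string(errorBuffer);
}
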
@@ -132,11 +131,11 @@ void VideoDecoder::initializeDecoder() {
   // store enough info in the header, so we call avformat_find_stream_info()
   // which decodes a few frames to get missing info. For more, see:
   // https://ffmpeg.org/doxygen/7.0/group__lavf__decoding.html
-  int ffmpegStatus = avformat_find_stream_info(formatContext_.get(), nullptr);
-  if (ffmpegStatus < 0) {
+  int status = avformat_find_stream_info(formatContext_.get(), nullptr);
+  if (status < 0) {
     throw std::runtime_error(
         "Failed to find stream info: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }

   for (unsigned int i = 0; i < formatContext_->nb_streams; i++) {
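
For context on the call being renamed here: avformat_find_stream_info() reads (and partially decodes) a few packets so that per-stream parameters missing from the container header get filled in, after which formatContext_->nb_streams can be iterated. A minimal standalone sketch in the plain FFmpeg C API, without the project's RAII wrappers; probeStreams is a hypothetical function name:

extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdio>

// Hypothetical probe: open a file, gather stream info, list the streams.
int probeStreams(const char* path) {
  AVFormatContext* formatContext = nullptr;
  int status = avformat_open_input(&formatContext, path, nullptr, nullptr);
  if (status < 0) {
    return status;
  }
  // Decodes a few frames if the header alone doesn't describe every stream.
  status = avformat_find_stream_info(formatContext, nullptr);
  if (status < 0) {
    avformat_close_input(&formatContext);
    return status;
  }
  for (unsigned int i = 0; i < formatContext->nb_streams; i++) {
    const AVCodecParameters* params = formatContext->streams[i]->codecpar;
    const char* mediaType = av_get_media_type_string(params->codec_type);
    std::printf("stream %u: %s\n", i, mediaType ? mediaType : "unknown");
  }
  avformat_close_input(&formatContext);
  return 0;
}
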
@@ -231,16 +230,16 @@ void VideoDecoder::scanFileAndUpdateMetadataAndIndex() {
     ReferenceAVPacket packet(autoAVPacket);

     // av_read_frame is a misleading name: it gets the next **packet**.
-    int ffmpegStatus = av_read_frame(formatContext_.get(), packet.get());
+    int status = av_read_frame(formatContext_.get(), packet.get());

-    if (ffmpegStatus == AVERROR_EOF) {
+    if (status == AVERROR_EOF) {
       break;
     }

-    if (ffmpegStatus != AVSUCCESS) {
+    if (status != AVSUCCESS) {
       throw std::runtime_error(
           "Failed to read frame from input file: " +
-          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+          getFFMPEGErrorStringFromErrorCode(status));
     }

     if (packet->flags & AV_PKT_FLAG_DISCARD) {
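
The scan loop touched above is the standard packet-reading pattern: av_read_frame() returns the next packet, AVERROR_EOF ends the loop, any other negative status is an error, and packets flagged AV_PKT_FLAG_DISCARD are skipped. A rough equivalent in the plain C API, without the project's ReferenceAVPacket wrapper; countKeptPackets is a hypothetical helper:

extern "C" {
#include <libavformat/avformat.h>
}

// Hypothetical helper: count the packets a scan would keep.
int countKeptPackets(AVFormatContext* formatContext) {
  AVPacket* packet = av_packet_alloc();
  int numPackets = 0;
  while (true) {
    int status = av_read_frame(formatContext, packet);
    if (status == AVERROR_EOF) {
      break; // no more packets in the file
    }
    if (status < 0) {
      av_packet_free(&packet);
      return status;
    }
    if (!(packet->flags & AV_PKT_FLAG_DISCARD)) {
      numPackets++;
    }
    av_packet_unref(packet); // release this packet before reusing the struct
  }
  av_packet_free(&packet);
  return numPackets;
}
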
@@ -923,23 +922,23 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(
   // Need to get the next frame or error from PopFrame.
   UniqueAVFrame avFrame(av_frame_alloc());
   AutoAVPacket autoAVPacket;
-  int ffmpegStatus = AVSUCCESS;
+  int status = AVSUCCESS;
   bool reachedEOF = false;
   while (true) {
-    ffmpegStatus =
+    status =
         avcodec_receive_frame(streamInfo.codecContext.get(), avFrame.get());

-    if (ffmpegStatus != AVSUCCESS && ffmpegStatus != AVERROR(EAGAIN)) {
+    if (status != AVSUCCESS && status != AVERROR(EAGAIN)) {
       // Non-retriable error
       break;
     }

     decodeStats_.numFramesReceivedByDecoder++;
     // Is this the kind of frame we're looking for?
-    if (ffmpegStatus == AVSUCCESS && filterFunction(avFrame.get())) {
+    if (status == AVSUCCESS && filterFunction(avFrame.get())) {
       // Yes, this is the frame we'll return; break out of the decoding loop.
       break;
-    } else if (ffmpegStatus == AVSUCCESS) {
+    } else if (status == AVSUCCESS) {
       // No, but we received a valid frame - just not the kind we're looking
       // for. The logic below will read packets and send them to the decoder.
       // But since we did just receive a frame, we should skip reading more
@@ -958,29 +957,29 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(
     // packets and send them to the decoder.
     ReferenceAVPacket packet(autoAVPacket);
     do {
-      ffmpegStatus = av_read_frame(formatContext_.get(), packet.get());
+      status = av_read_frame(formatContext_.get(), packet.get());
       decodeStats_.numPacketsRead++;

-      if (ffmpegStatus == AVERROR_EOF) {
+      if (status == AVERROR_EOF) {
         // End of file reached. We must drain the codec by sending a nullptr
         // packet.
-        ffmpegStatus = avcodec_send_packet(
+        status = avcodec_send_packet(
             streamInfo.codecContext.get(),
             /*avpkt=*/nullptr);
-        if (ffmpegStatus < AVSUCCESS) {
+        if (status < AVSUCCESS) {
           throw std::runtime_error(
               "Could not flush decoder: " +
-              getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+              getFFMPEGErrorStringFromErrorCode(status));
         }

         reachedEOF = true;
         break;
       }

-      if (ffmpegStatus < AVSUCCESS) {
+      if (status < AVSUCCESS) {
         throw std::runtime_error(
             "Could not read frame from input file: " +
-            getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+            getFFMPEGErrorStringFromErrorCode(status));
       }
     } while (packet->stream_index != activeStreamIndex_);
@@ -992,26 +991,25 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(

     // We got a valid packet. Send it to the decoder, and we'll receive it in
     // the next iteration.
-    ffmpegStatus =
-        avcodec_send_packet(streamInfo.codecContext.get(), packet.get());
-    if (ffmpegStatus < AVSUCCESS) {
+    status = avcodec_send_packet(streamInfo.codecContext.get(), packet.get());
+    if (status < AVSUCCESS) {
       throw std::runtime_error(
           "Could not push packet to decoder: " +
-          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+          getFFMPEGErrorStringFromErrorCode(status));
     }

     decodeStats_.numPacketsSentToDecoder++;
   }

-  if (ffmpegStatus < AVSUCCESS) {
-    if (reachedEOF || ffmpegStatus == AVERROR_EOF) {
+  if (status < AVSUCCESS) {
+    if (reachedEOF || status == AVERROR_EOF) {
       throw VideoDecoder::EndOfFileException(
           "Requested next frame while there are no more frames left to "
           "decode.");
     }
     throw std::runtime_error(
         "Could not receive frame from decoder: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }

   // Note that we don't flush the decoder when we reach EOF (even though that's
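
The three hunks above all touch decodeAVFrame(), which follows FFmpeg's send/receive protocol: pull frames with avcodec_receive_frame(), feed packets with avcodec_send_packet(), and once av_read_frame() hits AVERROR_EOF send a nullptr packet so the decoder's buffered frames can be drained. The sketch below is a generic version of that loop in the plain C API; decodeStream and onFrame are hypothetical names, and the project's filterFunction/statistics logic is omitted.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <functional>

// Hypothetical generic decode loop: onFrame is called for every decoded frame
// of the chosen stream, including frames drained after end of file.
int decodeStream(
    AVFormatContext* formatContext,
    AVCodecContext* codecContext,
    int streamIndex,
    const std::function<void(const AVFrame*)>& onFrame) {
  AVPacket* packet = av_packet_alloc();
  AVFrame* frame = av_frame_alloc();
  int status = 0;
  bool sawEOF = false;
  while (!sawEOF) {
    status = av_read_frame(formatContext, packet);
    if (status == AVERROR_EOF) {
      // Drain: a nullptr packet tells the decoder no more input is coming.
      status = avcodec_send_packet(codecContext, nullptr);
      sawEOF = true;
    } else if (status < 0) {
      break; // read error
    } else if (packet->stream_index == streamIndex) {
      status = avcodec_send_packet(codecContext, packet);
    } else {
      av_packet_unref(packet); // packet belongs to another stream
      continue;
    }
    av_packet_unref(packet);
    if (status < 0) {
      break; // send error
    }
    // Pull every frame the decoder can produce from the input it has so far.
    while ((status = avcodec_receive_frame(codecContext, frame)) == 0) {
      onFrame(frame);
      av_frame_unref(frame);
    }
    if (status != AVERROR(EAGAIN) && status != AVERROR_EOF) {
      break; // non-retriable decoder error
    }
  }
  av_frame_free(&frame);
  av_packet_free(&packet);
  return (status == AVERROR(EAGAIN) || status == AVERROR_EOF) ? 0 : status;
}

The decoder in this file throws exceptions instead of returning status codes and keeps the received frame instead of handing it to a callback, but the underlying send/receive/drain sequence is the same.
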
@@ -1197,14 +1195,14 @@ torch::Tensor VideoDecoder::convertAVFrameToTensorUsingFilterGraph(
     const AVFrame* avFrame) {
   FilterGraphContext& filterGraphContext =
       streamInfos_[activeStreamIndex_].filterGraphContext;
-  int ffmpegStatus =
+  int status =
       av_buffersrc_write_frame(filterGraphContext.sourceContext, avFrame);
-  if (ffmpegStatus < AVSUCCESS) {
+  if (status < AVSUCCESS) {
     throw std::runtime_error("Failed to add frame to buffer source context");
   }

   UniqueAVFrame filteredAVFrame(av_frame_alloc());
-  ffmpegStatus = av_buffersink_get_frame(
+  status = av_buffersink_get_frame(
       filterGraphContext.sinkContext, filteredAVFrame.get());
   TORCH_CHECK_EQ(filteredAVFrame->format, AV_PIX_FMT_RGB24);

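
convertAVFrameToTensorUsingFilterGraph() relies on libavfilter's push/pull pair: av_buffersrc_write_frame() pushes the decoded frame into the graph, and av_buffersink_get_frame() pulls the converted RGB24 frame out of the sink. A minimal sketch of that step; filterOneFrame is a hypothetical helper, and the source/sink contexts are assumed to have been built as in createFilterGraph() below.

extern "C" {
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
}

// Hypothetical helper: push one decoded frame through the graph and pull the
// converted result out of the sink.
int filterOneFrame(
    AVFilterContext* sourceContext,
    AVFilterContext* sinkContext,
    const AVFrame* decodedFrame,
    AVFrame* filteredFrame) {
  int status = av_buffersrc_write_frame(sourceContext, decodedFrame);
  if (status < 0) {
    return status;
  }
  // For a simple scale/format chain, one input frame yields one output frame.
  return av_buffersink_get_frame(sinkContext, filteredFrame);
}
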
@@ -1328,44 +1326,44 @@ void VideoDecoder::createFilterGraph(
   filterArgs << ":pixel_aspect=" << codecContext->sample_aspect_ratio.num << "/"
              << codecContext->sample_aspect_ratio.den;

-  int ffmpegStatus = avfilter_graph_create_filter(
+  int status = avfilter_graph_create_filter(
       &filterGraphContext.sourceContext,
       buffersrc,
       "in",
       filterArgs.str().c_str(),
       nullptr,
       filterGraphContext.filterGraph.get());
-  if (ffmpegStatus < 0) {
+  if (status < 0) {
     throw std::runtime_error(
         std::string("Failed to create filter graph: ") + filterArgs.str() +
-        ": " + getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        ": " + getFFMPEGErrorStringFromErrorCode(status));
   }

-  ffmpegStatus = avfilter_graph_create_filter(
+  status = avfilter_graph_create_filter(
       &filterGraphContext.sinkContext,
       buffersink,
       "out",
       nullptr,
       nullptr,
       filterGraphContext.filterGraph.get());
-  if (ffmpegStatus < 0) {
+  if (status < 0) {
     throw std::runtime_error(
         "Failed to create filter graph: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }

   enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};

-  ffmpegStatus = av_opt_set_int_list(
+  status = av_opt_set_int_list(
       filterGraphContext.sinkContext,
       "pix_fmts",
       pix_fmts,
       AV_PIX_FMT_NONE,
       AV_OPT_SEARCH_CHILDREN);
-  if (ffmpegStatus < 0) {
+  if (status < 0) {
     throw std::runtime_error(
         "Failed to set output pixel formats: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }

   UniqueAVFilterInOut outputs(avfilter_inout_alloc());
@@ -1386,26 +1384,25 @@ void VideoDecoder::createFilterGraph(

   AVFilterInOut* outputsTmp = outputs.release();
   AVFilterInOut* inputsTmp = inputs.release();
-  ffmpegStatus = avfilter_graph_parse_ptr(
+  status = avfilter_graph_parse_ptr(
       filterGraphContext.filterGraph.get(),
       description.str().c_str(),
       &inputsTmp,
       &outputsTmp,
       nullptr);
   outputs.reset(outputsTmp);
   inputs.reset(inputsTmp);
-  if (ffmpegStatus < 0) {
+  if (status < 0) {
     throw std::runtime_error(
         "Failed to parse filter description: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }

-  ffmpegStatus =
-      avfilter_graph_config(filterGraphContext.filterGraph.get(), nullptr);
-  if (ffmpegStatus < 0) {
+  status = avfilter_graph_config(filterGraphContext.filterGraph.get(), nullptr);
+  if (status < 0) {
     throw std::runtime_error(
         "Failed to configure filter graph: " +
-        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
+        getFFMPEGErrorStringFromErrorCode(status));
   }
 }

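
createFilterGraph(), renamed consistently in the last two hunks, follows FFmpeg's usual graph-building sequence: create the "buffer" source and "buffersink" endpoints, constrain the sink's pixel format with av_opt_set_int_list(), describe the unconnected pads with AVFilterInOut lists, then call avfilter_graph_parse_ptr() and avfilter_graph_config(). A standalone sketch of that sequence with a fixed scale filter; buildScaleGraph, its parameters, and the "scale=iw/2:ih/2" chain are hypothetical stand-ins for the dimensions the project computes.

extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>
}

// Hypothetical builder: "buffer" -> scale=iw/2:ih/2 -> "buffersink", RGB24 out.
// bufferArgs describes the input frames, e.g.
// "video_size=1920x1080:pix_fmt=0:time_base=1/25:pixel_aspect=1/1".
int buildScaleGraph(
    AVFilterGraph** graphOut,
    AVFilterContext** sourceContext,
    AVFilterContext** sinkContext,
    const char* bufferArgs) {
  AVFilterGraph* graph = avfilter_graph_alloc();
  int status = avfilter_graph_create_filter(
      sourceContext, avfilter_get_by_name("buffer"), "in", bufferArgs,
      nullptr, graph);
  if (status >= 0) {
    status = avfilter_graph_create_filter(
        sinkContext, avfilter_get_by_name("buffersink"), "out", nullptr,
        nullptr, graph);
  }
  if (status >= 0) {
    // Constrain the sink so the graph converts to RGB24 for us.
    enum AVPixelFormat pixFmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
    status = av_opt_set_int_list(
        *sinkContext, "pix_fmts", pixFmts, AV_PIX_FMT_NONE,
        AV_OPT_SEARCH_CHILDREN);
  }
  if (status >= 0) {
    // Describe the unconnected pads: the source's output feeds the chain,
    // and the chain's output feeds the sink.
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();
    outputs->name = av_strdup("in");
    outputs->filter_ctx = *sourceContext;
    outputs->pad_idx = 0;
    outputs->next = nullptr;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = *sinkContext;
    inputs->pad_idx = 0;
    inputs->next = nullptr;
    status = avfilter_graph_parse_ptr(
        graph, "scale=iw/2:ih/2", &inputs, &outputs, nullptr);
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
  }
  if (status >= 0) {
    status = avfilter_graph_config(graph, nullptr);
  }
  if (status < 0) {
    avfilter_graph_free(&graph); // also frees the filter contexts it owns
    return status;
  }
  *graphOut = graph;
  return 0;
}
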