
Commit 3950942

Revert status name change
1 parent bd44ce3 commit 3950942

File tree

1 file changed: +52 −49 lines changed

src/torchcodec/decoders/_core/VideoDecoder.cpp

Lines changed: 52 additions & 49 deletions
@@ -68,12 +68,12 @@ VideoDecoder::VideoDecoder(const std::string& videoFilePath, SeekMode seekMode)
   av_log_set_level(AV_LOG_QUIET);
 
   AVFormatContext* rawContext = nullptr;
-  int status =
+  int ffmpegStatus =
       avformat_open_input(&rawContext, videoFilePath.c_str(), nullptr, nullptr);
   TORCH_CHECK(
-      status == 0,
+      ffmpegStatus == 0,
       "Could not open input file: " + videoFilePath + " " +
-          getFFMPEGErrorStringFromErrorCode(status));
+          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   TORCH_CHECK(rawContext != nullptr);
   formatContext_.reset(rawContext);
 
@@ -97,13 +97,14 @@ VideoDecoder::VideoDecoder(const void* data, size_t length, SeekMode seekMode)
   TORCH_CHECK(rawContext != nullptr, "Unable to alloc avformat context");
 
   rawContext->pb = ioBytesContext_->getAVIO();
-  int status = avformat_open_input(&rawContext, nullptr, nullptr, nullptr);
-  if (status != 0) {
+  int ffmpegStatus =
+      avformat_open_input(&rawContext, nullptr, nullptr, nullptr);
+  if (ffmpegStatus != 0) {
     avformat_free_context(rawContext);
     TORCH_CHECK(
         false,
         "Failed to open input buffer: " +
-            getFFMPEGErrorStringFromErrorCode(status));
+            getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
   formatContext_.reset(rawContext);
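Every hunk in this commit threads ffmpegStatus into getFFMPEGErrorStringFromErrorCode(). That helper's implementation is not part of this diff; as a rough, hypothetical sketch, such a helper usually just wraps libavutil's av_strerror():

// Hypothetical sketch only, not the repo's actual helper. It illustrates how
// an FFmpeg error code (a negative AVERROR value) is turned into text.
#include <string>
extern "C" {
#include <libavutil/error.h>
}

std::string errorCodeToString(int errnum) {
  char errbuf[AV_ERROR_MAX_STRING_SIZE] = {0};
  // av_strerror() writes a human-readable description of errnum into errbuf
  // and returns a negative value if the code is unknown.
  if (av_strerror(errnum, errbuf, sizeof(errbuf)) < 0) {
    return "Unknown FFmpeg error";
  }
  return std::string(errbuf);
}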
@@ -131,11 +132,11 @@ void VideoDecoder::initializeDecoder() {
   // store enough info in the header, so we call avformat_find_stream_info()
   // which decodes a few frames to get missing info. For more, see:
   // https://ffmpeg.org/doxygen/7.0/group__lavf__decoding.html
-  int status = avformat_find_stream_info(formatContext_.get(), nullptr);
-  if (status < 0) {
+  int ffmpegStatus = avformat_find_stream_info(formatContext_.get(), nullptr);
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         "Failed to find stream info: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
   for (unsigned int i = 0; i < formatContext_->nb_streams; i++) {
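For context on the calls being renamed in the constructor and initializeDecoder() hunks: opening a container and then probing its streams is a fixed two-step FFmpeg pattern. A minimal standalone sketch of it, with a hypothetical function name and no TorchCodec types:

// Sketch of the avformat_open_input + avformat_find_stream_info pattern
// used above; openAndProbe is a hypothetical name for illustration.
extern "C" {
#include <libavformat/avformat.h>
}
#include <stdexcept>
#include <string>

AVFormatContext* openAndProbe(const std::string& path) {
  AVFormatContext* ctx = nullptr;
  if (avformat_open_input(&ctx, path.c_str(), nullptr, nullptr) != 0) {
    throw std::runtime_error("Could not open input: " + path);
  }
  // Some containers don't store full stream parameters in the header, so
  // FFmpeg decodes a few packets here to fill in the missing fields.
  if (avformat_find_stream_info(ctx, nullptr) < 0) {
    avformat_close_input(&ctx);
    throw std::runtime_error("Failed to find stream info: " + path);
  }
  return ctx;
}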
@@ -230,16 +231,16 @@ void VideoDecoder::scanFileAndUpdateMetadataAndIndex() {
     ReferenceAVPacket packet(autoAVPacket);
 
     // av_read_frame is a misleading name: it gets the next **packet**.
-    int status = av_read_frame(formatContext_.get(), packet.get());
+    int ffmpegStatus = av_read_frame(formatContext_.get(), packet.get());
 
-    if (status == AVERROR_EOF) {
+    if (ffmpegStatus == AVERROR_EOF) {
       break;
     }
 
-    if (status != AVSUCCESS) {
+    if (ffmpegStatus != AVSUCCESS) {
       throw std::runtime_error(
           "Failed to read frame from input file: " +
-          getFFMPEGErrorStringFromErrorCode(status));
+          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
     }
 
     if (packet->flags & AV_PKT_FLAG_DISCARD) {
@@ -922,23 +923,23 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(
   // Need to get the next frame or error from PopFrame.
   UniqueAVFrame avFrame(av_frame_alloc());
   AutoAVPacket autoAVPacket;
-  int status = AVSUCCESS;
+  int ffmpegStatus = AVSUCCESS;
   bool reachedEOF = false;
   while (true) {
-    status =
+    ffmpegStatus =
         avcodec_receive_frame(streamInfo.codecContext.get(), avFrame.get());
 
-    if (status != AVSUCCESS && status != AVERROR(EAGAIN)) {
+    if (ffmpegStatus != AVSUCCESS && ffmpegStatus != AVERROR(EAGAIN)) {
       // Non-retriable error
       break;
     }
 
     decodeStats_.numFramesReceivedByDecoder++;
     // Is this the kind of frame we're looking for?
-    if (status == AVSUCCESS && filterFunction(avFrame.get())) {
+    if (ffmpegStatus == AVSUCCESS && filterFunction(avFrame.get())) {
       // Yes, this is the frame we'll return; break out of the decoding loop.
       break;
-    } else if (status == AVSUCCESS) {
+    } else if (ffmpegStatus == AVSUCCESS) {
      // No, but we received a valid frame - just not the kind we're looking
      // for. The logic below will read packets and send them to the decoder.
      // But since we did just receive a frame, we should skip reading more
@@ -957,29 +958,29 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(
     // packets and send them to the decoder.
     ReferenceAVPacket packet(autoAVPacket);
     do {
-      status = av_read_frame(formatContext_.get(), packet.get());
+      ffmpegStatus = av_read_frame(formatContext_.get(), packet.get());
       decodeStats_.numPacketsRead++;
 
-      if (status == AVERROR_EOF) {
+      if (ffmpegStatus == AVERROR_EOF) {
         // End of file reached. We must drain the codec by sending a nullptr
         // packet.
-        status = avcodec_send_packet(
+        ffmpegStatus = avcodec_send_packet(
             streamInfo.codecContext.get(),
             /*avpkt=*/nullptr);
-        if (status < AVSUCCESS) {
+        if (ffmpegStatus < AVSUCCESS) {
           throw std::runtime_error(
               "Could not flush decoder: " +
-              getFFMPEGErrorStringFromErrorCode(status));
+              getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
         }
 
         reachedEOF = true;
         break;
       }
 
-      if (status < AVSUCCESS) {
+      if (ffmpegStatus < AVSUCCESS) {
        throw std::runtime_error(
            "Could not read frame from input file: " +
-            getFFMPEGErrorStringFromErrorCode(status));
+            getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
      }
    } while (packet->stream_index != activeStreamIndex_);
 
@@ -991,25 +992,26 @@ VideoDecoder::AVFrameStream VideoDecoder::decodeAVFrame(
 
     // We got a valid packet. Send it to the decoder, and we'll receive it in
     // the next iteration.
-    status = avcodec_send_packet(streamInfo.codecContext.get(), packet.get());
-    if (status < AVSUCCESS) {
+    ffmpegStatus =
+        avcodec_send_packet(streamInfo.codecContext.get(), packet.get());
+    if (ffmpegStatus < AVSUCCESS) {
       throw std::runtime_error(
           "Could not push packet to decoder: " +
-          getFFMPEGErrorStringFromErrorCode(status));
+          getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
     }
 
     decodeStats_.numPacketsSentToDecoder++;
   }
 
-  if (status < AVSUCCESS) {
-    if (reachedEOF || status == AVERROR_EOF) {
+  if (ffmpegStatus < AVSUCCESS) {
+    if (reachedEOF || ffmpegStatus == AVERROR_EOF) {
       throw VideoDecoder::EndOfFileException(
           "Requested next frame while there are no more frames left to "
           "decode.");
     }
     throw std::runtime_error(
         "Could not receive frame from decoder: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
   // Note that we don't flush the decoder when we reach EOF (even though that's
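The renames in the decodeAVFrame hunks above all track the same FFmpeg contract: packets go in through avcodec_send_packet(), frames come out through avcodec_receive_frame(), and end of input is signalled by draining the decoder with a nullptr packet. A minimal, self-contained sketch of that contract, without VideoDecoder's frame filtering or stats bookkeeping and with a hypothetical function name:

// Sketch of the send/receive decode loop. fmtCtx and codecCtx are assumed to
// be already opened; streamIndex selects the stream whose packets we decode.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <stdexcept>

void drainAllFrames(AVFormatContext* fmtCtx, AVCodecContext* codecCtx, int streamIndex) {
  AVPacket* packet = av_packet_alloc();
  AVFrame* frame = av_frame_alloc();
  int ret = 0;
  bool eof = false;
  while (!eof) {
    // Pull the next packet from the container.
    ret = av_read_frame(fmtCtx, packet);
    if (ret == AVERROR_EOF) {
      // Drain: a nullptr packet tells the decoder no more input is coming.
      avcodec_send_packet(codecCtx, nullptr);
      eof = true;
    } else if (ret < 0) {
      throw std::runtime_error("av_read_frame failed");
    } else if (packet->stream_index == streamIndex) {
      avcodec_send_packet(codecCtx, packet);
    }
    av_packet_unref(packet);

    // Receive every frame the decoder has ready after this packet.
    while ((ret = avcodec_receive_frame(codecCtx, frame)) == 0) {
      // ... use frame ...
      av_frame_unref(frame);
    }
    // AVERROR(EAGAIN): decoder needs more input; AVERROR_EOF: fully drained.
    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
      throw std::runtime_error("avcodec_receive_frame failed");
    }
  }
  av_frame_free(&frame);
  av_packet_free(&packet);
}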
@@ -1195,14 +1197,14 @@ torch::Tensor VideoDecoder::convertAVFrameToTensorUsingFilterGraph(
     const AVFrame* avFrame) {
   FilterGraphContext& filterGraphContext =
       streamInfos_[activeStreamIndex_].filterGraphContext;
-  int status =
+  int ffmpegStatus =
       av_buffersrc_write_frame(filterGraphContext.sourceContext, avFrame);
-  if (status < AVSUCCESS) {
+  if (ffmpegStatus < AVSUCCESS) {
     throw std::runtime_error("Failed to add frame to buffer source context");
   }
 
   UniqueAVFrame filteredAVFrame(av_frame_alloc());
-  status = av_buffersink_get_frame(
+  ffmpegStatus = av_buffersink_get_frame(
       filterGraphContext.sinkContext, filteredAVFrame.get());
   TORCH_CHECK_EQ(filteredAVFrame->format, AV_PIX_FMT_RGB24);
 
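The two renamed calls above follow libavfilter's push/pull model: a decoded frame is written to the graph's buffer source and the converted frame is pulled from its buffer sink. A minimal sketch of that step, assuming the source and sink contexts were configured elsewhere (as the createFilterGraph hunks below do) and using hypothetical names:

// Push one decoded frame through an already-configured filter graph and
// return the filtered result. srcCtx/sinkCtx are the graph's "buffer" and
// "buffersink" filter contexts; filterOneFrame is a hypothetical name.
extern "C" {
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>
}
#include <stdexcept>

AVFrame* filterOneFrame(AVFilterContext* srcCtx, AVFilterContext* sinkCtx, const AVFrame* in) {
  // Queue the input frame on the source; the graph starts processing it.
  if (av_buffersrc_write_frame(srcCtx, in) < 0) {
    throw std::runtime_error("Failed to add frame to buffer source");
  }
  AVFrame* out = av_frame_alloc();
  // Pull the corresponding filtered frame from the sink. For a simple
  // one-in/one-out chain (such as pixel-format conversion) one pull is
  // enough; AVERROR(EAGAIN) would mean the graph needs more input first.
  if (av_buffersink_get_frame(sinkCtx, out) < 0) {
    av_frame_free(&out);
    throw std::runtime_error("Failed to get frame from buffer sink");
  }
  return out;
}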

@@ -1326,44 +1328,44 @@ void VideoDecoder::createFilterGraph(
   filterArgs << ":pixel_aspect=" << codecContext->sample_aspect_ratio.num << "/"
              << codecContext->sample_aspect_ratio.den;
 
-  int status = avfilter_graph_create_filter(
+  int ffmpegStatus = avfilter_graph_create_filter(
       &filterGraphContext.sourceContext,
       buffersrc,
       "in",
       filterArgs.str().c_str(),
       nullptr,
       filterGraphContext.filterGraph.get());
-  if (status < 0) {
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         std::string("Failed to create filter graph: ") + filterArgs.str() +
-        ": " + getFFMPEGErrorStringFromErrorCode(status));
+        ": " + getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
-  status = avfilter_graph_create_filter(
+  ffmpegStatus = avfilter_graph_create_filter(
       &filterGraphContext.sinkContext,
       buffersink,
       "out",
       nullptr,
       nullptr,
       filterGraphContext.filterGraph.get());
-  if (status < 0) {
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         "Failed to create filter graph: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
   enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
 
-  status = av_opt_set_int_list(
+  ffmpegStatus = av_opt_set_int_list(
       filterGraphContext.sinkContext,
       "pix_fmts",
       pix_fmts,
       AV_PIX_FMT_NONE,
       AV_OPT_SEARCH_CHILDREN);
-  if (status < 0) {
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         "Failed to set output pixel formats: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
   UniqueAVFilterInOut outputs(avfilter_inout_alloc());
@@ -1384,25 +1386,26 @@ void VideoDecoder::createFilterGraph(
 
   AVFilterInOut* outputsTmp = outputs.release();
   AVFilterInOut* inputsTmp = inputs.release();
-  status = avfilter_graph_parse_ptr(
+  ffmpegStatus = avfilter_graph_parse_ptr(
       filterGraphContext.filterGraph.get(),
       description.str().c_str(),
       &inputsTmp,
       &outputsTmp,
       nullptr);
   outputs.reset(outputsTmp);
   inputs.reset(inputsTmp);
-  if (status < 0) {
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         "Failed to parse filter description: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 
-  status = avfilter_graph_config(filterGraphContext.filterGraph.get(), nullptr);
-  if (status < 0) {
+  ffmpegStatus =
+      avfilter_graph_config(filterGraphContext.filterGraph.get(), nullptr);
+  if (ffmpegStatus < 0) {
     throw std::runtime_error(
         "Failed to configure filter graph: " +
-        getFFMPEGErrorStringFromErrorCode(status));
+        getFFMPEGErrorStringFromErrorCode(ffmpegStatus));
   }
 }
 
0 commit comments
