@@ -62,7 +62,9 @@ FFmpegDecoderVideo::~FFmpegDecoderVideo()
 void FFmpegDecoderVideo::open(AVStream * const stream)
 {
     m_stream = stream;
-    m_context = stream->codec;
+    m_codecpar = stream->codecpar;
+    const AVCodec* p_codec = avcodec_find_decoder(m_codecpar->codec_id);
+    m_context = avcodec_alloc_context3(p_codec);
 
     // Trust the video size given at this point
     // (avcodec_open seems to sometimes return a 0x0 size)
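
A context allocated with avcodec_alloc_context3() starts out empty: unlike the old stream->codec pointer, it carries none of the stream's parameters, and the codec still has to be opened before it can decode. A minimal sketch of the two calls this migration also needs somewhere in open(), assuming they are not already added further down outside the visible hunk:

    // Copy the demuxer's codec parameters into the new context,
    // then open the decoder on it.
    if (avcodec_parameters_to_context(m_context, m_codecpar) < 0)
        throw std::runtime_error("avcodec_parameters_to_context() failed");

    if (avcodec_open2(m_context, p_codec, nullptr) < 0)
        throw std::runtime_error("avcodec_open2() failed");

    // The matching cleanup (e.g. in the destructor) would be:
    //     avcodec_free_context(&m_context);
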
@@ -99,11 +101,12 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
 
     // Allocate converted RGB frame
     m_frame_rgba.reset(av_frame_alloc());
-    m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
+    m_buffer_rgba[0].resize(av_image_get_buffer_size(AV_PIX_FMT_RGB24, width(), height(), 1));
     m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
 
     // Assign appropriate parts of the buffer to image planes in m_frame_rgba
-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());
+    AVFrame *avf = m_frame_rgba.get();
+    av_image_fill_arrays(avf->data, avf->linesize, &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height(), 1);
 
     // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
     m_context->opaque = this;
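
The removed avpicture_* helpers have direct replacements in libavutil's imgutils API; the extra trailing argument is the row alignment, and 1 means tightly packed lines, matching what avpicture_get_size()/avpicture_fill() assumed. A standalone sketch of the same allocate-and-wrap pattern (w and h are hypothetical values; the vector owns the pixels, the frame only points into it):

    extern "C" {
    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>
    }
    #include <vector>

    const int w = 640, h = 480;    // illustration only
    AVFrame *rgb = av_frame_alloc();
    std::vector<uint8_t> pixels(av_image_get_buffer_size(AV_PIX_FMT_RGB24, w, h, 1));

    // Point rgb->data/rgb->linesize into the externally owned buffer.
    av_image_fill_arrays(rgb->data, rgb->linesize, pixels.data(),
                         AV_PIX_FMT_RGB24, w, h, 1);
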
@@ -169,7 +172,7 @@ void FFmpegDecoderVideo::decodeLoop()
         int frame_finished = 0;
 
         // We want to use the entire packet since some codecs will require extra information for decoding
-        const int bytes_decoded = avcodec_decode_video2(m_context, m_frame.get(), &frame_finished, &(packet.packet));
+        const int bytes_decoded = avcodec_receive_frame(m_context, m_frame.get());
 
         if (bytes_decoded < 0)
             throw std::runtime_error("avcodec_decode_video failed()");
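
One caution with this hunk: avcodec_receive_frame() only drains frames that a prior avcodec_send_packet() call has fed to the decoder, and it returns AVERROR(EAGAIN), a negative value, when it merely needs more input, which the bytes_decoded < 0 check above would turn into a hard failure. A sketch of the full two-step handshake the new API expects, reusing the names from this loop (the frame_finished assignment is an assumption, since its consumer lies outside the hunk):

        // Feed the whole packet, then try to pull one decoded frame.
        int ret = avcodec_send_packet(m_context, &(packet.packet));
        if (ret < 0 && ret != AVERROR(EAGAIN))
            throw std::runtime_error("avcodec_send_packet() failed");

        ret = avcodec_receive_frame(m_context, m_frame.get());
        if (ret == 0)
            frame_finished = 1;    // a complete frame is now in m_frame
        else if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            throw std::runtime_error("avcodec_receive_frame() failed");
        // EAGAIN / AVERROR_EOF just mean "no frame this round", not an error.
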
@@ -283,7 +286,7 @@ void FFmpegDecoderVideo::findAspectRatio()
     m_pixel_aspect_ratio = ratio;
 }
 
-int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
+int FFmpegDecoderVideo::convert(AVFrame *dst, int dst_pix_fmt, AVFrame *src,
                                 int src_pix_fmt, int src_width, int src_height)
 {
     osg::Timer_t startTick = osg::Timer::instance()->tick();
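
AVFrame exposes the same data[] and linesize[] members that AVPicture did, so the swscale call inside convert() can stay structurally unchanged by this signature swap. A minimal sketch of such a body, assuming a cached SwsContext member named m_swscale_ctx (the member name is hypothetical):

    // Reuse the scaler context across frames; it is (re)created only
    // when the formats or dimensions change.
    m_swscale_ctx = sws_getCachedContext(m_swscale_ctx,
        src_width, src_height, (AVPixelFormat) src_pix_fmt,
        src_width, src_height, (AVPixelFormat) dst_pix_fmt,
        SWS_BILINEAR, nullptr, nullptr, nullptr);

    // sws_scale() reads and writes through data/linesize, which AVFrame
    // provides just as AVPicture did.
    return sws_scale(m_swscale_ctx, src->data, src->linesize,
                     0, src_height, dst->data, dst->linesize);
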
@@ -334,11 +337,11 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
         return;
 #endif
 
-    AVPicture * const src = (AVPicture *) m_frame.get();
-    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
+    AVFrame * const src = (AVFrame *) m_frame.get();
+    AVFrame * const dst = (AVFrame *) m_frame_rgba.get();
 
-    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());
+    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
+    av_image_fill_arrays(dst->data, dst->linesize, &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height(), 1);
 
 
     // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
@@ -370,7 +373,7 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
 
 
 
-void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
+void FFmpegDecoderVideo::yuva420pToRgba(AVFrame * const dst, AVFrame * const src, int width, int height)
 {
     convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
 