gentoo-overlay/dev-games/openscenegraph/files/openscenegraph-3.5.1-ffmpeg...

From e85d5743341585c6e6eb1ac693884f80e1fa06ce Mon Sep 17 00:00:00 2001
From: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Date: Wed, 1 Jun 2016 10:32:35 +0100
Subject: [PATCH] Replace deprecated FFmpeg API to fix build with ffmpeg-3.0.x
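
FFmpeg 3.0 removed several APIs this plugin relied on: the
get_buffer()/release_buffer() callbacks, avcodec_alloc_frame(), and the
unprefixed PIX_FMT_*/PixelFormat names. The patch below switches to
get_buffer2(), av_frame_alloc() and the AV_PIX_FMT_*/AVPixelFormat
equivalents; per-frame cleanup now happens through an AVBufferRef free
callback instead of release_buffer().

Rough sketch of the get_buffer2() pattern adopted below (illustrative
only, not part of the patch; the names are placeholders):

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavutil/buffer.h>
    }

    // Free callback: releases the wrapped original buffer reference and
    // the per-frame payload allocated in the get_buffer2() callback.
    static void free_payload(void *opaque, uint8_t *data)
    {
        AVBufferRef *wrapped = static_cast<AVBufferRef *>(opaque);
        av_buffer_unref(&wrapped);
        av_free(data);
    }

    static int get_buffer2_cb(AVCodecContext *ctx, AVFrame *frame, int flags)
    {
        const int result = avcodec_default_get_buffer2(ctx, frame, flags);
        if (result < 0)
            return result;

        // Attach per-frame data (e.g. the packet PTS) to the frame and
        // let buf[0] own both it and the original buffer reference.
        int64_t *payload = static_cast<int64_t *>(av_malloc(sizeof(int64_t)));
        *payload = 0;
        frame->opaque = payload;
        frame->buf[0] = av_buffer_create(reinterpret_cast<uint8_t *>(payload),
                                         sizeof(int64_t), free_payload,
                                         frame->buf[0], 0);
        return result;
    }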
---
src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp | 3 +-
src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp | 48 +++++++++++++---------------
src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp | 4 +--
src/osgPlugins/ffmpeg/FFmpegParameters.cpp | 2 +-
4 files changed, 26 insertions(+), 31 deletions(-)
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
index 665c68f..636bddd 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
@@ -227,8 +227,7 @@ printf("### CONVERTING from sample format %s TO %s\n\t\tFROM %d TO %d channels\n
if (avcodec_open2(m_context, p_codec, NULL) < 0)
throw std::runtime_error("avcodec_open() failed");
- m_context->get_buffer = avcodec_default_get_buffer;
- m_context->release_buffer = avcodec_default_release_buffer;
+ m_context->get_buffer2 = avcodec_default_get_buffer2;
}
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
index 9375657..083d3db 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
@@ -71,7 +71,7 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
findAspectRatio();
// Find out whether we support Alpha channel
- m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
+ m_alpha_channel = (m_context->pix_fmt == AV_PIX_FMT_YUVA420P);
// Find out the framerate
#if LIBAVCODEC_VERSION_MAJOR >= 56
@@ -95,20 +95,19 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
throw std::runtime_error("avcodec_open() failed");
// Allocate video frame
- m_frame.reset(avcodec_alloc_frame());
+ m_frame.reset(av_frame_alloc());
// Allocate converted RGB frame
- m_frame_rgba.reset(avcodec_alloc_frame());
- m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB24, width(), height()));
+ m_frame_rgba.reset(av_frame_alloc());
+ m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB24, width(), height());
+ avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());
// Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
m_context->opaque = this;
- m_context->get_buffer = getBuffer;
- m_context->release_buffer = releaseBuffer;
+ m_context->get_buffer2 = getBuffer;
}
@@ -267,8 +266,8 @@ int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
#ifdef USE_SWSCALE
if (m_swscale_ctx==0)
{
- m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
- src_width, src_height, (PixelFormat) dst_pix_fmt,
+ m_swscale_ctx = sws_getContext(src_width, src_height, (AVPixelFormat) src_pix_fmt,
+ src_width, src_height, (AVPixelFormat) dst_pix_fmt,
/*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
}
@@ -315,14 +314,14 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB24, width(), height());
+ avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());
// Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
- if (m_context->pix_fmt == PIX_FMT_YUVA420P)
+ if (m_context->pix_fmt == AV_PIX_FMT_YUVA420P)
yuva420pToRgba(dst, src, width(), height());
else
- convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
+ convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
// Wait 'delay' seconds before publishing the picture.
int i_delay = static_cast<int>(delay * 1000000 + 0.5);
@@ -349,7 +348,7 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
- convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
+ convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
const size_t bpp = 4;
@@ -367,31 +366,28 @@ void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const
}
}
-
-
-int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
+int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture, int flags)
{
+ AVBufferRef *ref;
const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);
- const int result = avcodec_default_get_buffer(context, picture);
+ const int result = avcodec_default_get_buffer2(context, picture, flags);
int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );
*p_pts = this_->m_packet_pts;
picture->opaque = p_pts;
+ ref = av_buffer_create((uint8_t *)picture->opaque, sizeof(int64_t), FFmpegDecoderVideo::freeBuffer, picture->buf[0], flags);
+ picture->buf[0] = ref;
+
return result;
}
-
-
-void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
+void FFmpegDecoderVideo::freeBuffer(void *opaque, uint8_t *data)
{
- if (picture != 0)
- av_freep(&picture->opaque);
-
- avcodec_default_release_buffer(context, picture);
+ AVBufferRef *ref = (AVBufferRef *)opaque;
+ av_buffer_unref(&ref);
+ av_free(data);
}
-
-
} // namespace osgFFmpeg
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
index 7883b17..778c1a9 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
@@ -94,8 +94,8 @@ class FFmpegDecoderVideo : public OpenThreads::Thread
int src_pix_fmt, int src_width, int src_height);
- static int getBuffer(AVCodecContext * context, AVFrame * picture);
- static void releaseBuffer(AVCodecContext * context, AVFrame * picture);
+ static int getBuffer(AVCodecContext * context, AVFrame * picture, int flags);
+ static void freeBuffer(void * opaque, uint8_t *data);
PacketQueue & m_packets;
FFmpegClocks & m_clocks;
diff --git a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
index 288e440..5915ab8 100644
--- a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
@@ -19,7 +19,7 @@ extern "C"
#include <libavutil/pixdesc.h>
}
-inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
+inline AVPixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
namespace osgFFmpeg {