1
0
mirror of https://github.com/cookiengineer/audacity synced 2025-07-19 22:27:43 +02:00

Fix resource leaks in the usage of the FFMPEG library with RAII objects...

... This includes failure paths in the initialization of import.  Those
resources would have been reclaimed before program exit, but not as soon as
they should have been.

... This also includes certain leaks that would happen every time a file is
successfully imported or exported.  We never used avformat_free_context or
av_dict_free as we should have!

... There were also AVPacket objects repeatedly reinitialized without proper
cleanups in between.  That might have leaked memory too.
This commit is contained in:
Paul Licameli 2016-04-13 04:50:23 -04:00
parent 2207ebeb7d
commit 24df87bb4c
5 changed files with 317 additions and 211 deletions

@ -304,8 +304,11 @@ FFmpegContext::~FFmpegContext()
if (pb) { if (pb) {
ufile_close(pb); ufile_close(pb);
av_free(pb->buffer); if (FFmpegLibsInst->ValidLibsLoaded())
av_free(pb); {
av_free(pb->buffer);
av_free(pb);
}
} }
} }
@ -314,14 +317,14 @@ streamContext *import_ffmpeg_read_next_frame(AVFormatContext* formatContext,
unsigned int numStreams) unsigned int numStreams)
{ {
streamContext *sc = NULL; streamContext *sc = NULL;
AVPacket pkt; AVPacketEx pkt;
if (av_read_frame(formatContext,&pkt) < 0) if (av_read_frame(formatContext, &pkt) < 0)
{ {
return NULL; return NULL;
} }
// Find a stream to which this frame belongs to // Find a stream to which this frame belongs
for (unsigned int i = 0; i < numStreams; i++) for (unsigned int i = 0; i < numStreams; i++)
{ {
if (streams[i]->m_stream->index == pkt.stream_index) if (streams[i]->m_stream->index == pkt.stream_index)
@ -332,16 +335,14 @@ streamContext *import_ffmpeg_read_next_frame(AVFormatContext* formatContext,
// When not all streams are selected for import this will happen very often. // When not all streams are selected for import this will happen very often.
if (sc == NULL) if (sc == NULL)
{ {
av_free_packet(&pkt);
return (streamContext*)1; return (streamContext*)1;
} }
// Copy the frame to the stream context // Copy the frame to the stream context
memcpy(&sc->m_pkt, &pkt, sizeof(AVPacket)); sc->m_pkt.create(std::move(pkt));
sc->m_pktValid = 1; sc->m_pktDataPtr = sc->m_pkt->data;
sc->m_pktDataPtr = pkt.data; sc->m_pktRemainingSiz = sc->m_pkt->size;
sc->m_pktRemainingSiz = pkt.size;
return sc; return sc;
} }
@ -362,24 +363,23 @@ int import_ffmpeg_decode_frame(streamContext *sc, bool flushing)
} }
else else
{ {
if (!sc->m_pktValid || (sc->m_pktRemainingSiz <= 0)) if (!sc->m_pkt || (sc->m_pktRemainingSiz <= 0))
{ {
//No more data //No more data
return -1; return -1;
} }
} }
AVPacket avpkt; AVPacketEx avpkt;
av_init_packet(&avpkt);
avpkt.data = pDecode; avpkt.data = pDecode;
avpkt.size = nDecodeSiz; avpkt.size = nDecodeSiz;
AVFrame *frame = av_frame_alloc(); AVFrameHolder frame{ av_frame_alloc() };
int got_output = 0; int got_output = 0;
nBytesDecoded = nBytesDecoded =
avcodec_decode_audio4(sc->m_codecCtx, avcodec_decode_audio4(sc->m_codecCtx,
frame, // out frame.get(), // out
&got_output, // out &got_output, // out
&avpkt); // in &avpkt); // in
@ -398,15 +398,11 @@ int import_ffmpeg_decode_frame(streamContext *sc, bool flushing)
// Reallocate the audio sample buffer if it's smaller than the frame size. // Reallocate the audio sample buffer if it's smaller than the frame size.
if (newsize > sc->m_decodedAudioSamplesSiz ) if (newsize > sc->m_decodedAudioSamplesSiz )
{ {
if (sc->m_decodedAudioSamples) // Reallocate a bigger buffer. But av_realloc is NOT compatible with the returns of av_malloc!
{ // So do this:
av_free(sc->m_decodedAudioSamples); sc->m_decodedAudioSamples.reset(static_cast<uint8_t *>(av_malloc(newsize)));
}
sc->m_decodedAudioSamples = (uint8_t *) av_malloc(newsize);
sc->m_decodedAudioSamplesSiz = newsize; sc->m_decodedAudioSamplesSiz = newsize;
if (!sc->m_decodedAudioSamples)
if (sc->m_decodedAudioSamples == NULL)
{ {
//Can't allocate bytes //Can't allocate bytes
return -1; return -1;
@ -415,17 +411,15 @@ int import_ffmpeg_decode_frame(streamContext *sc, bool flushing)
if (frame->data[1]) { if (frame->data[1]) {
for (int i = 0; i<frame->nb_samples; i++) { for (int i = 0; i<frame->nb_samples; i++) {
for (int ch = 0; ch<channels; ch++) { for (int ch = 0; ch<channels; ch++) {
memcpy(sc->m_decodedAudioSamples + sc->m_samplesize * (ch + channels*i), memcpy(sc->m_decodedAudioSamples.get() + sc->m_samplesize * (ch + channels*i),
frame->extended_data[ch] + sc->m_samplesize*i, frame->extended_data[ch] + sc->m_samplesize*i,
sc->m_samplesize); sc->m_samplesize);
} }
} }
} else { } else {
memcpy(sc->m_decodedAudioSamples, frame->data[0], newsize); memcpy(sc->m_decodedAudioSamples.get(), frame->data[0], newsize);
} }
av_frame_free(&frame);
// We may not have read all of the data from this packet. If so, the user can call again. // We may not have read all of the data from this packet. If so, the user can call again.
// Whether or not they do depends on if m_pktRemainingSiz == 0 (they can check). // Whether or not they do depends on if m_pktRemainingSiz == 0 (they can check).
sc->m_pktDataPtr += nBytesDecoded; sc->m_pktDataPtr += nBytesDecoded;
@ -889,6 +883,7 @@ bool FFmpegLibs::InitLibs(const wxString &libpath_format, bool WXUNUSED(showerr)
FFMPEG_INITDYN(avformat, avio_size); FFMPEG_INITDYN(avformat, avio_size);
FFMPEG_INITDYN(avformat, avio_alloc_context); FFMPEG_INITDYN(avformat, avio_alloc_context);
FFMPEG_INITALT(avformat, av_guess_format, avformat, guess_format); FFMPEG_INITALT(avformat, av_guess_format, avformat, guess_format);
FFMPEG_INITDYN(avformat, avformat_free_context);
FFMPEG_INITDYN(avcodec, av_init_packet); FFMPEG_INITDYN(avcodec, av_init_packet);
FFMPEG_INITDYN(avcodec, av_free_packet); FFMPEG_INITDYN(avcodec, av_free_packet);
@ -906,6 +901,7 @@ bool FFmpegLibs::InitLibs(const wxString &libpath_format, bool WXUNUSED(showerr)
FFMPEG_INITDYN(avcodec, avcodec_fill_audio_frame); FFMPEG_INITDYN(avcodec, avcodec_fill_audio_frame);
FFMPEG_INITDYN(avutil, av_free); FFMPEG_INITDYN(avutil, av_free);
FFMPEG_INITDYN(avutil, av_dict_free);
FFMPEG_INITDYN(avutil, av_dict_get); FFMPEG_INITDYN(avutil, av_dict_get);
FFMPEG_INITDYN(avutil, av_dict_set); FFMPEG_INITDYN(avutil, av_dict_set);
FFMPEG_INITDYN(avutil, av_get_bytes_per_sample); FFMPEG_INITDYN(avutil, av_get_bytes_per_sample);
@ -918,7 +914,7 @@ bool FFmpegLibs::InitLibs(const wxString &libpath_format, bool WXUNUSED(showerr)
FFMPEG_INITDYN(avutil, av_fifo_size); FFMPEG_INITDYN(avutil, av_fifo_size);
FFMPEG_INITDYN(avutil, av_malloc); FFMPEG_INITDYN(avutil, av_malloc);
FFMPEG_INITDYN(avutil, av_fifo_generic_write); FFMPEG_INITDYN(avutil, av_fifo_generic_write);
FFMPEG_INITDYN(avutil, av_freep); // FFMPEG_INITDYN(avutil, av_freep);
FFMPEG_INITDYN(avutil, av_rescale_q); FFMPEG_INITDYN(avutil, av_rescale_q);
FFMPEG_INITDYN(avutil, avutil_version); FFMPEG_INITDYN(avutil, avutil_version);
FFMPEG_INITALT(avutil, av_frame_alloc, avcodec, avcodec_alloc_frame); FFMPEG_INITALT(avutil, av_frame_alloc, avcodec, avcodec_alloc_frame);

@ -710,6 +710,11 @@ extern "C" {
(const char *short_name, const char *filename, const char *mime_type), (const char *short_name, const char *filename, const char *mime_type),
(short_name, filename, mime_type) (short_name, filename, mime_type)
); );
FFMPEG_FUNCTION_NO_RETURN(
avformat_free_context,
(AVFormatContext *s),
(s)
);
FFMPEG_FUNCTION_WITH_RETURN( FFMPEG_FUNCTION_WITH_RETURN(
int, int,
av_write_trailer, av_write_trailer,
@ -750,11 +755,13 @@ extern "C" {
(size_t size), (size_t size),
(size) (size)
); );
/*
FFMPEG_FUNCTION_NO_RETURN( FFMPEG_FUNCTION_NO_RETURN(
av_freep, av_freep,
(void *ptr), (void *ptr),
(ptr) (ptr)
); );
*/
FFMPEG_FUNCTION_WITH_RETURN( FFMPEG_FUNCTION_WITH_RETURN(
int64_t, int64_t,
av_rescale_q, av_rescale_q,
@ -784,6 +791,11 @@ extern "C" {
(AVFifoBuffer *f, unsigned int size), (AVFifoBuffer *f, unsigned int size),
(f, size) (f, size)
); );
FFMPEG_FUNCTION_NO_RETURN(
av_dict_free,
(AVDictionary **m),
(m)
);
FFMPEG_FUNCTION_WITH_RETURN( FFMPEG_FUNCTION_WITH_RETURN(
AVDictionaryEntry *, AVDictionaryEntry *,
av_dict_get, av_dict_get,
@ -842,37 +854,121 @@ extern "C" {
#endif #endif
// Attach some C++ lifetime management to the struct, which owns some memory resources.
struct AVPacketEx : public AVPacket
{
AVPacketEx()
{
av_init_packet(this);
data = nullptr;
size = 0;
}
AVPacketEx(const AVPacketEx &) PROHIBITED;
AVPacketEx& operator= (const AVPacketEx&) PROHIBITED;
AVPacketEx(AVPacketEx &&that)
{
steal(std::move(that));
}
AVPacketEx &operator= (AVPacketEx &&that)
{
if (this != &that) {
reset();
steal(std::move(that));
}
return *this;
}
~AVPacketEx ()
{
reset();
}
void reset()
{
// This does not deallocate the pointer, but it frees side data.
av_free_packet(this);
}
private:
void steal(AVPacketEx &&that)
{
memcpy(this, &that, sizeof(that));
av_init_packet(&that);
that.data = nullptr;
that.size = 0;
}
};
// utilites for RAII:
// Deleter adaptor for functions like av_free that take a pointer
template<typename T, typename R, R(*Fn)(T*)> struct AV_Deleter {
inline void operator() (T* p) const
{
if (p)
Fn(p);
}
};
// Deleter adaptor for functions like av_freep that take a pointer to a pointer
template<typename T, typename R, R(*Fn)(T**)> struct AV_Deleterp {
inline void operator() (T* p) const
{
if (p)
Fn(&p);
}
};
using AVFrameHolder = std::unique_ptr<
AVFrame, AV_Deleterp<AVFrame, void, av_frame_free>
>;
using AVFifoBufferHolder = std::unique_ptr<
AVFifoBuffer, AV_Deleter<AVFifoBuffer, void, av_fifo_free>
>;
using AVFormatContextHolder = std::unique_ptr<
AVFormatContext, AV_Deleter<AVFormatContext, void, avformat_free_context>
>;
using AVCodecContextHolder = std::unique_ptr<
AVCodecContext, AV_Deleter<AVCodecContext, int, avcodec_close>
>;
using AVDictionaryCleanup = std::unique_ptr<
AVDictionary*, AV_Deleter<AVDictionary*, void, av_dict_free>
>;
using UFileHolder = std::unique_ptr<
AVIOContext, AV_Deleter<AVIOContext, int, ufile_close>
>;
template<typename T> using AVMallocHolder = std::unique_ptr<
T, AV_Deleter<void, void, av_free>
>;
struct streamContext struct streamContext
{ {
bool m_use; // TRUE = this stream will be loaded into Audacity bool m_use{}; // TRUE = this stream will be loaded into Audacity
AVStream *m_stream; // an AVStream * AVStream *m_stream{}; // an AVStream *
AVCodecContext *m_codecCtx; // pointer to m_stream->codec AVCodecContext *m_codecCtx{}; // pointer to m_stream->codec
AVPacket m_pkt; // the last AVPacket we read for this stream Maybe<AVPacketEx> m_pkt; // the last AVPacket we read for this stream
int m_pktValid; // is m_pkt valid? uint8_t *m_pktDataPtr{}; // pointer into m_pkt.data
uint8_t *m_pktDataPtr; // pointer into m_pkt.data int m_pktRemainingSiz{};
int m_pktRemainingSiz;
int64_t m_pts; // the current presentation time of the input stream int64_t m_pts{}; // the current presentation time of the input stream
int64_t m_ptsOffset; // packets associated with stream are relative to this int64_t m_ptsOffset{}; // packets associated with stream are relative to this
int m_frameValid; // is m_decodedVideoFrame/m_decodedAudioSamples valid? int m_frameValid{}; // is m_decodedVideoFrame/m_decodedAudioSamples valid?
uint8_t *m_decodedAudioSamples; // decoded audio samples stored here AVMallocHolder<uint8_t> m_decodedAudioSamples; // decoded audio samples stored here
unsigned int m_decodedAudioSamplesSiz; // current size of m_decodedAudioSamples unsigned int m_decodedAudioSamplesSiz{}; // current size of m_decodedAudioSamples
int m_decodedAudioSamplesValidSiz; // # valid bytes in m_decodedAudioSamples int m_decodedAudioSamplesValidSiz{}; // # valid bytes in m_decodedAudioSamples
int m_initialchannels; // number of channels allocated when we begin the importing. Assumes that number of channels doesn't change on the fly. int m_initialchannels{}; // number of channels allocated when we begin the importing. Assumes that number of channels doesn't change on the fly.
int m_samplesize; // input sample size in bytes int m_samplesize{}; // input sample size in bytes
AVSampleFormat m_samplefmt; // input sample format AVSampleFormat m_samplefmt{ AV_SAMPLE_FMT_NONE }; // input sample format
int m_osamplesize; // output sample size in bytes int m_osamplesize{}; // output sample size in bytes
sampleFormat m_osamplefmt; // output sample format sampleFormat m_osamplefmt{ floatSample }; // output sample format
streamContext() { memset(this, 0, sizeof(*this)); } streamContext() { memset(this, 0, sizeof(*this)); }
~streamContext() ~streamContext()
{ {
if (m_decodedAudioSamples)
av_free(m_decodedAudioSamples);
} }
}; };

@ -118,6 +118,8 @@ public:
/// Flushes audio encoder /// Flushes audio encoder
bool Finalize(); bool Finalize();
void FreeResources();
/// Creates options panel /// Creates options panel
///\param format - index of export type ///\param format - index of export type
wxWindow *OptionsCreate(wxWindow *parent, int format); wxWindow *OptionsCreate(wxWindow *parent, int format);
@ -150,33 +152,33 @@ public:
private: private:
AVFormatContext * mEncFormatCtx; // libavformat's context for our output file AVOutputFormat * mEncFormatDesc{}; // describes our output file to libavformat
AVOutputFormat * mEncFormatDesc; // describes our output file to libavformat int default_frame_size{};
int default_frame_size; AVStream * mEncAudioStream{}; // the output audio stream (may remain NULL)
AVStream * mEncAudioStream; // the output audio stream (may remain NULL) int mEncAudioFifoOutBufSiz{};
AVCodecContext * mEncAudioCodecCtx; // the encoder for the output audio stream
AVFifoBuffer * mEncAudioFifo; // FIFO to write incoming audio samples into
uint8_t * mEncAudioFifoOutBuf; // buffer to read _out_ of the FIFO into
int mEncAudioFifoOutBufSiz;
wxString mName; wxString mName;
int mSubFormat; int mSubFormat{};
int mBitRate; int mBitRate{};
int mSampleRate; int mSampleRate{};
int mChannels; int mChannels{};
bool mSupportsUTF8; bool mSupportsUTF8{};
// Smart pointer fields, their order is the reverse in which they are reset in Finalize():
AVFifoBufferHolder mEncAudioFifo; // FIFO to write incoming audio samples into
AVMallocHolder<int16_t> mEncAudioFifoOutBuf; // buffer to read _out_ of the FIFO into
AVFormatContextHolder mEncFormatCtx; // libavformat's context for our output file
UFileHolder mUfileCloser;
AVCodecContextHolder mEncAudioCodecCtx; // the encoder for the output audio stream
}; };
ExportFFmpeg::ExportFFmpeg() ExportFFmpeg::ExportFFmpeg()
: ExportPlugin() : ExportPlugin()
{ {
mEncFormatCtx = NULL; // libavformat's context for our output file
mEncFormatDesc = NULL; // describes our output file to libavformat mEncFormatDesc = NULL; // describes our output file to libavformat
mEncAudioStream = NULL; // the output audio stream (may remain NULL) mEncAudioStream = NULL; // the output audio stream (may remain NULL)
mEncAudioCodecCtx = NULL; // the encoder for the output audio stream
#define MAX_AUDIO_PACKET_SIZE (128 * 1024) #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
mEncAudioFifoOutBuf = NULL; // buffer to read _out_ of the FIFO into
mEncAudioFifoOutBufSiz = 0; mEncAudioFifoOutBufSiz = 0;
mSampleRate = 0; mSampleRate = 0;
@ -257,10 +259,17 @@ bool ExportFFmpeg::CheckFileName(wxFileName & WXUNUSED(filename), int WXUNUSED(f
bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const Tags *metadata, int subformat) bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const Tags *metadata, int subformat)
{ {
// This will undo the acquisition of resources along any early exit path:
auto deleter = [](ExportFFmpeg *This) {
This->FreeResources();
};
std::unique_ptr<ExportFFmpeg, decltype(deleter)> cleanup{ this, deleter };
int err; int err;
//FFmpegLibsInst->LoadLibs(NULL,true); //Loaded at startup or from Prefs now //FFmpegLibsInst->LoadLibs(NULL,true); //Loaded at startup or from Prefs now
if (!FFmpegLibsInst->ValidLibsLoaded()) return false; if (!FFmpegLibsInst->ValidLibsLoaded())
return false;
av_log_set_callback(av_log_wx_callback); av_log_set_callback(av_log_wx_callback);
@ -275,7 +284,8 @@ bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const T
} }
// mEncFormatCtx is used by libavformat to carry around context data re our output file. // mEncFormatCtx is used by libavformat to carry around context data re our output file.
if ((mEncFormatCtx = avformat_alloc_context()) == NULL) mEncFormatCtx.reset(avformat_alloc_context());
if (!mEncFormatCtx)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate output format context.")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate output format context.")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
@ -284,16 +294,30 @@ bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const T
// Initialise the output format context. // Initialise the output format context.
mEncFormatCtx->oformat = mEncFormatDesc; mEncFormatCtx->oformat = mEncFormatDesc;
memcpy(mEncFormatCtx->filename, OSINPUT(mName), strlen(OSINPUT(mName))+1); memcpy(mEncFormatCtx->filename, OSINPUT(mName), strlen(OSINPUT(mName))+1);
// At the moment Audacity can export only one audio stream // At the moment Audacity can export only one audio stream
if ((mEncAudioStream = avformat_new_stream(mEncFormatCtx, NULL)) == NULL) if ((mEncAudioStream = avformat_new_stream(mEncFormatCtx.get(), NULL)) == NULL)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't add audio stream to output file \"%s\"."), mName.c_str()), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't add audio stream to output file \"%s\"."), mName.c_str()),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return false; return false;
} }
// Documentation for avformat_new_stream says
// "User is required to call avcodec_close() and avformat_free_context() to clean
// up the allocation by avformat_new_stream()."
// We use smart pointers that ensure these cleanups either in their destructors or
// sooner if they are reset. These are std::unique_ptr with nondefault deleter
// template parameters.
// mEncFormatCtx takes care of avformat_free_context(), so
// mEncAudioStream can be a plain pointer.
// mEncAudioCodecCtx now becomes responsible for closing the codec:
mEncAudioCodecCtx.reset(mEncAudioStream->codec);
mEncAudioStream->id = 0; mEncAudioStream->id = 0;
// Open the output file. // Open the output file.
@ -305,6 +329,8 @@ bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const T
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return false; return false;
} }
// Give mUfileCloser responsibility
mUfileCloser.reset(mEncFormatCtx->pb);
} }
// Open the audio stream's codec and initialise any stream related data. // Open the audio stream's codec and initialise any stream related data.
@ -323,13 +349,17 @@ bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, const T
} }
// Write headers to the output file. // Write headers to the output file.
if ((err = avformat_write_header(mEncFormatCtx, NULL)) < 0) if ((err = avformat_write_header(mEncFormatCtx.get(), NULL)) < 0)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't write headers to output file \"%s\". Error code is %d."), mName.c_str(),err), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't write headers to output file \"%s\". Error code is %d."), mName.c_str(),err),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return false; return false;
} }
// Only now, we can keep all the resources until Finalize().
// Cancel the local cleanup.
cleanup.release();
return true; return true;
} }
@ -352,13 +382,13 @@ bool ExportFFmpeg::InitCodecs(AudacityProject *project)
{ {
AVCodec *codec = NULL; AVCodec *codec = NULL;
AVDictionary *options = NULL; AVDictionary *options = NULL;
AVDictionaryCleanup cleanup{ &options };
// Configure the audio stream's codec context. // Configure the audio stream's codec context.
mEncAudioCodecCtx = mEncAudioStream->codec;
mEncAudioCodecCtx->codec_id = ExportFFmpegOptions::fmts[mSubFormat].codecid; mEncAudioCodecCtx->codec_id = ExportFFmpegOptions::fmts[mSubFormat].codecid;
mEncAudioCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO; mEncAudioCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
mEncAudioCodecCtx->codec_tag = av_codec_get_tag((const AVCodecTag **)mEncFormatCtx->oformat->codec_tag,mEncAudioCodecCtx->codec_id); mEncAudioCodecCtx->codec_tag = av_codec_get_tag(mEncFormatCtx->oformat->codec_tag,mEncAudioCodecCtx->codec_id);
mSampleRate = (int)project->GetRate(); mSampleRate = (int)project->GetRate();
mEncAudioCodecCtx->global_quality = -99999; //quality mode is off by default; mEncAudioCodecCtx->global_quality = -99999; //quality mode is off by default;
@ -492,7 +522,7 @@ bool ExportFFmpeg::InitCodecs(AudacityProject *project)
} }
// Open the codec. // Open the codec.
if (avcodec_open2(mEncAudioCodecCtx, codec, &options) < 0) if (avcodec_open2(mEncAudioCodecCtx.get(), codec, &options) < 0)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't open audio codec 0x%x."),mEncAudioCodecCtx->codec_id), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't open audio codec 0x%x."),mEncAudioCodecCtx->codec_id),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
@ -508,11 +538,12 @@ bool ExportFFmpeg::InitCodecs(AudacityProject *project)
// The encoder may require a minimum number of raw audio samples for each encoding but we can't // The encoder may require a minimum number of raw audio samples for each encoding but we can't
// guarantee we'll get this minimum each time an audio frame is decoded from the input file so // guarantee we'll get this minimum each time an audio frame is decoded from the input file so
// we use a FIFO to store up incoming raw samples until we have enough for one call to the codec. // we use a FIFO to store up incoming raw samples until we have enough for one call to the codec.
mEncAudioFifo = av_fifo_alloc(1024); mEncAudioFifo.reset(av_fifo_alloc(1024));
mEncAudioFifoOutBufSiz = 2*MAX_AUDIO_PACKET_SIZE; mEncAudioFifoOutBufSiz = 2*MAX_AUDIO_PACKET_SIZE;
// Allocate a buffer to read OUT of the FIFO into. The FIFO maintains its own buffer internally. // Allocate a buffer to read OUT of the FIFO into. The FIFO maintains its own buffer internally.
if ((mEncAudioFifoOutBuf = (uint8_t*)av_malloc(mEncAudioFifoOutBufSiz)) == NULL) mEncAudioFifoOutBuf.reset(static_cast<int16_t*>(av_malloc(mEncAudioFifoOutBufSiz)));
if (!mEncAudioFifoOutBuf)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate buffer to read into from audio FIFO.")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate buffer to read into from audio FIFO.")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
@ -524,12 +555,14 @@ bool ExportFFmpeg::InitCodecs(AudacityProject *project)
static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_samples, int nb_samples) static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_samples, int nb_samples)
{ {
// Assume *pkt is already initialized.
int i, ch, buffer_size, ret, got_output = 0; int i, ch, buffer_size, ret, got_output = 0;
void *samples = NULL; AVMallocHolder<uint8_t> samples;
AVFrame *frame = NULL; AVFrameHolder frame;
if (audio_samples) { if (audio_samples) {
frame = av_frame_alloc(); frame.reset(av_frame_alloc());
if (!frame) if (!frame)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@ -546,15 +579,15 @@ static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_sam
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return buffer_size; return buffer_size;
} }
samples = av_malloc(buffer_size); samples.reset(static_cast<uint8_t*>(av_malloc(buffer_size)));
if (!samples) { if (!samples) {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not allocate bytes for samples buffer")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not allocate bytes for samples buffer")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
/* setup the data pointers in the AVFrame */ /* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt, ret = avcodec_fill_audio_frame(frame.get(), avctx->channels, avctx->sample_fmt,
(const uint8_t*)samples, buffer_size, 0); samples.get(), buffer_size, 0);
if (ret < 0) { if (ret < 0) {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not setup audio frame")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not setup audio frame")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
@ -598,11 +631,11 @@ static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_sam
} }
} }
} }
av_init_packet(pkt);
pkt->data = NULL; // packet data will be allocated by the encoder pkt->data = NULL; // packet data will be allocated by the encoder
pkt->size = 0; pkt->size = 0;
ret = avcodec_encode_audio2(avctx, pkt, frame, &got_output); ret = avcodec_encode_audio2(avctx, pkt, frame.get(), &got_output);
if (ret < 0) { if (ret < 0) {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - encoding frame failed")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - encoding frame failed")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
@ -611,9 +644,6 @@ static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_sam
pkt->dts = pkt->pts = AV_NOPTS_VALUE; // we dont set frame.pts thus dont trust the AVPacket ts pkt->dts = pkt->pts = AV_NOPTS_VALUE; // we dont set frame.pts thus dont trust the AVPacket ts
av_frame_free(&frame);
av_freep(&samples);
return got_output; return got_output;
} }
@ -625,115 +655,112 @@ bool ExportFFmpeg::Finalize()
// Flush the audio FIFO and encoder. // Flush the audio FIFO and encoder.
for (;;) for (;;)
{ {
AVPacket pkt;
int nFifoBytes = av_fifo_size(mEncAudioFifo); // any bytes left in audio FIFO?
av_init_packet(&pkt);
nEncodedBytes = 0;
int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);
if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz || nFifoBytes > mEncAudioFifoOutBufSiz) {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Too much remaining data.")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return false;
}
// Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because
// if it did we'd have pulled it from the FIFO during the last encodeAudioFrame() call -
// the encoder must support short/incomplete frames for this to work.
if (nFifoBytes > 0)
{ {
// Fill audio buffer with zeroes. If codec tries to read the whole buffer, AVPacketEx pkt;
// it will just read silence. If not - who cares? int nFifoBytes = av_fifo_size(mEncAudioFifo.get()); // any bytes left in audio FIFO?
memset(mEncAudioFifoOutBuf,0,mEncAudioFifoOutBufSiz);
const AVCodec *codec = mEncAudioCodecCtx->codec;
// We have an incomplete buffer of samples left. Is it OK to encode it? nEncodedBytes = 0;
// If codec supports CODEC_CAP_SMALL_LAST_FRAME, we can feed it with smaller frame int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);
// Or if codec is FLAC, feed it anyway (it doesn't have CODEC_CAP_SMALL_LAST_FRAME, but it works)
// Or if frame_size is 1, then it's some kind of PCM codec, they don't have frames and will be fine with the samples if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz || nFifoBytes > mEncAudioFifoOutBufSiz) {
// Or if user configured the exporter to pad with silence, then we'll send audio + silence as a frame. wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Too much remaining data.")),
if ((codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE)) _("FFmpeg Error"), wxOK | wxCENTER | wxICON_EXCLAMATION);
|| mEncAudioCodecCtx->frame_size <= 1 return false;
|| gPrefs->Read(wxT("/FileFormats/OverrideSmallLastFrame"), true) }
)
// Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because
// if it did we'd have pulled it from the FIFO during the last encodeAudioFrame() call -
// the encoder must support short/incomplete frames for this to work.
if (nFifoBytes > 0)
{ {
int frame_size = default_frame_size; // Fill audio buffer with zeroes. If codec tries to read the whole buffer,
// it will just read silence. If not - who cares?
memset(mEncAudioFifoOutBuf.get(), 0, mEncAudioFifoOutBufSiz);
const AVCodec *codec = mEncAudioCodecCtx->codec;
// The last frame is going to contain a smaller than usual number of samples. // We have an incomplete buffer of samples left. Is it OK to encode it?
// For codecs without CODEC_CAP_SMALL_LAST_FRAME use normal frame size // If codec supports CODEC_CAP_SMALL_LAST_FRAME, we can feed it with smaller frame
if (codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE)) // Or if codec is FLAC, feed it anyway (it doesn't have CODEC_CAP_SMALL_LAST_FRAME, but it works)
frame_size = nFifoBytes / (mEncAudioCodecCtx->channels * sizeof(int16_t)); // Or if frame_size is 1, then it's some kind of PCM codec, they don't have frames and will be fine with the samples
// Or if user configured the exporter to pad with silence, then we'll send audio + silence as a frame.
wxLogDebug(wxT("FFmpeg : Audio FIFO still contains %d bytes, writing %d sample frame ..."), if ((codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_VARIABLE_FRAME_SIZE))
nFifoBytes, frame_size); || mEncAudioCodecCtx->frame_size <= 1
|| gPrefs->Read(wxT("/FileFormats/OverrideSmallLastFrame"), true)
// Pull the bytes out from the FIFO and feed them to the encoder. )
if (av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nFifoBytes, NULL) == 0)
{ {
nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, (int16_t*)mEncAudioFifoOutBuf, frame_size); int frame_size = default_frame_size;
// The last frame is going to contain a smaller than usual number of samples.
// For codecs without CODEC_CAP_SMALL_LAST_FRAME use normal frame size
if (codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_VARIABLE_FRAME_SIZE))
frame_size = nFifoBytes / (mEncAudioCodecCtx->channels * sizeof(int16_t));
wxLogDebug(wxT("FFmpeg : Audio FIFO still contains %d bytes, writing %d sample frame ..."),
nFifoBytes, frame_size);
// Pull the bytes out from the FIFO and feed them to the encoder.
if (av_fifo_generic_read(mEncAudioFifo.get(), mEncAudioFifoOutBuf.get(), nFifoBytes, NULL) == 0)
{
nEncodedBytes = encode_audio(mEncAudioCodecCtx.get(), &pkt, mEncAudioFifoOutBuf.get(), frame_size);
}
} }
} }
} }
// Now flush the encoder. // Now flush the encoder.
if (nEncodedBytes <= 0)
nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, NULL, 0);
if (nEncodedBytes <= 0)
break;
pkt.stream_index = mEncAudioStream->index;
// Set presentation time of frame (currently in the codec's timebase) in the stream timebase.
if(pkt.pts != int64_t(AV_NOPTS_VALUE))
pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
if(pkt.dts != int64_t(AV_NOPTS_VALUE))
pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
if (av_interleaved_write_frame(mEncFormatCtx, &pkt) != 0)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Couldn't write last audio frame to output file.")), AVPacketEx pkt;
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); if (nEncodedBytes <= 0)
break; nEncodedBytes = encode_audio(mEncAudioCodecCtx.get(), &pkt, NULL, 0);
if (nEncodedBytes <= 0)
break;
pkt.stream_index = mEncAudioStream->index;
// Set presentation time of frame (currently in the codec's timebase) in the stream timebase.
if (pkt.pts != int64_t(AV_NOPTS_VALUE))
pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
if (pkt.dts != int64_t(AV_NOPTS_VALUE))
pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
if (av_interleaved_write_frame(mEncFormatCtx.get(), &pkt) != 0)
{
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Couldn't write last audio frame to output file.")),
_("FFmpeg Error"), wxOK | wxCENTER | wxICON_EXCLAMATION);
break;
}
} }
av_free_packet(&pkt);
} }
// Write any file trailers. // Write any file trailers.
av_write_trailer(mEncFormatCtx); av_write_trailer(mEncFormatCtx.get());
FreeResources();
return true;
}
void ExportFFmpeg::FreeResources()
{
// Close the codecs. // Close the codecs.
if (mEncAudioStream != NULL) mEncAudioCodecCtx.reset();
avcodec_close(mEncAudioStream->codec);
for (i = 0; i < (int)mEncFormatCtx->nb_streams; i++)
{
av_freep(&mEncFormatCtx->streams[i]->codec);
av_freep(&mEncFormatCtx->streams[i]);
}
// Close the output file if we created it. // Close the output file if we created it.
if (!(mEncFormatDesc->flags & AVFMT_NOFILE)) mUfileCloser.reset();
ufile_close(mEncFormatCtx->pb);
// Free any buffers or structures we allocated. // Free any buffers or structures we allocated.
av_free(mEncFormatCtx); mEncFormatCtx.reset();
av_freep(&mEncAudioFifoOutBuf); mEncAudioFifoOutBuf.reset();
mEncAudioFifoOutBufSiz = 0; mEncAudioFifoOutBufSiz = 0;
av_fifo_free(mEncAudioFifo); mEncAudioFifo.reset();
mEncAudioFifo = NULL; av_log_set_callback(av_log_default_callback);
return true;
} }
bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize) bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
{ {
AVPacket pkt;
int nBytesToWrite = 0; int nBytesToWrite = 0;
uint8_t *pRawSamples = NULL; uint8_t *pRawSamples = NULL;
int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t); int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);
@ -741,10 +768,10 @@ bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
nBytesToWrite = frameSize; nBytesToWrite = frameSize;
pRawSamples = (uint8_t*)pFrame; pRawSamples = (uint8_t*)pFrame;
av_fifo_realloc2(mEncAudioFifo, av_fifo_size(mEncAudioFifo) + frameSize); av_fifo_realloc2(mEncAudioFifo.get(), av_fifo_size(mEncAudioFifo.get()) + frameSize);
// Put the raw audio samples into the FIFO. // Put the raw audio samples into the FIFO.
ret = av_fifo_generic_write(mEncAudioFifo, pRawSamples, nBytesToWrite,NULL); ret = av_fifo_generic_write(mEncAudioFifo.get(), pRawSamples, nBytesToWrite,NULL);
wxASSERT(ret == nBytesToWrite); wxASSERT(ret == nBytesToWrite);
@ -755,15 +782,15 @@ bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
} }
// Read raw audio samples out of the FIFO in nAudioFrameSizeOut byte-sized groups to encode. // Read raw audio samples out of the FIFO in nAudioFrameSizeOut byte-sized groups to encode.
while ((ret = av_fifo_size(mEncAudioFifo)) >= nAudioFrameSizeOut) while ((ret = av_fifo_size(mEncAudioFifo.get())) >= nAudioFrameSizeOut)
{ {
ret = av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nAudioFrameSizeOut, NULL); ret = av_fifo_generic_read(mEncAudioFifo.get(), mEncAudioFifoOutBuf.get(), nAudioFrameSizeOut, NULL);
av_init_packet(&pkt); AVPacketEx pkt;
int ret= encode_audio(mEncAudioCodecCtx, int ret= encode_audio(mEncAudioCodecCtx.get(),
&pkt, // out &pkt, // out
(int16_t*)mEncAudioFifoOutBuf, // in mEncAudioFifoOutBuf.get(), // in
default_frame_size); default_frame_size);
if (ret < 0) if (ret < 0)
{ {
@ -784,13 +811,12 @@ bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
pkt.stream_index = mEncAudioStream->index; pkt.stream_index = mEncAudioStream->index;
// Write the encoded audio frame to the output file. // Write the encoded audio frame to the output file.
if ((ret = av_interleaved_write_frame(mEncFormatCtx, &pkt)) < 0) if ((ret = av_interleaved_write_frame(mEncFormatCtx.get(), &pkt)) < 0)
{ {
wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Failed to write audio frame to file.")), wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Failed to write audio frame to file.")),
_("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
return false; return false;
} }
av_free_packet(&pkt);
} }
return true; return true;
} }

@ -359,19 +359,15 @@ bool FFmpegImportFileHandle::Init()
av_log_set_callback(av_log_wx_callback); av_log_set_callback(av_log_wx_callback);
int err; int err;
std::unique_ptr<FFmpegContext> tempContext;
err = ufile_fopen_input(tempContext, mName);
if (err < 0)
{ {
std::unique_ptr<FFmpegContext> tempContext; wxLogError(wxT("FFmpeg : av_open_input_file() failed for file %s"), mName.c_str());
err = ufile_fopen_input(tempContext, mName); return false;
if (err < 0)
{
wxLogError(wxT("FFmpeg : av_open_input_file() failed for file %s"), mName.c_str());
return false;
}
wxASSERT(tempContext.get());
// Move from unique to shared pointer
mContext.reset(tempContext.release());
} }
mFormatContext = mContext->ic_ptr; wxASSERT(tempContext.get());
mFormatContext = tempContext->ic_ptr;
err = avformat_find_stream_info(mFormatContext, NULL); err = avformat_find_stream_info(mFormatContext, NULL);
if (err < 0) if (err < 0)
@ -380,7 +376,13 @@ bool FFmpegImportFileHandle::Init()
return false; return false;
} }
InitCodecs(); if (!InitCodecs())
return false;
// Only now do we postpone destroying the FFmpegContext.
// Move from unique to shared pointer
mContext.reset(tempContext.release());
return true; return true;
} }
@ -648,11 +650,7 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
} }
// Cleanup after frame decoding // Cleanup after frame decoding
if (sc->m_pktValid) sc->m_pkt.reset();
{
av_free_packet(&sc->m_pkt);
sc->m_pktValid = 0;
}
} }
} }
@ -662,15 +660,12 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
for (int i = 0; i < mNumStreams; i++) for (int i = 0; i < mNumStreams; i++)
{ {
auto sc = scs[i].get(); auto sc = scs[i].get();
sc->m_pkt.create();
if (DecodeFrame(sc, true) == 0) if (DecodeFrame(sc, true) == 0)
{ {
WriteData(sc); WriteData(sc);
if (sc->m_pktValid) sc->m_pkt.reset();
{
av_free_packet(&sc->m_pkt);
sc->m_pktValid = 0;
}
} }
} }
} }
@ -743,7 +738,7 @@ int FFmpegImportFileHandle::WriteData(streamContext *sc)
} }
// Separate the channels and convert input sample format to 16-bit // Separate the channels and convert input sample format to 16-bit
uint8_t *in = sc->m_decodedAudioSamples; uint8_t *in = sc->m_decodedAudioSamples.get();
int index = 0; int index = 0;
int pos = 0; int pos = 0;
while (pos < insamples) while (pos < insamples)
@ -810,9 +805,9 @@ int FFmpegImportFileHandle::WriteData(streamContext *sc)
int updateResult = eProgressSuccess; int updateResult = eProgressSuccess;
int64_t filesize = avio_size(mFormatContext->pb); int64_t filesize = avio_size(mFormatContext->pb);
// PTS (presentation time) is the proper way of getting current position // PTS (presentation time) is the proper way of getting current position
if (sc->m_pkt.pts != int64_t(AV_NOPTS_VALUE) && mFormatContext->duration != int64_t(AV_NOPTS_VALUE)) if (sc->m_pkt->pts != int64_t(AV_NOPTS_VALUE) && mFormatContext->duration != int64_t(AV_NOPTS_VALUE))
{ {
mProgressPos = sc->m_pkt.pts * sc->m_stream->time_base.num / sc->m_stream->time_base.den; mProgressPos = sc->m_pkt->pts * sc->m_stream->time_base.num / sc->m_stream->time_base.den;
mProgressLen = (mFormatContext->duration > 0 ? mFormatContext->duration / AV_TIME_BASE: 1); mProgressLen = (mFormatContext->duration > 0 ? mFormatContext->duration / AV_TIME_BASE: 1);
} }
// When PTS is not set, use number of frames and number of current frame // When PTS is not set, use number of frames and number of current frame
@ -822,9 +817,9 @@ int FFmpegImportFileHandle::WriteData(streamContext *sc)
mProgressLen = sc->m_stream->nb_frames; mProgressLen = sc->m_stream->nb_frames;
} }
// When number of frames is unknown, use position in file // When number of frames is unknown, use position in file
else if (filesize > 0 && sc->m_pkt.pos > 0 && sc->m_pkt.pos <= filesize) else if (filesize > 0 && sc->m_pkt->pos > 0 && sc->m_pkt->pos <= filesize)
{ {
mProgressPos = sc->m_pkt.pos; mProgressPos = sc->m_pkt->pos;
mProgressLen = filesize; mProgressLen = filesize;
} }
updateResult = mProgress->Update(mProgressPos, mProgressLen != 0 ? mProgressLen : 1); updateResult = mProgress->Update(mProgressPos, mProgressLen != 0 ? mProgressLen : 1);

@ -372,7 +372,7 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
// for some formats // for some formats
// The only other case for inserting silence is for initial offset and ImportFFmpeg.cpp does this for us // The only other case for inserting silence is for initial offset and ImportFFmpeg.cpp does this for us
if (seeking) { if (seeking) {
actualDecodeStart = 0.52 + (sc->m_stream->codec->sample_rate * sc->m_pkt.dts actualDecodeStart = 0.52 + (sc->m_stream->codec->sample_rate * sc->m_pkt->dts
* ((double)sc->m_stream->time_base.num / sc->m_stream->time_base.den)); * ((double)sc->m_stream->time_base.num / sc->m_stream->time_base.den));
//this is mostly safe because den is usually 1 or low number but check for high values. //this is mostly safe because den is usually 1 or low number but check for high values.
@ -421,11 +421,7 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
break; break;
// Cleanup after frame decoding // Cleanup after frame decoding
if (sc->m_pktValid) sc->m_pkt.reset();
{
av_free_packet(&sc->m_pkt);
sc->m_pktValid = 0;
}
} }
} }
@ -434,14 +430,11 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
{ {
for (int i = 0; i < mChannels.size(); i++) for (int i = 0; i < mChannels.size(); i++)
{ {
sc->m_pkt.create();
sc = scs[i].get(); sc = scs[i].get();
if (DecodeFrame(sc, true) == 0) if (DecodeFrame(sc, true) == 0)
{ {
if (sc->m_pktValid) sc->m_pkt.reset();
{
av_free_packet(&sc->m_pkt);
sc->m_pktValid = 0;
}
} }
} }
} }