diff --git a/src/FFmpeg.h b/src/FFmpeg.h
index fcbff8688..ae424382b 100644
--- a/src/FFmpeg.h
+++ b/src/FFmpeg.h
@@ -16,6 +16,8 @@ Describes shared object that is used to access FFmpeg libraries.
 #if !defined(__AUDACITY_FFMPEG__)
 #define __AUDACITY_FFMPEG__
 
+#include "MemoryX.h"
+
 // TODO: Determine whether the libav* headers come from the FFmpeg or libav
 // project and set IS_FFMPEG_PROJECT depending on it.
 #define IS_FFMPEG_PROJECT 1
@@ -395,7 +397,7 @@ int ufile_fopen(AVIOContext **s, const wxString & name, int flags);
 int ufile_fopen_input(AVFormatContext **ic_ptr, wxString & name);
 int ufile_close(AVIOContext *pb);
 
-typedef struct _streamContext
+struct streamContext
 {
    bool m_use;          // TRUE = this stream will be loaded into Audacity
    AVStream *m_stream;  // an AVStream *
@@ -421,7 +423,12 @@
    int m_osamplesize;         // output sample size in bytes
    sampleFormat m_osamplefmt; // output sample format
 
-} streamContext;
+   streamContext() { memset(this, 0, sizeof(*this)); }
+   ~streamContext();
+};
+
+using Scs = ArrayOf<std::unique_ptr<streamContext>>;
+using ScsPtr = std::shared_ptr<Scs>;
 
 // common utility functions
 // utility calls that are shared with ImportFFmpeg and ODDecodeFFmpegTask
@@ -853,6 +860,14 @@ extern "C" {
       (linesize, nb_channels, nb_samples, sample_fmt, align)
    );
 };
+
+
+inline streamContext::~streamContext()
+{
+   if (m_decodedAudioSamples)
+      av_free(m_decodedAudioSamples);
+}
+
 #endif
 
 #endif // USE_FFMPEG
diff --git a/src/import/ImportFFmpeg.cpp b/src/import/ImportFFmpeg.cpp
index 1c2c79ed5..d1c20803d 100644
--- a/src/import/ImportFFmpeg.cpp
+++ b/src/import/ImportFFmpeg.cpp
@@ -32,6 +32,7 @@ Licensed under the GNU General Public License v2 or later
 #endif
 
 #include "../Experimental.h"
+#include "../MemoryX.h"
 
 #define DESC _("FFmpeg-compatible files")
 
@@ -256,14 +257,14 @@ public:
    void SetStreamUsage(wxInt32 StreamID, bool Use)
    {
       if (StreamID < mNumStreams)
-         mScs[StreamID]->m_use = Use;
+         mScs->get()[StreamID]->m_use = Use;
    }
 
 private:
 
   AVFormatContext *mFormatContext; //!< Format description, also contains metadata and some useful info
   int mNumStreams;                 //!< mNumstreams is less or equal to mFormatContext->nb_streams
-  streamContext **mScs;            //!< Array of pointers to stream contexts. Length is mNumStreams.
+  ScsPtr mScs;                     //!< Points to array of pointers to stream contexts, which may be shared with a decoder task.
   wxArrayString *mStreamInfo;      //!< Array of stream descriptions. Length is mNumStreams
 
   wxInt64 mProgressPos;            //!< Current timestamp, file position or whatever is used as first argument for Update()
@@ -382,15 +383,14 @@ bool FFmpegImportFileHandle::InitCodecs()
 {
    // Allocate the array of pointers to hold stream contexts pointers
    // Some of the allocated space may be unused (corresponds to video, subtitle, or undecodeable audio streams)
-   mScs = (streamContext**)malloc(sizeof(streamContext**)*mFormatContext->nb_streams);
+   mScs = std::make_shared<Scs>(mFormatContext->nb_streams);
    // Fill the stream contexts
   for (unsigned int i = 0; i < mFormatContext->nb_streams; i++)
   {
      if (mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
      {
         //Create a context
-        streamContext *sc = new streamContext;
-        memset(sc,0,sizeof(*sc));
+        auto sc = std::make_unique<streamContext>();
         sc->m_stream = mFormatContext->streams[i];
         sc->m_codecCtx = sc->m_stream->codec;
@@ -400,14 +400,12 @@ bool FFmpegImportFileHandle::InitCodecs()
      {
         wxLogError(wxT("FFmpeg : avcodec_find_decoder() failed. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
        //FFmpeg can't decode this stream, skip it
-        delete sc;
        continue;
      }
      if (codec->type != sc->m_codecCtx->codec_type)
      {
        wxLogError(wxT("FFmpeg : Codec type mismatch, skipping. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
        //Non-audio codec reported as audio? Nevertheless, we don't need THIS.
-        delete sc;
        continue;
      }
 
@@ -415,7 +413,6 @@
      {
        wxLogError(wxT("FFmpeg : avcodec_open() failed. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
        //Can't open decoder - skip this stream
-        delete sc;
        continue;
      }
 
@@ -440,7 +437,7 @@
      }
      strinfo.Printf(_("Index[%02x] Codec[%s], Language[%s], Bitrate[%s], Channels[%d], Duration[%d]"),sc->m_stream->id,codec->name,lang.c_str(),bitrate.c_str(),sc->m_stream->codec->channels, duration);
      mStreamInfo->Add(strinfo);
-     mScs[mNumStreams++] = sc;
+     mScs->get()[mNumStreams++] = std::move(sc);
    }
    //for video and unknown streams do nothing
   }
@@ -469,14 +466,14 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
   CreateProgress();
 
   // Remove stream contexts which are not marked for importing and adjust mScs and mNumStreams accordingly
+  const auto scs = mScs->get();
   for (int i = 0; i < mNumStreams;)
   {
-     if (!mScs[i]->m_use)
+     if (!scs[i]->m_use)
      {
-        delete mScs[i];
        for (int j = i; j < mNumStreams - 1; j++)
        {
-           mScs[j] = mScs[j+1];
+           scs[j] = std::move(scs[j+1]);
        }
        mNumStreams--;
      }
@@ -490,32 +487,33 @@
   {
      ++s;
 
-     switch (mScs[s]->m_stream->codec->sample_fmt)
+     auto sc = scs[s].get();
+     switch (sc->m_stream->codec->sample_fmt)
      {
        case AV_SAMPLE_FMT_U8:
        case AV_SAMPLE_FMT_S16:
        case AV_SAMPLE_FMT_U8P:
        case AV_SAMPLE_FMT_S16P:
-          mScs[s]->m_osamplesize = sizeof(int16_t);
-          mScs[s]->m_osamplefmt = int16Sample;
+          sc->m_osamplesize = sizeof(int16_t);
+          sc->m_osamplefmt = int16Sample;
           break;
        default:
-          mScs[s]->m_osamplesize = sizeof(float);
-          mScs[s]->m_osamplefmt = floatSample;
+          sc->m_osamplesize = sizeof(float);
+          sc->m_osamplefmt = floatSample;
           break;
      }
 
     // There is a possibility that number of channels will change over time, but we do not have WaveTracks for NEW channels. Remember the number of channels and stick to it.
-     mScs[s]->m_initialchannels = mScs[s]->m_stream->codec->channels;
-     stream.resize(mScs[s]->m_stream->codec->channels);
+     sc->m_initialchannels = sc->m_stream->codec->channels;
+     stream.resize(sc->m_stream->codec->channels);
 
     int c = -1;
     for (auto &channel : stream)
     {
        ++c;
-        channel = trackFactory->NewWaveTrack(mScs[s]->m_osamplefmt, mScs[s]->m_stream->codec->sample_rate);
+        channel = trackFactory->NewWaveTrack(sc->m_osamplefmt, sc->m_stream->codec->sample_rate);
 
-        if (mScs[s]->m_stream->codec->channels == 2)
+        if (sc->m_stream->codec->channels == 2)
        {
           switch (c)
           {
@@ -544,10 +542,11 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
      ++s;
 
      int64_t stream_delay = 0;
-     if (mScs[s]->m_stream->start_time != int64_t(AV_NOPTS_VALUE) && mScs[s]->m_stream->start_time > 0)
+     auto sc = scs[s].get();
+     if (sc->m_stream->start_time != int64_t(AV_NOPTS_VALUE) && sc->m_stream->start_time > 0)
      {
-        stream_delay = mScs[s]->m_stream->start_time;
-        wxLogDebug(wxT("Stream %d start_time = %lld, that would be %f milliseconds."), s, (long long) mScs[s]->m_stream->start_time, double(mScs[s]->m_stream->start_time)/AV_TIME_BASE*1000);
+        stream_delay = sc->m_stream->start_time;
+        wxLogDebug(wxT("Stream %d start_time = %lld, that would be %f milliseconds."), s, (long long) sc->m_stream->start_time, double(sc->m_stream->start_time)/AV_TIME_BASE*1000);
      }
      if (stream_delay != 0)
      {
@@ -573,22 +572,26 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
   if (mUsingOD) {
     std::vector<ODDecodeFFmpegTask*> tasks;
     //append blockfiles to each stream and add an individual ODDecodeTask for each one.
-    for (int s = 0; s < mNumStreams; s++) {
-      ODDecodeFFmpegTask* odTask=new ODDecodeFFmpegTask(mScs,mNumStreams,mChannels,mFormatContext, s);
+    s = -1;
+    for (const auto &stream : mChannels) {
+      ++s;
+      ODDecodeFFmpegTask* odTask =
+         new ODDecodeFFmpegTask(mScs, ODDecodeFFmpegTask::FromList(mChannels), mFormatContext, s);
       odTask->CreateFileDecoder(mFilename);
 
       //each stream has different duration. We need to know it if seeking is to be allowed.
       sampleCount sampleDuration = 0;
-      if (mScs[s]->m_stream->duration > 0)
-        sampleDuration = ((sampleCount)mScs[s]->m_stream->duration * mScs[s]->m_stream->time_base.num) *mScs[s]->m_stream->codec->sample_rate / mScs[s]->m_stream->time_base.den;
+      auto sc = scs[s].get();
+      if (sc->m_stream->duration > 0)
+        sampleDuration = ((sampleCount)sc->m_stream->duration * sc->m_stream->time_base.num) * sc->m_stream->codec->sample_rate / sc->m_stream->time_base.den;
       else
-        sampleDuration = ((sampleCount)mFormatContext->duration *mScs[s]->m_stream->codec->sample_rate) / AV_TIME_BASE;
+        sampleDuration = ((sampleCount)mFormatContext->duration *sc->m_stream->codec->sample_rate) / AV_TIME_BASE;
 
-     // printf(" OD duration samples %qi, sr %d, secs %d\n",sampleDuration, (int)mScs[s]->m_stream->codec->sample_rate,(int)sampleDuration/mScs[s]->m_stream->codec->sample_rate);
+     // printf(" OD duration samples %qi, sr %d, secs %d\n",sampleDuration, (int)sc->m_stream->codec->sample_rate, (int)sampleDuration/sc->m_stream->codec->sample_rate);
 
       //for each wavetrack within the stream add coded blockfiles
-      for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++) {
-        WaveTrack *t = mChannels[s][c];
+      for (int c = 0; c < sc->m_stream->codec->channels; c++) {
+        WaveTrack *t = stream[c].get();
        odTask->AddWaveTrack(t);
 
        sampleCount maxBlockSize = t->GetMaxBlockSize();
@@ -598,12 +601,12 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
          if (i + blockLen > sampleDuration)
            blockLen = sampleDuration - i;
 
-         t->AppendCoded(mFilename, i, blockLen, c,ODTask::eODFFMPEG);
+         t->AppendCoded(mFilename, i, blockLen, c, ODTask::eODFFMPEG);
 
         // This only works well for single streams since we assume
         // each stream is of the same duration and channels
-         res = mProgress->Update(i+sampleDuration*c+ sampleDuration*mScs[s]->m_stream->codec->channels*s,
-            sampleDuration*mScs[s]->m_stream->codec->channels*mNumStreams);
+         res = mProgress->Update(i+sampleDuration*c+ sampleDuration*sc->m_stream->codec->channels*s,
+            sampleDuration*sc->m_stream->codec->channels*mNumStreams);
          if (res != eProgressSuccess)
            break;
       }
@@ -621,10 +624,9 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
      }
   } else {
 #endif
-   streamContext *sc = NULL;
 
   // Read next frame.
-   while ((sc = ReadNextFrame()) != NULL && (res == eProgressSuccess))
+   for (streamContext *sc; (sc = ReadNextFrame()) != NULL && (res == eProgressSuccess);)
    {
       // ReadNextFrame returns 1 if stream is not to be imported
       if (sc != (streamContext*)1)
@@ -654,14 +656,15 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
    {
       for (int i = 0; i < mNumStreams; i++)
       {
-         if (DecodeFrame(mScs[i], true) == 0)
+         auto sc = scs[i].get();
+         if (DecodeFrame(sc, true) == 0)
          {
-            WriteData(mScs[i]);
+            WriteData(sc);
 
-            if (mScs[i]->m_pktValid)
+            if (sc->m_pktValid)
             {
-               av_free_packet(&mScs[i]->m_pkt);
-               mScs[i]->m_pktValid = 0;
+               av_free_packet(&sc->m_pkt);
+               sc->m_pktValid = 0;
             }
          }
       }
@@ -693,7 +696,11 @@ int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
 
 streamContext *FFmpegImportFileHandle::ReadNextFrame()
 {
-   return import_ffmpeg_read_next_frame(mFormatContext, mScs, mNumStreams);
+   // Get pointer to array of contiguous unique_ptrs
+   auto scs = mScs->get();
+   // This reinterpret_cast to array of plain pointers is innocent
+   return import_ffmpeg_read_next_frame
+      (mFormatContext, reinterpret_cast<streamContext **>(scs), mNumStreams);
 }
 
 int FFmpegImportFileHandle::DecodeFrame(streamContext *sc, bool flushing)
@@ -706,9 +713,10 @@ int FFmpegImportFileHandle::WriteData(streamContext *sc)
    // Find the stream index in mScs array
    int streamid = -1;
    auto iter = mChannels.begin();
+   auto scs = mScs->get();
    for (int i = 0; i < mNumStreams; ++iter, ++i)
    {
-      if (mScs[i] == sc)
+      if (scs[i].get() == sc)
       {
          streamid = i;
         break;
      }
   }
@@ -858,14 +866,6 @@ FFmpegImportFileHandle::~FFmpegImportFileHandle()
      av_log_set_callback(av_log_default_callback);
   }
 
-  for (int i = 0; i < mNumStreams; i++)
-  {
-     if (mScs[i]->m_decodedAudioSamples != NULL)
-        av_free(mScs[i]->m_decodedAudioSamples);
-
-     delete mScs[i];
-  }
-  free(mScs);
 #ifdef EXPERIMENTAL_OD_FFMPEG
   }//mUsingOD
 #endif
diff --git a/src/ondemand/ODDecodeFFmpegTask.cpp b/src/ondemand/ODDecodeFFmpegTask.cpp
index 85be825e2..6a6beb789 100644
--- a/src/ondemand/ODDecodeFFmpegTask.cpp
+++ b/src/ondemand/ODDecodeFFmpegTask.cpp
@@ -23,6 +23,9 @@
 #ifdef USE_FFMPEG
 #ifdef EXPERIMENTAL_OD_FFMPEG
 
+#include <algorithm>
+#include <functional>
+
 #include "../FFmpeg.h"   // which brings in avcodec.h, avformat.h
 #include "../import/ImportFFmpeg.h"
 
@@ -54,11 +57,13 @@ typedef struct _FFmpegDecodeCache
 ///class to decode a particular file (one per file).  Saves info such as filename and length (after the header is read.)
 class ODFFmpegDecoder final : public ODFileDecoder
 {
-
-
 public:
    ///This should handle unicode converted to UTF-8 on mac/linux, but OD TODO:check on windows
-   ODFFmpegDecoder(const wxString & fileName, streamContext** scs, int numStreams,WaveTrack*** channels, AVFormatContext* formatContext, int streamIndex);
+   ODFFmpegDecoder(const wxString & fileName,
+      const ScsPtr &scs,
+      ODDecodeFFmpegTask::Streams &&channels,
+      AVFormatContext* formatContext,
+      int streamIndex);
   virtual ~ODFFmpegDecoder();
 
   ///Decodes the samples for this blockfile from the real file into a float buffer.
@@ -92,9 +97,8 @@ private:
   ///\return 0 on success, -1 if it can't decode any further
   int DecodeFrame(streamContext *sc, bool flushing);
 
-  int mNumStreams;
-  streamContext **mScs; //!< Array of pointers to stream contexts. Length is mNumStreams.
-  WaveTrack*** mChannels;
+  ScsPtr mScs; //!< Pointer to array of pointers to stream contexts.
+  ODDecodeFFmpegTask::Streams mChannels;
   AVFormatContext *mFormatContext; //!< Format description, also contains metadata and some useful info
   std::vector<FFmpegDecodeCache*> mDecodeCache;
   int mNumSamplesInCache;
@@ -105,33 +109,44 @@ private:
   int mStreamIndex;
 };
 
+auto ODDecodeFFmpegTask::FromList(const std::list<TrackHolders> &channels) -> Streams
+{
+   Streams streams;
+   streams.reserve(channels.size());
+   using namespace std;
+   transform(channels.begin(), channels.end(), back_inserter(streams),
+      [](const TrackHolders &holders) {
+         Channels channels;
+         channels.reserve(holders.size());
+         transform(holders.begin(), holders.end(), back_inserter(channels),
+            mem_fun_ref(&TrackHolders::value_type::get)
+         );
+         return channels;
+      }
+   );
+   return streams;
+}
 
 //------ ODDecodeFFmpegTask definitions
-ODDecodeFFmpegTask::ODDecodeFFmpegTask(void* scs,int numStreams, WaveTrack*** channels, void* formatContext, int streamIndex)
+ODDecodeFFmpegTask::ODDecodeFFmpegTask(const ScsPtr &scs, Streams &&channels, void* formatContext, int streamIndex)
+   : mChannels(std::move(channels))
 {
    mScs=scs;
-   mNumStreams=numStreams;
-   mChannels=channels;
    mFormatContext = formatContext;
    mStreamIndex = streamIndex;
-   //TODO we probably need to create a NEW WaveTrack*** pointer and copy.
-   //same for streamContext, but we should also use a ref counting system - this should be added to streamContext
-   //   mScs = (streamContext**)malloc(sizeof(streamContext**)*mFormatContext->nb_streams);
 }
 
 ODDecodeFFmpegTask::~ODDecodeFFmpegTask()
 {
 }
 
-ODTask* ODDecodeFFmpegTask::Clone()
+ODTask *ODDecodeFFmpegTask::Clone()
 {
-   //we need to create copies of mScs. It would be better to use a reference counter system.
-
-   ODDecodeFFmpegTask* clone = new ODDecodeFFmpegTask((void*)mScs,mNumStreams,mChannels,mFormatContext,mStreamIndex);
+   auto clone = std::make_unique<ODDecodeFFmpegTask>(mScs, Streams{ mChannels }, mFormatContext, mStreamIndex);
   clone->mDemandSample=GetDemandSample();
 
   //the decoders and blockfiles should not be copied.  They are created as the task runs.
-  return clone;
+  return clone.release();
 }
 
 ///Creates an ODFileDecoder that decodes a file of filetype the subclass handles.
@@ -140,7 +155,9 @@ ODTask* ODDecodeFFmpegTask::Clone()
 ODFileDecoder* ODDecodeFFmpegTask::CreateFileDecoder(const wxString & fileName)
 {
    // Open the file for import
-   ODFFmpegDecoder *decoder = new ODFFmpegDecoder(fileName, (streamContext**) mScs,mNumStreams,mChannels,(AVFormatContext*)mFormatContext, mStreamIndex);
+   ODFFmpegDecoder *decoder =
+      new ODFFmpegDecoder(fileName, mScs, ODDecodeFFmpegTask::Streams{ mChannels },
+         (AVFormatContext*)mFormatContext, mStreamIndex);
 
    mDecoders.push_back(decoder);
    return decoder;
@@ -219,10 +236,13 @@ test_failed:
 
 
 //------ ODDecodeFFmpegFileDecoder
-ODFFmpegDecoder::ODFFmpegDecoder(const wxString & fileName, streamContext** scs,int numStreams,WaveTrack*** channels, AVFormatContext* formatContext, int streamIndex)
+ODFFmpegDecoder::ODFFmpegDecoder(const wxString & fileName,
+   const ScsPtr &scs,
+   ODDecodeFFmpegTask::Streams &&channels,
+   AVFormatContext* formatContext,
+   int streamIndex)
 :ODFileDecoder(fileName),
 //mSamplesDone(0),
-mNumStreams(numStreams),
 mScs(scs),
 mFormatContext(formatContext),
 mNumSamplesInCache(0),
@@ -233,22 +253,14 @@ mStreamIndex(streamIndex)
    PickFFmpegLibs();
 
    //do a shallow copy of the 2d array.
-   mChannels = new WaveTrack **[mNumStreams];
+   mChannels = std::move(channels);
-   for (int s = 0; s < mNumStreams; s++)
-   {
-      mChannels[s] = new WaveTrack *[mScs[s]->m_stream->codec->channels];
-      int c;
-      for (c = 0; c < mScs[s]->m_stream->codec->channels; c++)
-      {
-         mChannels[s][c] = channels[s][c];
-      }
-   }
 
    // get the current stream start time.
    int64_t stream_delay = 0;
-   if (mScs[streamIndex]->m_stream->start_time != int64_t(AV_NOPTS_VALUE) &&
-      mScs[streamIndex]->m_stream->start_time > 0) {
-      stream_delay = mScs[streamIndex]->m_stream->start_time;
+   const auto sc = mScs->get()[streamIndex].get();
+   if (sc->m_stream->start_time != int64_t(AV_NOPTS_VALUE) &&
+      sc->m_stream->start_time > 0) {
+      stream_delay = sc->m_stream->start_time;
    }
   mCurrentPos = double(stream_delay) / AV_TIME_BASE;
 
@@ -265,15 +277,6 @@ ODFFmpegDecoder::~ODFFmpegDecoder()
      av_log_set_callback(av_log_default_callback);
   }
 
-  for (int i = 0; i < mNumStreams; i++)
-  {
-     if (mScs[i]->m_decodedAudioSamples != NULL)
-        av_free(mScs[i]->m_decodedAudioSamples);
-
-     delete mScs[i];
-  }
-  free(mScs);
-
   //DELETE our caches.
   while(mDecodeCache.size())
   {
@@ -282,12 +285,6 @@ ODFFmpegDecoder::~ODFFmpegDecoder()
     mDecodeCache.erase(mDecodeCache.begin());
   }
 
-  //free the channel pointer arrays
-  for (int s = 0; s < mNumStreams; s++)
-  {
-     delete[] mChannels[s];
-  }
-  delete[] mChannels;
   DropFFmpegLibs();
 }
 
@@ -298,7 +295,9 @@ ODFFmpegDecoder::~ODFFmpegDecoder()
 #define kMaxSeekRewindAttempts 8
 int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCount start, sampleCount len, unsigned int channel)
 {
-   format = mScs[mStreamIndex]->m_osamplefmt;
+   auto scs = mScs->get();
+   auto sci = scs[mStreamIndex].get();
+   format = sci->m_osamplefmt;
 
   data.Allocate(len, format);
   samplePtr bufStart = data.ptr();
@@ -315,7 +314,7 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
   bool seeking = false;
   //look at the decoding timestamp and see if the next sample that will be decoded is not the next sample we need.
   if(len && (mCurrentPos > start + len || mCurrentPos + kDecodeSampleAllowance < start ) && SeekingAllowed()) {
-     sc = mScs[mStreamIndex];
+     sc = sci;
     AVStream* st = sc->m_stream;
     int stindex = -1;
     uint64_t targetts;
@@ -404,9 +403,9 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
        // 16 bit int out.
        // 32 bit int, float, double mean float out.
        if (format == int16Sample)
-          cache->samplefmt = SAMPLE_FMT_S16;
+          cache->samplefmt = AV_SAMPLE_FMT_S16;
        else
-          cache->samplefmt = SAMPLE_FMT_FLT;
+          cache->samplefmt = AV_SAMPLE_FMT_FLT;
 
        cache->samplePtr = (uint8_t*) malloc(amt * cache->numChannels * SAMPLE_SIZE(format));
 
@@ -435,14 +434,15 @@ int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCo
    // Flush the decoders if we're done.
    if((!sc || sc == (streamContext*) 1)&& len>0)
    {
-      for (int i = 0; i < mNumStreams; i++)
+      for (int i = 0; i < mChannels.size(); i++)
       {
-         if (DecodeFrame(mScs[i], true) == 0)
+         sc = scs[i].get();
+         if (DecodeFrame(sc, true) == 0)
          {
-            if (mScs[i]->m_pktValid)
+            if (sc->m_pktValid)
             {
-               av_free_packet(&mScs[i]->m_pkt);
-               mScs[i]->m_pktValid = 0;
+               av_free_packet(&sc->m_pkt);
+               sc->m_pktValid = 0;
             }
          }
       }
@@ -530,27 +530,27 @@ int ODFFmpegDecoder::FillDataFromCache(samplePtr & data, sampleFormat outFormat,
            inIndex = (hitStartInCache + j) * nChannels + channel;
            switch (mDecodeCache[i]->samplefmt)
            {
-              case SAMPLE_FMT_U8:
+              case AV_SAMPLE_FMT_U8:
                  //printf("u8 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                 ((int16_t *)outBuf)[outIndex] = (int16_t) (((uint8_t*)mDecodeCache[i]->samplePtr)[inIndex] - 0x80) << 8;
                 break;
 
-              case SAMPLE_FMT_S16:
+              case AV_SAMPLE_FMT_S16:
                 //printf("u16 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                 ((int16_t *)outBuf)[outIndex] = ((int16_t*)mDecodeCache[i]->samplePtr)[inIndex];
                 break;
 
-              case SAMPLE_FMT_S32:
+              case AV_SAMPLE_FMT_S32:
                 //printf("s32 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                 ((float *)outBuf)[outIndex] = (float) ((int32_t*)mDecodeCache[i]->samplePtr)[inIndex] * (1.0 / (1 << 31));
                 break;
 
-              case SAMPLE_FMT_FLT:
+              case AV_SAMPLE_FMT_FLT:
                 //printf("f in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                 ((float *)outBuf)[outIndex] = (float) ((float*)mDecodeCache[i]->samplePtr)[inIndex];
                 break;
 
-              case SAMPLE_FMT_DBL:
+              case AV_SAMPLE_FMT_DBL:
                 //printf("dbl in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                 ((float *)outBuf)[outIndex] = (float) ((double*)mDecodeCache[i]->samplePtr)[inIndex];
                 break;
@@ -588,7 +588,11 @@ int ODFFmpegDecoder::FillDataFromCache(samplePtr & data, sampleFormat outFormat,
 //get the right stream pointer.
 streamContext* ODFFmpegDecoder::ReadNextFrame()
 {
-   return import_ffmpeg_read_next_frame(mFormatContext, mScs, mNumStreams);
+   // Get pointer to array of contiguous unique_ptrs
+   auto scs = mScs->get();
+   // This reinterpret_cast to array of plain pointers is innocent
+   return import_ffmpeg_read_next_frame
+      (mFormatContext, reinterpret_cast<streamContext **>(scs), mChannels.size());
 }
 
 
diff --git a/src/ondemand/ODDecodeFFmpegTask.h b/src/ondemand/ODDecodeFFmpegTask.h
index d6a30e5bf..1c502a1f5 100644
--- a/src/ondemand/ODDecodeFFmpegTask.h
+++ b/src/ondemand/ODDecodeFFmpegTask.h
@@ -10,6 +10,7 @@
 ******************************************************************/
 
 #include "../Experimental.h"
+#include "../MemoryX.h"
 
 #ifdef EXPERIMENTAL_OD_FFMPEG
 
@@ -25,13 +26,17 @@ class WaveTrack;
 /// A class representing a modular task to be used with the On-Demand structures.
 class ODDecodeFFmpegTask final : public ODDecodeTask
 {
- public:
+public:
+   using Channels = std::vector < WaveTrack* >;
+   using Streams = std::vector < Channels >;
+
+   static Streams FromList(const std::list<TrackHolders> &channels);
 
    /// Constructs an ODTask
-   ODDecodeFFmpegTask(void* scs,int numStreams, WaveTrack*** channels, void* formatContext, int streamIndex);
+   ODDecodeFFmpegTask(const ScsPtr &scs, Streams &&channels, void* formatContext, int streamIndex);
 
   virtual ~ODDecodeFFmpegTask();
 
-  ODTask* Clone() override;
+  ODTask *Clone() override;
   ///Creates an ODFileDecoder that decodes a file of filetype the subclass handles.
   ODFileDecoder* CreateFileDecoder(const wxString & fileName) override;
 
@@ -39,10 +44,11 @@ class ODDecodeFFmpegTask final : public ODDecodeTask
   ///Subclasses should override to return respective type.
   unsigned int GetODType() override {return eODFFMPEG;}
 
- protected:
-  WaveTrack*** mChannels;
-  int mNumStreams;
-  void* mScs;
+protected:
+  // non-owning pointers to WaveTracks:
+  Streams mChannels;
+
+  ScsPtr mScs;
   void* mFormatContext;
   int mStreamIndex;
 };