/**********************************************************************

  Audacity: A Digital Audio Editor

  RealtimeEffectManager.cpp

  Paul Licameli split from EffectManager.cpp

**********************************************************************/

#include "RealtimeEffectManager.h"
|
|
|
|
#include "tenacity/EffectInterface.h"
|
|
#include <memory>
|
|
|
|
#include <atomic>
|
|
#include <wx/time.h>
|
|
|
|
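// RealtimeEffectState pairs one realtime-capable effect with the bookkeeping
// the manager needs for it: the starting processor index for each channel
// group and a suspend count, which begins at 1 so the effect stays inactive
// until it is explicitly resumed.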
class RealtimeEffectState
{
public:
   explicit RealtimeEffectState( EffectClientInterface &effect );

   EffectClientInterface &GetEffect() const { return mEffect; }

   bool RealtimeSuspend();
   bool RealtimeResume();
   bool RealtimeAddProcessor(int group, unsigned chans, float rate);
   size_t RealtimeProcess(int group,
      unsigned chans, float **inbuf, float **outbuf, size_t numSamples);
   bool IsRealtimeActive();

private:
   EffectClientInterface &mEffect;

   std::vector<int> mGroupProcessor;
   int mCurrentProcessor;

   std::atomic<int> mRealtimeSuspendCount{ 1 };    // Effects are initially suspended
};

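// Return the single, lazily constructed instance of the manager.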
RealtimeEffectManager & RealtimeEffectManager::Get()
{
   static RealtimeEffectManager rem;
   return rem;
}

RealtimeEffectManager::RealtimeEffectManager()
{
   mRealtimeLock.Enter();
   mRealtimeActive = false;
   mRealtimeSuspended = true;
   mRealtimeLatency = 0;
   mRealtimeLock.Leave();
}

RealtimeEffectManager::~RealtimeEffectManager()
{
}

#if defined(EXPERIMENTAL_EFFECTS_RACK)
void RealtimeEffectManager::RealtimeSetEffects(const EffectArray & effects)
{
   // Block RealtimeProcess()
   RealtimeSuspend();

   decltype( mStates ) newStates;
   auto begin = mStates.begin(), end = mStates.end();
   for ( auto pEffect : effects ) {
      auto found = std::find_if( begin, end,
         [=]( const decltype( mStates )::value_type &state ){
            return state && &state->GetEffect() == pEffect;
         }
      );
      if ( found == end ) {
         // Tell New effect to get ready
         pEffect->RealtimeInitialize();
         newStates.emplace_back(
            std::make_unique< RealtimeEffectState >( *pEffect ) );
      }
      else {
         // Preserve state for effect that remains in the chain
         newStates.emplace_back( std::move( *found ) );
      }
   }

   // Remaining states that were not moved need to clean up
   for ( auto &state : mStates ) {
      if ( state )
         state->GetEffect().RealtimeFinalize();
   }

   // Get rid of the old chain
   // And install the NEW one
   mStates.swap( newStates );

   // Allow RealtimeProcess() to, well, process
   RealtimeResume();
}
#endif

bool RealtimeEffectManager::RealtimeIsActive()
{
   return mStates.size() != 0;
}

bool RealtimeEffectManager::RealtimeIsSuspended()
{
   return mRealtimeSuspended;
}

void RealtimeEffectManager::RealtimeAddEffect(EffectClientInterface *effect)
{
   // Block RealtimeProcess()
   RealtimeSuspend();

   // Add to list of active effects
   mStates.emplace_back( std::make_unique< RealtimeEffectState >( *effect ) );
   auto &state = mStates.back();

   // Initialize effect if realtime is already active
   if (mRealtimeActive)
   {
      // Initialize realtime processing
      effect->RealtimeInitialize();

      // Add the required processors
      for (size_t i = 0, cnt = mRealtimeChans.size(); i < cnt; i++)
      {
         state->RealtimeAddProcessor(i, mRealtimeChans[i], mRealtimeRates[i]);
      }
   }

   // Allow RealtimeProcess() to, well, process
   RealtimeResume();
}

void RealtimeEffectManager::RealtimeRemoveEffect(EffectClientInterface *effect)
{
   // Block RealtimeProcess()
   RealtimeSuspend();

   if (mRealtimeActive)
   {
      // Cleanup realtime processing
      effect->RealtimeFinalize();
   }

   // Remove from list of active effects
   auto end = mStates.end();
   auto found = std::find_if( mStates.begin(), end,
      [&](const decltype(mStates)::value_type &state){
         return &state->GetEffect() == effect;
      }
   );
   if (found != end)
      mStates.erase(found);

   // Allow RealtimeProcess() to, well, process
   RealtimeResume();
}

void RealtimeEffectManager::RealtimeInitialize(double rate)
{
   // The audio thread should not be running yet, but protect anyway
   RealtimeSuspend();

   // (Re)Set processor parameters
   mRealtimeChans.clear();
   mRealtimeRates.clear();

   // RealtimeAdd/RemoveEffect() needs to know when we're active so it can
   // initialize newly added effects
   mRealtimeActive = true;

   // Tell each effect to get ready for action
   for (auto &state : mStates) {
      state->GetEffect().SetSampleRate(rate);
      state->GetEffect().RealtimeInitialize();
   }

   // Get things moving
   RealtimeResume();
}

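// Invoked for each channel group of the audio stream; the channel count and
// sample rate are remembered so that effects added while the stream is running
// can be given matching processors (see RealtimeAddEffect above).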
void RealtimeEffectManager::RealtimeAddProcessor(int group, unsigned chans, float rate)
{
   for (auto &state : mStates)
      state->RealtimeAddProcessor(group, chans, rate);

   mRealtimeChans.push_back(chans);
   mRealtimeRates.push_back(rate);
}

void RealtimeEffectManager::RealtimeFinalize()
{
   // Make sure nothing is going on
   RealtimeSuspend();

   // It is now safe to clean up
   mRealtimeLatency = 0;

   // Tell each effect to clean up as well
   for (auto &state : mStates)
      state->GetEffect().RealtimeFinalize();

   // Reset processor parameters
   mRealtimeChans.clear();
   mRealtimeRates.clear();

   // No longer active
   mRealtimeActive = false;
}

void RealtimeEffectManager::RealtimeSuspend()
{
   mRealtimeLock.Enter();

   // Already suspended...bail
   if (mRealtimeSuspended)
   {
      mRealtimeLock.Leave();
      return;
   }

   // Show that we aren't going to be doing anything
   mRealtimeSuspended = true;

   // And make sure the effects don't either
   for (auto &state : mStates)
      state->RealtimeSuspend();

   mRealtimeLock.Leave();
}

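// Suspend just one effect's state, leaving the rest of the chain untouched.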
void RealtimeEffectManager::RealtimeSuspendOne( EffectClientInterface &effect )
{
   auto begin = mStates.begin(), end = mStates.end();
   auto found = std::find_if( begin, end,
      [&effect]( const decltype( mStates )::value_type &state ){
         return state && &state->GetEffect() == &effect;
      }
   );
   if ( found != end )
      (*found)->RealtimeSuspend();
}

void RealtimeEffectManager::RealtimeResume()
{
   mRealtimeLock.Enter();

   // Already running...bail
   if (!mRealtimeSuspended)
   {
      mRealtimeLock.Leave();
      return;
   }

   // Tell the effects to get ready for more action
   for (auto &state : mStates)
      state->RealtimeResume();

   // And we should too
   mRealtimeSuspended = false;

   mRealtimeLock.Leave();
}

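// Resume just one effect's state, leaving the rest of the chain untouched.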
void RealtimeEffectManager::RealtimeResumeOne( EffectClientInterface &effect )
{
   auto begin = mStates.begin(), end = mStates.end();
   auto found = std::find_if( begin, end,
      [&effect]( const decltype( mStates )::value_type &state ){
         return state && &state->GetEffect() == &effect;
      }
   );
   if ( found != end )
      (*found)->RealtimeResume();
}

//
// This will be called in a different thread than the main GUI thread.
//
void RealtimeEffectManager::RealtimeProcessStart()
{
   // Protect ourselves from the main thread
   mRealtimeLock.Enter();

   // Can be suspended because of the audio stream being paused or because effects
   // have been suspended.
   if (!mRealtimeSuspended)
   {
      for (auto &state : mStates)
      {
         if (state->IsRealtimeActive())
            state->GetEffect().RealtimeProcessStart();
      }
   }

   mRealtimeLock.Leave();
}

//
// This will be called in a different thread than the main GUI thread.
//
size_t RealtimeEffectManager::RealtimeProcess(int group, unsigned chans, float **buffers, size_t numSamples)
{
   // Allocate the in/out buffer arrays
   auto ibuf = new float* [chans];
   auto obuf = new float* [chans];
   float* temp = safenew float[numSamples];

   const size_t memcpy_size = numSamples * sizeof(float);

   // Allocate new output buffers and copy the caller's input into newly
   // allocated input buffers
   for (unsigned int i = 0; i < chans; i++) {
      ibuf[i] = new float[numSamples];
      memcpy(ibuf[i], buffers[i], memcpy_size);
      obuf[i] = new float[numSamples];
   }

   // Protect ourselves from the main thread
   mRealtimeLock.Enter();

   // Can be suspended because of the audio stream being paused or because effects
   // have been suspended, so in that case do nothing.
   if (!mRealtimeSuspended && !mStates.empty()) {

      // Remember when we started so we can calculate the amount of latency we
      // are introducing
      wxMilliClock_t start = wxGetUTCTimeMillis();

      // Now call each effect in the chain while swapping buffer contents to feed the
      // output of one effect as the input to the next effect
      size_t called = 0;
      for (auto &state : mStates) {
         if (state->IsRealtimeActive()) {
            state->RealtimeProcess(group, chans, ibuf, obuf, numSamples);
            called++;

            // Swap the buffer contents so this effect's output becomes the
            // input of the next active effect in the chain
            for (size_t j = 0; j < chans; j++) {
               memcpy(temp, ibuf[j], memcpy_size);
               memcpy(ibuf[j], obuf[j], memcpy_size);
               memcpy(obuf[j], temp, memcpy_size);
            }
         }
      }

      // After the swap above, the most recent output always ends up in the
      // input buffers, so copy it back to the caller's buffers whenever at
      // least one effect actually processed.
      if (called > 0) {
         for (size_t i = 0; i < chans; i++) {
            memcpy(buffers[i], ibuf[i], memcpy_size);
         }
      }

      // Remember the latency
      mRealtimeLatency = (int)(wxGetUTCTimeMillis() - start).GetValue();
   }

   mRealtimeLock.Leave();

   delete[] temp;

   for (size_t i = 0; i < chans; i++) {
      delete[] obuf[i];
      delete[] ibuf[i];
   }

   delete[] obuf;
   delete[] ibuf;

   //
   // This is wrong...needs to handle tails
   //
   return numSamples;
}

//
// This will be called in a different thread than the main GUI thread.
//
void RealtimeEffectManager::RealtimeProcessEnd()
{
   // Protect ourselves from the main thread
   mRealtimeLock.Enter();

   // Can be suspended because of the audio stream being paused or because effects
   // have been suspended.
   if (!mRealtimeSuspended)
   {
      for (auto &state : mStates)
      {
         if (state->IsRealtimeActive())
            state->GetEffect().RealtimeProcessEnd();
      }
   }

   mRealtimeLock.Leave();
}

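// Latency, in milliseconds, measured during the most recent RealtimeProcess() call.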
int RealtimeEffectManager::GetRealtimeLatency()
{
   return mRealtimeLatency;
}

RealtimeEffectState::RealtimeEffectState( EffectClientInterface &effect )
   : mEffect{ effect }
{
}

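// Suspend and resume adjust the per-state suspend count in matched pairs; the
// effect only processes audio while the count is zero (see IsRealtimeActive()).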
bool RealtimeEffectState::RealtimeSuspend()
{
   auto result = mEffect.RealtimeSuspend();
   if ( result ) {
      mRealtimeSuspendCount++;
   }
   return result;
}

bool RealtimeEffectState::RealtimeResume()
{
   auto result = mEffect.RealtimeResume();
   if ( result ) {
      mRealtimeSuspendCount--;
   }
   return result;
}

// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor index, so updates to one should
// be reflected in the other.
bool RealtimeEffectState::RealtimeAddProcessor(int group, unsigned chans, float rate)
{
   auto ichans = chans;
   auto ochans = chans;
   auto gchans = chans;

   // Reset processor index
   if (group == 0)
   {
      mCurrentProcessor = 0;
      mGroupProcessor.clear();
   }

   // Remember the processor starting index
   mGroupProcessor.push_back(mCurrentProcessor);

   const auto numAudioIn = mEffect.GetAudioInCount();
   const auto numAudioOut = mEffect.GetAudioOutCount();

   // Call the client until we run out of input or output channels
   while (ichans > 0 && ochans > 0)
   {
      // If we don't have enough input channels to accommodate the client's
      // requirements, then we replicate the input channels until the
      // client's needs are met.
      if (ichans < numAudioIn)
      {
         // All input channels have been consumed
         ichans = 0;
      }
      // Otherwise fulfill the client's needs with as many input channels as possible.
      // After calling the client with this set, we will loop back up to process more
      // of the input/output channels.
      else if (ichans >= numAudioIn)
      {
         gchans = numAudioIn;
         ichans -= gchans;
      }

      // If we don't have enough output channels to accommodate the client's
      // requirements, then we provide all of the output channels and fulfill
      // the client's needs with dummy buffers. These will just get tossed.
      if (ochans < numAudioOut)
      {
         // All output channels have been consumed
         ochans = 0;
      }
      // Otherwise fulfill the client's needs with as many output channels as possible.
      // After calling the client with this set, we will loop back up to process more
      // of the input/output channels.
      else if (ochans >= numAudioOut)
      {
         ochans -= numAudioOut;
      }

      // Add a NEW processor
      mEffect.RealtimeAddProcessor(gchans, rate);

      // Bump to next processor
      mCurrentProcessor++;
   }

   return true;
}

// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor group, so updates to one should
// be reflected in the other.
size_t RealtimeEffectState::RealtimeProcess(int group,
                                            unsigned chans,
                                            float **inbuf,
                                            float **outbuf,
                                            size_t numSamples)
{
   //
   // The caller passes the number of channels to process and specifies
   // the number of input and output buffers. There will always be the
   // same number of output buffers as there are input buffers.
   //
   // Effects always require a certain number of input and output buffers,
   // so if the number of channels we're currently processing are different
   // than what the effect expects, then we use a few methods of satisfying
   // the effects requirements.
   const auto numAudioIn = mEffect.GetAudioInCount();
   const auto numAudioOut = mEffect.GetAudioOutCount();

   auto clientIn = safenew float* [numAudioIn];
   auto clientOut = safenew float* [numAudioOut];
   auto dummybuf = safenew float [numSamples];

   decltype(numSamples) len = 0;
   auto ichans = chans;
   auto ochans = chans;
   auto gchans = chans;
   unsigned indx = 0;
   unsigned ondx = 0;

   int processor = mGroupProcessor[group];

   // Call the client until we run out of input or output channels
   while (ichans > 0 && ochans > 0)
   {
      // If we don't have enough input channels to accommodate the client's
      // requirements, then we replicate the input channels until the
      // client's needs are met.
      if (ichans < numAudioIn)
      {
         for (size_t i = 0; i < numAudioIn; i++)
         {
            if (indx == ichans)
            {
               indx = 0;
            }
            clientIn[i] = inbuf[indx++];
         }

         // All input channels have been consumed
         ichans = 0;
      }
      // Otherwise fulfill the client's needs with as many input channels as possible.
      // After calling the client with this set, we will loop back up to process more
      // of the input/output channels.
      else if (ichans >= numAudioIn)
      {
         gchans = 0;
         for (size_t i = 0; i < numAudioIn; i++, ichans--, gchans++)
         {
            clientIn[i] = inbuf[indx++];
         }
      }

      // If we don't have enough output channels to accommodate the client's
      // requirements, then we provide all of the output channels and fulfill
      // the client's needs with dummy buffers. These will just get tossed.
      if (ochans < numAudioOut)
      {
         for (size_t i = 0; i < numAudioOut; i++)
         {
            if (i < ochans)
            {
               clientOut[i] = outbuf[i];
            }
            else
            {
               clientOut[i] = dummybuf;
            }
         }

         // All output channels have been consumed
         ochans = 0;
      }
      // Otherwise fulfill the client's needs with as many output channels as possible.
      // After calling the client with this set, we will loop back up to process more
      // of the input/output channels.
      else if (ochans >= numAudioOut)
      {
         for (size_t i = 0; i < numAudioOut; i++, ochans--)
         {
            clientOut[i] = outbuf[ondx++];
         }
      }

      // Finally call the plugin to process the block
      len = 0;
      const auto blockSize = mEffect.GetBlockSize();
      for (decltype(numSamples) block = 0; block < numSamples; block += blockSize)
      {
         auto cnt = std::min(numSamples - block, blockSize);
         len += mEffect.RealtimeProcess(processor, clientIn, clientOut, cnt);

         for (size_t i = 0 ; i < numAudioIn; i++)
         {
            clientIn[i] += cnt;
         }

         for (size_t i = 0 ; i < numAudioOut; i++)
         {
            clientOut[i] += cnt;
         }
      }

      // Bump to next processor
      processor++;
   }

   delete[] clientIn;
   delete[] clientOut;
   delete[] dummybuf;

   return len;
}

bool RealtimeEffectState::IsRealtimeActive()
{
   return mRealtimeSuspendCount == 0;
}