1
0
mirror of https://github.com/cookiengineer/audacity synced 2025-08-16 08:34:10 +02:00

RealtimeEffectState into RealtimeEffectManager...

... which doesn't need Effect.h then.

Freeing Effect and RealtimeEffectManager from cycles, leaving 25 in the big s.c.c. (strongly connected component).
This commit is contained in:
Paul Licameli 2019-06-23 16:31:47 -04:00
parent 5caeaf520b
commit cd9e4e3987
3 changed files with 244 additions and 244 deletions

View File

@ -472,20 +472,6 @@ bool Effect::RealtimeFinalize()
return false; return false;
} }
// Bind this state object to the effect client it will drive in realtime.
// The effect is held by reference and must outlive this state object.
RealtimeEffectState::RealtimeEffectState( EffectClientInterface &effect )
: mEffect{ effect }
{
}
// Forward the suspend request to the wrapped effect.  On success, bump the
// nested-suspend counter so IsRealtimeActive() reports the effect inactive.
bool RealtimeEffectState::RealtimeSuspend()
{
   const bool suspended = mEffect.RealtimeSuspend();
   if (!suspended)
      return false;
   ++mRealtimeSuspendCount;
   return true;
}
bool Effect::RealtimeSuspend() bool Effect::RealtimeSuspend()
{ {
if (mClient) if (mClient)
@ -494,15 +480,6 @@ bool Effect::RealtimeSuspend()
return true; return true;
} }
// Forward the resume request to the wrapped effect.  On success, decrement
// the nested-suspend counter that RealtimeSuspend() incremented.
bool RealtimeEffectState::RealtimeResume()
{
   const bool resumed = mEffect.RealtimeResume();
   if (!resumed)
      return false;
   --mRealtimeSuspendCount;
   return true;
}
bool Effect::RealtimeResume() bool Effect::RealtimeResume()
{ {
if (mClient) if (mClient)
@ -2294,201 +2271,6 @@ double Effect::CalcPreviewInputLength(double previewLength)
return previewLength; return previewLength;
} }
// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor index, so updates to one should
// be reflected in the other.
//
// Asks the client effect to allocate processor instances sufficient to
// cover `chans` channels at sample rate `rate`, and records the starting
// processor index for this `group` in mGroupProcessor.  Always returns true.
bool RealtimeEffectState::RealtimeAddProcessor(int group, unsigned chans, float rate)
{
auto ichans = chans;
auto ochans = chans;
auto gchans = chans;
// Reset processor index
if (group == 0)
{
mCurrentProcessor = 0;
mGroupProcessor.clear();
}
// Remember the processor starting index
mGroupProcessor.push_back(mCurrentProcessor);
const auto numAudioIn = mEffect.GetAudioInCount();
const auto numAudioOut = mEffect.GetAudioOutCount();
// Call the client until we run out of input or output channels
while (ichans > 0 && ochans > 0)
{
// If we don't have enough input channels to accommodate the client's
// requirements, then we replicate the input channels until the
// client's needs are met.
if (ichans < numAudioIn)
{
// All input channels have been consumed
// (gchans keeps its current value — presumably the replicated
// channel count to report to the client; confirm against
// RealtimeProcess, which mirrors this logic.)
ichans = 0;
}
// Otherwise fulfill the client's needs with as many input channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ichans >= numAudioIn)
{
gchans = numAudioIn;
ichans -= gchans;
}
// If we don't have enough output channels to accommodate the client's
// requirements, then we provide all of the output channels and fulfill
// the client's needs with dummy buffers. These will just get tossed.
if (ochans < numAudioOut)
{
// All output channels have been consumed
ochans = 0;
}
// Otherwise fulfill the client's needs with as many output channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ochans >= numAudioOut)
{
ochans -= numAudioOut;
}
// Add a NEW processor
mEffect.RealtimeAddProcessor(gchans, rate);
// Bump to next processor
mCurrentProcessor++;
}
return true;
}
// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor group, so updates to one should
// be reflected in the other.
//
// Processes `numSamples` samples of `chans` channels through the processor
// instances previously allocated for `group` by RealtimeAddProcessor,
// adapting the channel count to the client's fixed input/output channel
// requirements.  Returns the number of samples produced by the last
// processor called (the value of `len` from the final loop iteration).
size_t RealtimeEffectState::RealtimeProcess(int group,
unsigned chans,
float **inbuf,
float **outbuf,
size_t numSamples)
{
//
// The caller passes the number of channels to process and specifies
// the number of input and output buffers. There will always be the
// same number of output buffers as there are input buffers.
//
// Effects always require a certain number of input and output buffers,
// so if the number of channels we're currently processing are different
// than what the effect expects, then we use a few methods of satisfying
// the effects requirements.
const auto numAudioIn = mEffect.GetAudioInCount();
const auto numAudioOut = mEffect.GetAudioOutCount();
// Stack-allocated scratch arrays of buffer pointers sized to the client's
// channel counts; dummybuf absorbs output channels the caller didn't supply.
float **clientIn = (float **) alloca(numAudioIn * sizeof(float *));
float **clientOut = (float **) alloca(numAudioOut * sizeof(float *));
float *dummybuf = (float *) alloca(numSamples * sizeof(float));
decltype(numSamples) len = 0;
auto ichans = chans;
auto ochans = chans;
auto gchans = chans;
unsigned indx = 0;
unsigned ondx = 0;
// First processor index recorded for this group by RealtimeAddProcessor
int processor = mGroupProcessor[group];
// Call the client until we run out of input or output channels
while (ichans > 0 && ochans > 0)
{
// If we don't have enough input channels to accommodate the client's
// requirements, then we replicate the input channels until the
// client's needs are met.
if (ichans < numAudioIn)
{
for (size_t i = 0; i < numAudioIn; i++)
{
if (indx == ichans)
{
indx = 0;
}
clientIn[i] = inbuf[indx++];
}
// All input channels have been consumed
ichans = 0;
}
// Otherwise fulfill the client's needs with as many input channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ichans >= numAudioIn)
{
gchans = 0;
for (size_t i = 0; i < numAudioIn; i++, ichans--, gchans++)
{
clientIn[i] = inbuf[indx++];
}
}
// If we don't have enough output channels to accommodate the client's
// requirements, then we provide all of the output channels and fulfill
// the client's needs with dummy buffers. These will just get tossed.
if (ochans < numAudioOut)
{
for (size_t i = 0; i < numAudioOut; i++)
{
if (i < ochans)
{
clientOut[i] = outbuf[i];
}
else
{
clientOut[i] = dummybuf;
}
}
// All output channels have been consumed
ochans = 0;
}
// Otherwise fulfill the client's needs with as many output channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ochans >= numAudioOut)
{
for (size_t i = 0; i < numAudioOut; i++, ochans--)
{
clientOut[i] = outbuf[ondx++];
}
}
// Finally call the plugin to process the block.
// Note: len is reset each outer iteration, so only the last processor's
// sample count is returned to the caller.
len = 0;
const auto blockSize = mEffect.GetBlockSize();
for (decltype(numSamples) block = 0; block < numSamples; block += blockSize)
{
auto cnt = std::min(numSamples - block, blockSize);
len += mEffect.RealtimeProcess(processor, clientIn, clientOut, cnt);
// Advance the scratch pointers past the samples just processed
for (size_t i = 0 ; i < numAudioIn; i++)
{
clientIn[i] += cnt;
}
for (size_t i = 0 ; i < numAudioOut; i++)
{
clientOut[i] += cnt;
}
}
// Bump to next processor
processor++;
}
return len;
}
// Reports whether realtime processing is active: true exactly when every
// suspend has been balanced by a resume (the nested count is back to zero).
bool RealtimeEffectState::IsRealtimeActive()
{
   return 0 == mRealtimeSuspendCount;
}
bool Effect::IsHidden() bool Effect::IsHidden()
{ {
return false; return false;

View File

@ -16,7 +16,6 @@
#include "../Experimental.h" #include "../Experimental.h"
#include <atomic>
#include <set> #include <set>
#include <wx/defs.h> #include <wx/defs.h>
@ -547,30 +546,6 @@ private:
friend class EffectPresetsDialog; friend class EffectPresetsDialog;
}; };
// Associates an effect client with the bookkeeping needed to drive it in
// realtime: the starting processor index of each processing group and a
// nested suspend count.  The wrapped effect must outlive this object.
class RealtimeEffectState
{
public:
explicit RealtimeEffectState( EffectClientInterface &effect );

EffectClientInterface &GetEffect() const { return mEffect; }

bool RealtimeSuspend();
bool RealtimeResume();
bool RealtimeAddProcessor(int group, unsigned chans, float rate);
size_t RealtimeProcess(int group,
unsigned chans, float **inbuf, float **outbuf, size_t numSamples);
bool IsRealtimeActive();

private:
EffectClientInterface &mEffect;

// First processor index allocated for each group
std::vector<int> mGroupProcessor;
// Initialized here: previously indeterminate until RealtimeAddProcessor
// was first called with group == 0
int mCurrentProcessor{ 0 };

std::atomic<int> mRealtimeSuspendCount{ 1 }; // Effects are initially suspended
};
// FIXME: // FIXME:
// FIXME: Remove this once all effects are using the NEW dialog // FIXME: Remove this once all effects are using the NEW dialog
// FIXME: // FIXME:

View File

@ -13,10 +13,35 @@
#include "../Experimental.h" #include "../Experimental.h"
#include "Effect.h" #include "audacity/EffectInterface.h"
#include "MemoryX.h"
#include <atomic>
#include <wx/time.h> #include <wx/time.h>
// Associates an effect client with the bookkeeping needed to drive it in
// realtime: the starting processor index of each processing group and a
// nested suspend count.  The wrapped effect must outlive this object.
class RealtimeEffectState
{
public:
explicit RealtimeEffectState( EffectClientInterface &effect );

EffectClientInterface &GetEffect() const { return mEffect; }

bool RealtimeSuspend();
bool RealtimeResume();
bool RealtimeAddProcessor(int group, unsigned chans, float rate);
size_t RealtimeProcess(int group,
unsigned chans, float **inbuf, float **outbuf, size_t numSamples);
bool IsRealtimeActive();

private:
EffectClientInterface &mEffect;

// First processor index allocated for each group
std::vector<int> mGroupProcessor;
// Initialized here: previously indeterminate until RealtimeAddProcessor
// was first called with group == 0
int mCurrentProcessor{ 0 };

std::atomic<int> mRealtimeSuspendCount{ 1 }; // Effects are initially suspended
};
RealtimeEffectManager & RealtimeEffectManager::Get() RealtimeEffectManager & RealtimeEffectManager::Get()
{ {
static RealtimeEffectManager rem; static RealtimeEffectManager rem;
@ -356,3 +381,221 @@ int RealtimeEffectManager::GetRealtimeLatency()
{ {
return mRealtimeLatency; return mRealtimeLatency;
} }
// Bind this state object to the effect client it will drive in realtime.
// The effect is held by reference and must outlive this state object.
RealtimeEffectState::RealtimeEffectState( EffectClientInterface &effect )
: mEffect{ effect }
{
}
// Forward the suspend request to the wrapped effect.  On success, bump the
// nested-suspend counter so IsRealtimeActive() reports the effect inactive.
bool RealtimeEffectState::RealtimeSuspend()
{
   const bool suspended = mEffect.RealtimeSuspend();
   if (!suspended)
      return false;
   ++mRealtimeSuspendCount;
   return true;
}
// Forward the resume request to the wrapped effect.  On success, decrement
// the nested-suspend counter that RealtimeSuspend() incremented.
bool RealtimeEffectState::RealtimeResume()
{
   const bool resumed = mEffect.RealtimeResume();
   if (!resumed)
      return false;
   --mRealtimeSuspendCount;
   return true;
}
// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor index, so updates to one should
// be reflected in the other.
//
// Asks the client effect to allocate processor instances sufficient to
// cover `chans` channels at sample rate `rate`, and records the starting
// processor index for this `group` in mGroupProcessor.  Always returns true.
bool RealtimeEffectState::RealtimeAddProcessor(int group, unsigned chans, float rate)
{
auto ichans = chans;
auto ochans = chans;
auto gchans = chans;
// Reset processor index
if (group == 0)
{
mCurrentProcessor = 0;
mGroupProcessor.clear();
}
// Remember the processor starting index
mGroupProcessor.push_back(mCurrentProcessor);
const auto numAudioIn = mEffect.GetAudioInCount();
const auto numAudioOut = mEffect.GetAudioOutCount();
// Call the client until we run out of input or output channels
while (ichans > 0 && ochans > 0)
{
// If we don't have enough input channels to accommodate the client's
// requirements, then we replicate the input channels until the
// client's needs are met.
if (ichans < numAudioIn)
{
// All input channels have been consumed
// (gchans keeps its current value — presumably the replicated
// channel count to report to the client; confirm against
// RealtimeProcess, which mirrors this logic.)
ichans = 0;
}
// Otherwise fulfill the client's needs with as many input channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ichans >= numAudioIn)
{
gchans = numAudioIn;
ichans -= gchans;
}
// If we don't have enough output channels to accommodate the client's
// requirements, then we provide all of the output channels and fulfill
// the client's needs with dummy buffers. These will just get tossed.
if (ochans < numAudioOut)
{
// All output channels have been consumed
ochans = 0;
}
// Otherwise fulfill the client's needs with as many output channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ochans >= numAudioOut)
{
ochans -= numAudioOut;
}
// Add a NEW processor
mEffect.RealtimeAddProcessor(gchans, rate);
// Bump to next processor
mCurrentProcessor++;
}
return true;
}
// RealtimeAddProcessor and RealtimeProcess use the same method of
// determining the current processor group, so updates to one should
// be reflected in the other.
//
// Processes `numSamples` samples of `chans` channels through the processor
// instances previously allocated for `group` by RealtimeAddProcessor,
// adapting the channel count to the client's fixed input/output channel
// requirements.  Returns the number of samples produced by the last
// processor called (the value of `len` from the final loop iteration).
size_t RealtimeEffectState::RealtimeProcess(int group,
unsigned chans,
float **inbuf,
float **outbuf,
size_t numSamples)
{
//
// The caller passes the number of channels to process and specifies
// the number of input and output buffers. There will always be the
// same number of output buffers as there are input buffers.
//
// Effects always require a certain number of input and output buffers,
// so if the number of channels we're currently processing are different
// than what the effect expects, then we use a few methods of satisfying
// the effects requirements.
const auto numAudioIn = mEffect.GetAudioInCount();
const auto numAudioOut = mEffect.GetAudioOutCount();
// Stack-allocated scratch arrays of buffer pointers sized to the client's
// channel counts; dummybuf absorbs output channels the caller didn't supply.
float **clientIn = (float **) alloca(numAudioIn * sizeof(float *));
float **clientOut = (float **) alloca(numAudioOut * sizeof(float *));
float *dummybuf = (float *) alloca(numSamples * sizeof(float));
decltype(numSamples) len = 0;
auto ichans = chans;
auto ochans = chans;
auto gchans = chans;
unsigned indx = 0;
unsigned ondx = 0;
// First processor index recorded for this group by RealtimeAddProcessor
int processor = mGroupProcessor[group];
// Call the client until we run out of input or output channels
while (ichans > 0 && ochans > 0)
{
// If we don't have enough input channels to accommodate the client's
// requirements, then we replicate the input channels until the
// client's needs are met.
if (ichans < numAudioIn)
{
for (size_t i = 0; i < numAudioIn; i++)
{
if (indx == ichans)
{
indx = 0;
}
clientIn[i] = inbuf[indx++];
}
// All input channels have been consumed
ichans = 0;
}
// Otherwise fulfill the client's needs with as many input channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ichans >= numAudioIn)
{
gchans = 0;
for (size_t i = 0; i < numAudioIn; i++, ichans--, gchans++)
{
clientIn[i] = inbuf[indx++];
}
}
// If we don't have enough output channels to accommodate the client's
// requirements, then we provide all of the output channels and fulfill
// the client's needs with dummy buffers. These will just get tossed.
if (ochans < numAudioOut)
{
for (size_t i = 0; i < numAudioOut; i++)
{
if (i < ochans)
{
clientOut[i] = outbuf[i];
}
else
{
clientOut[i] = dummybuf;
}
}
// All output channels have been consumed
ochans = 0;
}
// Otherwise fulfill the client's needs with as many output channels as possible.
// After calling the client with this set, we will loop back up to process more
// of the input/output channels.
else if (ochans >= numAudioOut)
{
for (size_t i = 0; i < numAudioOut; i++, ochans--)
{
clientOut[i] = outbuf[ondx++];
}
}
// Finally call the plugin to process the block.
// Note: len is reset each outer iteration, so only the last processor's
// sample count is returned to the caller.
len = 0;
const auto blockSize = mEffect.GetBlockSize();
for (decltype(numSamples) block = 0; block < numSamples; block += blockSize)
{
auto cnt = std::min(numSamples - block, blockSize);
len += mEffect.RealtimeProcess(processor, clientIn, clientOut, cnt);
// Advance the scratch pointers past the samples just processed
for (size_t i = 0 ; i < numAudioIn; i++)
{
clientIn[i] += cnt;
}
for (size_t i = 0 ; i < numAudioOut; i++)
{
clientOut[i] += cnt;
}
}
// Bump to next processor
processor++;
}
return len;
}
// Reports whether realtime processing is active: true exactly when every
// suspend has been balanced by a resume (the nested count is back to zero).
bool RealtimeEffectState::IsRealtimeActive()
{
   return 0 == mRealtimeSuspendCount;
}