mirror of https://github.com/cookiengineer/audacity synced 2025-08-09 16:41:14 +02:00

Put callback in a member function, remove many more gAudioIO->

Paul Licameli 2018-08-07 10:16:07 -04:00
parent fc497368a5
commit fc8783bc74
2 changed files with 188 additions and 174 deletions
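
The pattern this commit introduces is a thin free function with PortAudio's PaStreamCallback signature that does nothing but forward to a member function, so the callback body can refer to fields directly instead of spelling out gAudioIO-> on every access. A minimal sketch of that forwarding shape, with illustrative names (Engine, gEngine, and mGain are stand-ins, not Audacity's declarations), might look like:

// Sketch only: illustrative names, not Audacity's actual types.
struct Engine {
   float mGain = 1.0f;   // member state, reachable without a global qualifier

   // The real work happens in a member function, so fields need no prefix.
   int Callback(const float *in, float *out, unsigned long frames)
   {
      for (unsigned long i = 0; i < frames; ++i)
         out[i] = in ? in[i] * mGain : 0.0f;
      return 0;
   }
};

static Engine *gEngine = nullptr;   // single global instance, set elsewhere

// C-style entry point with the signature the audio library expects;
// it touches the global exactly once and forwards everything else.
int engineCallback(const float *in, float *out, unsigned long frames)
{
   return gEngine->Callback(in, out, frames);
}

The global is still consulted, but only once per callback at the forwarding site, which is what makes the wholesale removal of gAudioIO-> inside the body possible.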

View File

@@ -4729,9 +4729,16 @@ static void DoSoftwarePlaythrough(const void *inputBuffer,
 int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 unsigned long framesPerBuffer,
-// If there were more of these conditionally used arguments, it
-// could make sense to make a NEW macro that looks like this:
-// USEDIF( EXPERIMENTAL_MIDI_OUT, timeInfo )
+const PaStreamCallbackTimeInfo *timeInfo,
+const PaStreamCallbackFlags statusFlags, void *userData )
+{
+return gAudioIO->AudioCallback(
+inputBuffer, outputBuffer, framesPerBuffer,
+timeInfo, statusFlags, userData);
+}
+int AudioIO::AudioCallback(const void *inputBuffer, void *outputBuffer,
+unsigned long framesPerBuffer,
 #ifdef EXPERIMENTAL_MIDI_OUT
 const PaStreamCallbackTimeInfo *timeInfo,
 #else
@@ -4739,9 +4746,9 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 #endif
 const PaStreamCallbackFlags statusFlags, void * WXUNUSED(userData) )
 {
-auto numPlaybackChannels = gAudioIO->mNumPlaybackChannels;
-auto numPlaybackTracks = gAudioIO->mPlaybackTracks.size();
-auto numCaptureChannels = gAudioIO->mNumCaptureChannels;
+auto numPlaybackChannels = mNumPlaybackChannels;
+auto numPlaybackTracks = mPlaybackTracks.size();
+auto numCaptureChannels = mNumCaptureChannels;
 int callbackReturn = paContinue;
 void *tempBuffer = alloca(framesPerBuffer*sizeof(float)*
 MAX(numCaptureChannels,numPlaybackChannels));
@@ -4750,29 +4757,29 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // output meter may need samples untouched by volume emulation
 float *outputMeterFloats;
 outputMeterFloats =
-(outputBuffer && gAudioIO->mEmulateMixerOutputVol &&
-gAudioIO->mMixerOutputVol != 1.0) ?
+(outputBuffer && mEmulateMixerOutputVol &&
+mMixerOutputVol != 1.0) ?
 (float *)alloca(framesPerBuffer*numPlaybackChannels * sizeof(float)) :
 (float *)outputBuffer;
 #ifdef EXPERIMENTAL_MIDI_OUT
-if (gAudioIO->mCallbackCount++ == 0) {
+if (mCallbackCount++ == 0) {
 // This is effectively mSystemMinusAudioTime when the buffer is empty:
-gAudioIO->mStartTime = SystemTime(gAudioIO->mUsingAlsa) - gAudioIO->mT0;
+mStartTime = SystemTime(mUsingAlsa) - mT0;
 // later, mStartTime - mSystemMinusAudioTime will tell us latency
 }
 /* GSW: Save timeInfo in case MidiPlayback needs it */
-gAudioIO->mAudioCallbackClockTime = PaUtil_GetTime();
+mAudioCallbackClockTime = PaUtil_GetTime();
 /* for Linux, estimate a smooth audio time as a slowly-changing
 offset from system time */
 // rnow is system time as a double to simplify math
-double rnow = SystemTime(gAudioIO->mUsingAlsa);
+double rnow = SystemTime(mUsingAlsa);
 // anow is next-sample-to-be-computed audio time as a double
-double anow = gAudioIO->AudioTime();
-if (gAudioIO->mUsingAlsa) {
+double anow = AudioTime();
+if (mUsingAlsa) {
 // timeInfo's fields are not all reliable.
 // enow is audio time estimated from our clock synchronization protocol,
@@ -4784,54 +4791,54 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // so we are using enow to smooth out this jitter, in fact to < 1ms.)
 // Add worst-case clock drift using previous framesPerBuffer:
 const auto increase =
-gAudioIO->mAudioFramesPerBuffer * 0.0002 / gAudioIO->mRate;
-gAudioIO->mSystemMinusAudioTime += increase;
-gAudioIO->mSystemMinusAudioTimePlusLatency += increase;
-double enow = rnow - gAudioIO->mSystemMinusAudioTime;
+mAudioFramesPerBuffer * 0.0002 / mRate;
+mSystemMinusAudioTime += increase;
+mSystemMinusAudioTimePlusLatency += increase;
+double enow = rnow - mSystemMinusAudioTime;
 // now, use anow instead if it is ahead of enow
 if (anow > enow) {
-gAudioIO->mSystemMinusAudioTime = rnow - anow;
+mSystemMinusAudioTime = rnow - anow;
 // Update our mAudioOutLatency estimate during the first 20 callbacks.
 // During this period, the buffer should fill. Once we have a good
 // estimate of mSystemMinusAudioTime (expected in fewer than 20 callbacks)
 // we want to stop the updating in case there is clock drift, which would
 // cause the mAudioOutLatency estimation to drift as well. The clock drift
 // in the first 20 callbacks should be negligible, however.
-if (gAudioIO->mCallbackCount < 20) {
-gAudioIO->mAudioOutLatency = gAudioIO->mStartTime -
-gAudioIO->mSystemMinusAudioTime;
+if (mCallbackCount < 20) {
+mAudioOutLatency = mStartTime -
+mSystemMinusAudioTime;
 }
-gAudioIO->mSystemMinusAudioTimePlusLatency =
-gAudioIO->mSystemMinusAudioTime + gAudioIO->mAudioOutLatency;
+mSystemMinusAudioTimePlusLatency =
+mSystemMinusAudioTime + mAudioOutLatency;
 }
 }
 else {
 // If not using Alsa, rely on timeInfo to have meaningful values that are
 // more precise than the output latency value reported at stream start.
-gAudioIO->mSystemMinusAudioTime = rnow - anow;
-gAudioIO->mSystemMinusAudioTimePlusLatency =
-gAudioIO->mSystemMinusAudioTime +
+mSystemMinusAudioTime = rnow - anow;
+mSystemMinusAudioTimePlusLatency =
+mSystemMinusAudioTime +
 (timeInfo->outputBufferDacTime - timeInfo->currentTime);
 }
-gAudioIO->mAudioFramesPerBuffer = framesPerBuffer;
-if (gAudioIO->IsPaused()
+mAudioFramesPerBuffer = framesPerBuffer;
+if (IsPaused()
 // PRL: Why was this added? Was it only because of the mysterious
 // initial leading zeroes, now solved by setting mStreamToken early?
-|| gAudioIO->mStreamToken <= 0
+|| mStreamToken <= 0
 )
-gAudioIO->mNumPauseFrames += framesPerBuffer;
+mNumPauseFrames += framesPerBuffer;
 // PRL: Note that when there is a separate MIDI thread, it is effectively
 // blocked until the first visit to this line during a playback, and will
-// not read gAudioIO->mSystemMinusAudioTimePlusLatency sooner:
-gAudioIO->mNumFrames += framesPerBuffer;
+// not read mSystemMinusAudioTimePlusLatency sooner:
+mNumFrames += framesPerBuffer;
 #ifndef USE_MIDI_THREAD
-if (gAudioIO->mMidiStream)
-gAudioIO->FillMidiBuffers();
+if (mMidiStream)
+FillMidiBuffers();
 #endif
 #endif
@@ -4840,8 +4847,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 /* Send data to recording VU meter if applicable */
-if (gAudioIO->mInputMeter &&
-!gAudioIO->mInputMeter->IsMeterDisabled() &&
+if (mInputMeter &&
+!mInputMeter->IsMeterDisabled() &&
 inputBuffer) {
 // get here if meters are actually live , and being updated
 /* It's critical that we don't update the meters while StopStream is
@@ -4853,22 +4860,22 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 * is allowed to actually do the updating.
 * Note that mUpdatingMeters must be set first to avoid a race condition.
 */
-gAudioIO->mUpdatingMeters = true;
-if (gAudioIO->mUpdateMeters) {
-if (gAudioIO->mCaptureFormat == floatSample)
-gAudioIO->mInputMeter->UpdateDisplay(numCaptureChannels,
+mUpdatingMeters = true;
+if (mUpdateMeters) {
+if (mCaptureFormat == floatSample)
+mInputMeter->UpdateDisplay(numCaptureChannels,
 framesPerBuffer,
 (float *)inputBuffer);
 else {
-CopySamples((samplePtr)inputBuffer, gAudioIO->mCaptureFormat,
+CopySamples((samplePtr)inputBuffer, mCaptureFormat,
 (samplePtr)tempFloats, floatSample,
 framesPerBuffer * numCaptureChannels);
-gAudioIO->mInputMeter->UpdateDisplay(numCaptureChannels,
+mInputMeter->UpdateDisplay(numCaptureChannels,
 framesPerBuffer,
 tempFloats);
 }
 }
-gAudioIO->mUpdatingMeters = false;
+mUpdatingMeters = false;
 } // end recording VU meter update
 // Stop recording if 'silence' is detected
@@ -4882,31 +4889,31 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 //
 // By using CallAfter(), we can schedule the call to the toolbar
 // to run in the main GUI thread after the next event loop iteration.
-if(gAudioIO->mPauseRec && inputBuffer && gAudioIO->mInputMeter) {
-if(gAudioIO->mInputMeter->GetMaxPeak() < gAudioIO->mSilenceLevel ) {
-if(!gAudioIO->IsPaused()) {
+if(mPauseRec && inputBuffer && mInputMeter) {
+if(mInputMeter->GetMaxPeak() < mSilenceLevel ) {
+if(!IsPaused()) {
 AudacityProject *p = GetActiveProject();
 ControlToolBar *bar = p->GetControlToolBar();
 bar->CallAfter(&ControlToolBar::Pause);
 }
 }
 else {
-if(gAudioIO->IsPaused()) {
+if(IsPaused()) {
 AudacityProject *p = GetActiveProject();
 ControlToolBar *bar = p->GetControlToolBar();
 bar->CallAfter(&ControlToolBar::Pause);
 }
 }
 }
-if( gAudioIO->mPaused )
+if( mPaused )
 {
 if (outputBuffer && numPlaybackChannels > 0)
 {
 ClearSamples((samplePtr)outputBuffer, floatSample,
 0, framesPerBuffer * numPlaybackChannels);
-if (inputBuffer && gAudioIO->mSoftwarePlaythrough) {
-DoSoftwarePlaythrough(inputBuffer, gAudioIO->mCaptureFormat,
+if (inputBuffer && mSoftwarePlaythrough) {
+DoSoftwarePlaythrough(inputBuffer, mCaptureFormat,
 numCaptureChannels,
 (float *)outputBuffer, (int)framesPerBuffer);
 }
@@ -4915,7 +4922,7 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 return paContinue;
 }
-if (gAudioIO->mStreamToken > 0)
+if (mStreamToken > 0)
 {
 //
 // Mix and copy to PortAudio's output buffer
@@ -4930,8 +4937,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 for( i = 0; i < framesPerBuffer*numPlaybackChannels; i++)
 outputFloats[i] = 0.0;
-if (inputBuffer && gAudioIO->mSoftwarePlaythrough) {
-DoSoftwarePlaythrough(inputBuffer, gAudioIO->mCaptureFormat,
+if (inputBuffer && mSoftwarePlaythrough) {
+DoSoftwarePlaythrough(inputBuffer, mCaptureFormat,
 numCaptureChannels,
 (float *)outputBuffer, (int)framesPerBuffer);
 }
@@ -4945,76 +4952,76 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 #ifdef EXPERIMENTAL_SCRUBBING_SUPPORT
 // While scrubbing, ignore seek requests
-if (gAudioIO->mSeek && gAudioIO->mPlayMode == AudioIO::PLAY_SCRUB)
-gAudioIO->mSeek = 0.0;
-else if (gAudioIO->mSeek && gAudioIO->mPlayMode == AudioIO::PLAY_AT_SPEED)
-gAudioIO->mSeek = 0.0;
+if (mSeek && mPlayMode == AudioIO::PLAY_SCRUB)
+mSeek = 0.0;
+else if (mSeek && mPlayMode == AudioIO::PLAY_AT_SPEED)
+mSeek = 0.0;
 else
 #endif
-if (gAudioIO->mSeek)
+if (mSeek)
 {
-int token = gAudioIO->mStreamToken;
-wxMutexLocker locker(gAudioIO->mSuspendAudioThread);
-if (token != gAudioIO->mStreamToken)
+int token = mStreamToken;
+wxMutexLocker locker(mSuspendAudioThread);
+if (token != mStreamToken)
 // This stream got destroyed while we waited for it
 return paAbort;
 // Pause audio thread and wait for it to finish
-gAudioIO->mAudioThreadFillBuffersLoopRunning = false;
-while( gAudioIO->mAudioThreadFillBuffersLoopActive == true )
+mAudioThreadFillBuffersLoopRunning = false;
+while( mAudioThreadFillBuffersLoopActive == true )
 {
 wxMilliSleep( 50 );
 }
 // Calculate the NEW time position
-gAudioIO->mTime += gAudioIO->mSeek;
-gAudioIO->mTime = gAudioIO->LimitStreamTime(gAudioIO->mTime);
-gAudioIO->mSeek = 0.0;
+mTime += mSeek;
+mTime = LimitStreamTime(mTime);
+mSeek = 0.0;
 // Reset mixer positions and flush buffers for all tracks
-if(gAudioIO->mTimeTrack)
+if(mTimeTrack)
 // Following gives negative when mT0 > mTime
-gAudioIO->mWarpedTime =
-gAudioIO->mTimeTrack->ComputeWarpedLength
-(gAudioIO->mT0, gAudioIO->mTime);
+mWarpedTime =
+mTimeTrack->ComputeWarpedLength
+(mT0, mTime);
 else
-gAudioIO->mWarpedTime = gAudioIO->mTime - gAudioIO->mT0;
-gAudioIO->mWarpedTime = std::abs(gAudioIO->mWarpedTime);
+mWarpedTime = mTime - mT0;
+mWarpedTime = std::abs(mWarpedTime);
 // Reset mixer positions and flush buffers for all tracks
 for (i = 0; i < numPlaybackTracks; i++)
 {
-gAudioIO->mPlaybackMixers[i]->Reposition(gAudioIO->mTime);
+mPlaybackMixers[i]->Reposition(mTime);
 const auto toDiscard =
-gAudioIO->mPlaybackBuffers[i]->AvailForGet();
+mPlaybackBuffers[i]->AvailForGet();
 const auto discarded =
-gAudioIO->mPlaybackBuffers[i]->Discard( toDiscard );
+mPlaybackBuffers[i]->Discard( toDiscard );
 // wxASSERT( discarded == toDiscard );
 // but we can't assert in this thread
 wxUnusedVar(discarded);
 }
 // Reload the ring buffers
-gAudioIO->mAudioThreadShouldCallFillBuffersOnce = true;
-while( gAudioIO->mAudioThreadShouldCallFillBuffersOnce == true )
+mAudioThreadShouldCallFillBuffersOnce = true;
+while( mAudioThreadShouldCallFillBuffersOnce == true )
 {
 wxMilliSleep( 50 );
 }
 // Reenable the audio thread
-gAudioIO->mAudioThreadFillBuffersLoopRunning = true;
+mAudioThreadFillBuffersLoopRunning = true;
 return paContinue;
 }
 unsigned numSolo = 0;
 for(unsigned t = 0; t < numPlaybackTracks; t++ )
-if( gAudioIO->mPlaybackTracks[t]->GetSolo() )
+if( mPlaybackTracks[t]->GetSolo() )
 numSolo++;
 #ifdef EXPERIMENTAL_MIDI_OUT
-auto numMidiPlaybackTracks = gAudioIO->mMidiPlaybackTracks.size();
+auto numMidiPlaybackTracks = mMidiPlaybackTracks.size();
 for( unsigned t = 0; t < numMidiPlaybackTracks; t++ )
-if( gAudioIO->mMidiPlaybackTracks[t]->GetSolo() )
+if( mMidiPlaybackTracks[t]->GetSolo() )
 numSolo++;
 #endif
@@ -5034,7 +5041,7 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 decltype(framesPerBuffer) maxLen = 0;
 for (unsigned t = 0; t < numPlaybackTracks; t++)
 {
-const WaveTrack *vt = gAudioIO->mPlaybackTracks[t].get();
+const WaveTrack *vt = mPlaybackTracks[t].get();
 chans[chanCnt] = vt;
@@ -5067,13 +5074,13 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // this is original code prior to r10680 -RBD
 if (cut)
 {
-len = gAudioIO->mPlaybackBuffers[t]->Discard(framesPerBuffer);
+len = mPlaybackBuffers[t]->Discard(framesPerBuffer);
 // keep going here.
 // we may still need to issue a paComplete.
 }
 else
 {
-len = gAudioIO->mPlaybackBuffers[t]->Get((samplePtr)tempBufs[chanCnt],
+len = mPlaybackBuffers[t]->Get((samplePtr)tempBufs[chanCnt],
 floatSample,
 framesPerBuffer);
 if (len < framesPerBuffer)
@@ -5108,11 +5115,11 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 if (cut)
 {
 len =
-gAudioIO->mPlaybackBuffers[t]->Discard(framesPerBuffer);
+mPlaybackBuffers[t]->Discard(framesPerBuffer);
 } else
 {
 len =
-gAudioIO->mPlaybackBuffers[t]->Get((samplePtr)tempFloats,
+mPlaybackBuffers[t]->Get((samplePtr)tempFloats,
 floatSample,
 framesPerBuffer);
 }
@@ -5134,17 +5141,17 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // the end, then we may have finished playing the entire
 // selection.
 if (bDone)
-bDone = bDone && (gAudioIO->ReversedTime()
-? gAudioIO->mTime <= gAudioIO->mT1
-: gAudioIO->mTime >= gAudioIO->mT1);
+bDone = bDone && (ReversedTime()
+? mTime <= mT1
+: mTime >= mT1);
 // We never finish if we are playing looped or or scrubbing.
 if (bDone) {
 // playing straight we must have no more audio.
-if (gAudioIO->mPlayMode == AudioIO::PLAY_STRAIGHT)
+if (mPlayMode == AudioIO::PLAY_STRAIGHT)
 bDone = (len == 0);
 // playing at speed, it is OK to have some audio left over.
-else if (gAudioIO->mPlayMode == AudioIO::PLAY_AT_SPEED)
+else if (mPlayMode == AudioIO::PLAY_AT_SPEED)
 bDone = true;
 else
 bDone = false;
@@ -5155,7 +5162,7 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // PRL: singalling MIDI output complete is necessary if
 // not USE_MIDI_THREAD, otherwise it's harmlessly redundant
 #ifdef EXPERIMENTAL_MIDI_OUT
-gAudioIO->mMidiOutputComplete = true,
+mMidiOutputComplete = true,
 #endif
 callbackReturn = paComplete;
 }
@@ -5179,8 +5186,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 outputMeterFloats[numPlaybackChannels*i] +=
 gain*tempFloats[i];
-if (gAudioIO->mEmulateMixerOutputVol)
-gain *= gAudioIO->mMixerOutputVol;
+if (mEmulateMixerOutputVol)
+gain *= mMixerOutputVol;
 for(decltype(len) i = 0; i < len; i++)
 outputFloats[numPlaybackChannels*i] += gain*tempBufs[c][i];
@@ -5197,8 +5204,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 outputMeterFloats[numPlaybackChannels*i+1] +=
 gain*tempFloats[i];
-if (gAudioIO->mEmulateMixerOutputVol)
-gain *= gAudioIO->mMixerOutputVol;
+if (mEmulateMixerOutputVol)
+gain *= mMixerOutputVol;
 for(decltype(len) i = 0; i < len; i++)
 outputFloats[numPlaybackChannels*i+1] += gain*tempBufs[c][i];
@@ -5211,16 +5218,16 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // about the time indicator being passed the end won't happen;
 // do it here instead (but not if looping or scrubbing)
 if (numPlaybackTracks == 0 &&
-gAudioIO->mPlayMode == AudioIO::PLAY_STRAIGHT)
+mPlayMode == AudioIO::PLAY_STRAIGHT)
 {
-if ((gAudioIO->ReversedTime()
-? gAudioIO->mTime <= gAudioIO->mT1
-: gAudioIO->mTime >= gAudioIO->mT1)) {
+if ((ReversedTime()
+? mTime <= mT1
+: mTime >= mT1)) {
 // PRL: singalling MIDI output complete is necessary if
 // not USE_MIDI_THREAD, otherwise it's harmlessly redundant
 #ifdef EXPERIMENTAL_MIDI_OUT
-gAudioIO->mMidiOutputComplete = true,
+mMidiOutputComplete = true,
 #endif
 callbackReturn = paComplete;
 }
@@ -5230,15 +5237,15 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // Update the current time position, for scrubbing
 // "Consume" only as much as the ring buffers produced, which may
 // be less than framesPerBuffer (during "stutter")
-if (gAudioIO->mPlayMode == AudioIO::PLAY_SCRUB)
-gAudioIO->mTime = gAudioIO->mScrubQueue->Consumer(maxLen);
-else if (gAudioIO->mPlayMode == AudioIO::PLAY_AT_SPEED)
-gAudioIO->mTime = gAudioIO->mScrubQueue->Consumer(maxLen);
+if (mPlayMode == AudioIO::PLAY_SCRUB)
+mTime = mScrubQueue->Consumer(maxLen);
+else if (mPlayMode == AudioIO::PLAY_AT_SPEED)
+mTime = mScrubQueue->Consumer(maxLen);
 #endif
 em.RealtimeProcessEnd();
-gAudioIO->mLastPlaybackTimeMillis = ::wxGetLocalTimeMillis();
+mLastPlaybackTimeMillis = ::wxGetLocalTimeMillis();
 //
 // Clip output to [-1.0,+1.0] range (msmeyer)
@@ -5274,7 +5281,7 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 {
 // If there are no playback tracks, and we are recording, then the
 // earlier checks for being passed the end won't happen, so do it here.
-if (gAudioIO->mTime >= gAudioIO->mT1) {
+if (mTime >= mT1) {
 callbackReturn = paComplete;
 }
@@ -5291,9 +5298,9 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 size_t len = framesPerBuffer;
 for(unsigned t = 0; t < numCaptureChannels; t++)
 len = std::min( len,
-gAudioIO->mCaptureBuffers[t]->AvailForPut());
-if (gAudioIO->mSimulateRecordingErrors && 100LL * rand() < RAND_MAX)
+mCaptureBuffers[t]->AvailForPut());
+if (mSimulateRecordingErrors && 100LL * rand() < RAND_MAX)
 // Make spurious errors for purposes of testing the error
 // reporting
 len = 0;
@@ -5302,21 +5309,21 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // the other thread, executing FillBuffers, isn't consuming fast
 // enough from mCaptureBuffers; maybe it's CPU-bound, or maybe the
 // storage device it writes is too slow
-if (gAudioIO->mDetectDropouts &&
-((gAudioIO->mDetectUpstreamDropouts && inputError) ||
+if (mDetectDropouts &&
+((mDetectUpstreamDropouts && inputError) ||
 len < framesPerBuffer) ) {
 // Assume that any good partial buffer should be written leftmost
 // and zeroes will be padded after; label the zeroes.
-auto start = gAudioIO->mTime + len / gAudioIO->mRate +
-gAudioIO->mRecordingSchedule.mLatencyCorrection;
-auto duration = (framesPerBuffer - len) / gAudioIO->mRate;
+auto start = mTime + len / mRate +
+mRecordingSchedule.mLatencyCorrection;
+auto duration = (framesPerBuffer - len) / mRate;
 auto interval = std::make_pair( start, duration );
-gAudioIO->mLostCaptureIntervals.push_back( interval );
+mLostCaptureIntervals.push_back( interval );
 }
 if (len < framesPerBuffer)
 {
-gAudioIO->mLostSamples += (framesPerBuffer - len);
+mLostSamples += (framesPerBuffer - len);
 wxPrintf(wxT("lost %d samples\n"), (int)(framesPerBuffer - len));
 }
@@ -5329,7 +5336,7 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // it'd be nice to be able to call CopySamples, but it can't
 // handle multiplying by the gain and then clipping. Bummer.
-switch(gAudioIO->mCaptureFormat) {
+switch(mCaptureFormat) {
 case floatSample: {
 float *inputFloats = (float *)inputBuffer;
 for( i = 0; i < len; i++)
@@ -5358,8 +5365,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 } // switch
 const auto put =
-gAudioIO->mCaptureBuffers[t]->Put(
-(samplePtr)tempBuffer, gAudioIO->mCaptureFormat, len);
+mCaptureBuffers[t]->Put(
+(samplePtr)tempBuffer, mCaptureFormat, len);
 // wxASSERT(put == len);
 // but we can't assert in this thread
 wxUnusedVar(put);
@@ -5370,32 +5377,32 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 // Update the current time position if not scrubbing
 // (Already did it above, for scrubbing)
 #ifdef EXPERIMENTAL_SCRUBBING_SUPPORT
-if( (gAudioIO->mPlayMode != AudioIO::PLAY_SCRUB) &&
-(gAudioIO->mPlayMode != AudioIO::PLAY_AT_SPEED) )
+if( (mPlayMode != AudioIO::PLAY_SCRUB) &&
+(mPlayMode != AudioIO::PLAY_AT_SPEED) )
 #endif
 {
-double delta = framesPerBuffer / gAudioIO->mRate;
-if (gAudioIO->ReversedTime())
+double delta = framesPerBuffer / mRate;
+if (ReversedTime())
 delta *= -1.0;
-if (gAudioIO->mTimeTrack)
+if (mTimeTrack)
 // MB: this is why SolveWarpedLength is needed :)
-gAudioIO->mTime =
-gAudioIO->mTimeTrack->SolveWarpedLength(gAudioIO->mTime, delta);
+mTime =
+mTimeTrack->SolveWarpedLength(mTime, delta);
 else
-gAudioIO->mTime += delta;
+mTime += delta;
 }
 // Wrap to start if looping
-if (gAudioIO->mPlayMode == AudioIO::PLAY_LOOPED)
+if (mPlayMode == AudioIO::PLAY_LOOPED)
 {
-while (gAudioIO->ReversedTime()
-? gAudioIO->mTime <= gAudioIO->mT1
-: gAudioIO->mTime >= gAudioIO->mT1)
+while (ReversedTime()
+? mTime <= mT1
+: mTime >= mT1)
 {
 // LL: This is not exactly right, but I'm at my wits end trying to
 // figure it out. Feel free to fix it. :-)
 // MB: it's much easier than you think, mTime isn't warped at all!
-gAudioIO->mTime -= gAudioIO->mT1 - gAudioIO->mT0;
+mTime -= mT1 - mT0;
 }
 }
@@ -5418,11 +5425,11 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 if (numCaptureChannels > 0 && numPlaybackChannels > 0) // simultaneously playing and recording
 {
 if (timeInfo->inputBufferAdcTime > 0)
-gAudioIO->mLastRecordingOffset = timeInfo->inputBufferAdcTime - timeInfo->outputBufferDacTime;
-else if (gAudioIO->mLastRecordingOffset == 0.0)
+mLastRecordingOffset = timeInfo->inputBufferAdcTime - timeInfo->outputBufferDacTime;
+else if (mLastRecordingOffset == 0.0)
 {
-const PaStreamInfo* si = Pa_GetStreamInfo( gAudioIO->mPortStreamV19 );
-gAudioIO->mLastRecordingOffset = -si->inputLatency;
+const PaStreamInfo* si = Pa_GetStreamInfo( mPortStreamV19 );
+mLastRecordingOffset = -si->inputLatency;
 }
 }
 #endif
@@ -5436,8 +5443,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 for( i = 0; i < framesPerBuffer*numPlaybackChannels; i++)
 outputFloats[i] = 0.0;
-if (inputBuffer && gAudioIO->mSoftwarePlaythrough) {
-DoSoftwarePlaythrough(inputBuffer, gAudioIO->mCaptureFormat,
+if (inputBuffer && mSoftwarePlaythrough) {
+DoSoftwarePlaythrough(inputBuffer, mCaptureFormat,
 numCaptureChannels,
 (float *)outputBuffer, (int)framesPerBuffer);
 }
@@ -5452,8 +5459,8 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 }
 /* Send data to playback VU meter if applicable */
-if (gAudioIO->mOutputMeter &&
-!gAudioIO->mOutputMeter->IsMeterDisabled() &&
+if (mOutputMeter &&
+!mOutputMeter->IsMeterDisabled() &&
 outputMeterFloats) {
 // Get here if playback meter is live
 /* It's critical that we don't update the meters while StopStream is
@@ -5465,9 +5472,9 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 * is allowed to actually do the updating.
 * Note that mUpdatingMeters must be set first to avoid a race condition.
 */
-gAudioIO->mUpdatingMeters = true;
-if (gAudioIO->mUpdateMeters) {
-gAudioIO->mOutputMeter->UpdateDisplay(numPlaybackChannels,
+mUpdatingMeters = true;
+if (mUpdateMeters) {
+mOutputMeter->UpdateDisplay(numPlaybackChannels,
 framesPerBuffer,
 outputMeterFloats);
@@ -5481,10 +5488,10 @@ int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
 //AudacityProject* pProj = GetActiveProject();
 //MixerBoard* pMixerBoard = pProj->GetMixerBoard();
 //if (pMixerBoard)
-// pMixerBoard->UpdateMeters(gAudioIO->GetStreamTime(),
+// pMixerBoard->UpdateMeters(GetStreamTime(),
 // (pProj->mLastPlayMode == loopedPlay));
 }
-gAudioIO->mUpdatingMeters = false;
+mUpdatingMeters = false;
 } // end playback VU meter update
 return callbackReturn;
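
Because the free function keeps PortAudio's PaStreamCallback signature, it remains the thing handed to PortAudio when a stream is opened; only its body changed to forward into AudioIO::AudioCallback. Audacity's actual stream setup lives elsewhere in AudioIO.cpp and uses Pa_OpenStream with chosen devices and formats, so the sketch below is only an illustration of that registration step, using Pa_OpenDefaultStream, placeholder channel counts and rate, and a made-up function name StartPlaybackSketch:

// Illustration only: how a PaStreamCallback-shaped function such as
// audacityAudioCallback gets registered. Channel counts, sample rate, and
// Pa_OpenDefaultStream are placeholder choices, not Audacity's real setup.
#include <portaudio.h>
#include <cstdio>

// Declared in AudioIO.h in the real code; repeated here so the sketch is
// self-contained.
int audacityAudioCallback(const void *inputBuffer, void *outputBuffer,
                          unsigned long framesPerBuffer,
                          const PaStreamCallbackTimeInfo *timeInfo,
                          PaStreamCallbackFlags statusFlags, void *userData);

bool StartPlaybackSketch()
{
   if (Pa_Initialize() != paNoError)
      return false;

   PaStream *stream = nullptr;
   PaError err = Pa_OpenDefaultStream(
      &stream,
      0,                             // no capture in this sketch
      2,                             // stereo playback
      paFloat32,                     // float buffers, as the callback expects
      44100.0,
      paFramesPerBufferUnspecified,
      audacityAudioCallback,         // invoked from PortAudio's own thread
      nullptr);                      // userData unused; state lives in AudioIO

   if (err == paNoError)
      err = Pa_StartStream(stream);

   if (err != paNoError) {
      std::fprintf(stderr, "PortAudio error: %s\n", Pa_GetErrorText(err));
      Pa_Terminate();                // also closes any stream left open
      return false;
   }
   return true;   // the callback now runs repeatedly until the stream stops
}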

View File

@@ -169,12 +169,47 @@ struct TransportTracks {
 // which seems not to implement the notes-off message correctly.
 #define AUDIO_IO_GB_MIDI_WORKAROUND
+/** \brief The function which is called from PortAudio's callback thread
+ * context to collect and deliver audio for / from the sound device.
+ *
+ * This covers recording, playback, and doing both simultaneously. It is
+ * also invoked to do monitoring and software playthrough. Note that dealing
+ * with the two buffers needs some care to ensure that the right things
+ * happen for all possible cases.
+ * @param inputBuffer Buffer of length framesPerBuffer containing samples
+ * from the sound card, or null if not capturing audio. Note that the data
+ * type will depend on the format of audio data that was chosen when the
+ * stream was created (so could be floats or various integers)
+ * @param outputBuffer Uninitialised buffer of length framesPerBuffer which
+ * will be sent to the sound card after the callback, or null if not playing
+ * audio back.
+ * @param framesPerBuffer The length of the playback and recording buffers
+ * @param PaStreamCallbackTimeInfo Pointer to PortAudio time information
+ * structure, which tells us how long we have been playing / recording
+ * @param statusFlags PortAudio stream status flags
+ * @param userData pointer to user-defined data structure. Provided for
+ * flexibility by PortAudio, but not used by Audacity - the data is stored in
+ * the AudioIO class instead.
+ */
+int audacityAudioCallback(
+const void *inputBuffer, void *outputBuffer,
+unsigned long framesPerBuffer,
+const PaStreamCallbackTimeInfo *timeInfo,
+PaStreamCallbackFlags statusFlags, void *userData );
 class AUDACITY_DLL_API AudioIO final {
 public:
 AudioIO();
 ~AudioIO();
+// This function executes in a thread spawned by the PortAudio library
+int AudioCallback(
+const void *inputBuffer, void *outputBuffer,
+unsigned long framesPerBuffer,
+const PaStreamCallbackTimeInfo *timeInfo,
+const PaStreamCallbackFlags statusFlags, void *userData);
 AudioIOListener* GetListener() { return mListener; }
 void SetListener(AudioIOListener* listener);
@@ -780,34 +815,6 @@ private:
 static double mCachedBestRateIn;
 static double mCachedBestRateOut;
-/** \brief The function which is called from PortAudio's callback thread
- * context to collect and deliver audio for / from the sound device.
- *
- * This covers recording, playback, and doing both simultaneously. It is
- * also invoked to do monitoring and software playthrough. Note that dealing
- * with the two buffers needs some care to ensure that the right things
- * happen for all possible cases.
- * @param inputBuffer Buffer of length framesPerBuffer containing samples
- * from the sound card, or null if not capturing audio. Note that the data
- * type will depend on the format of audio data that was chosen when the
- * stream was created (so could be floats or various integers)
- * @param outputBuffer Uninitialised buffer of length framesPerBuffer which
- * will be sent to the sound card after the callback, or null if not playing
- * audio back.
- * @param framesPerBuffer The length of the playback and recording buffers
- * @param PaStreamCallbackTimeInfo Pointer to PortAudio time information
- * structure, which tells us how long we have been playing / recording
- * @param statusFlags PortAudio stream status flags
- * @param userData pointer to user-defined data structure. Provided for
- * flexibility by PortAudio, but not used by Audacity - the data is stored in
- * the AudioIO class instead.
- */
-friend int audacityAudioCallback(
-const void *inputBuffer, void *outputBuffer,
-unsigned long framesPerBuffer,
-const PaStreamCallbackTimeInfo *timeInfo,
-PaStreamCallbackFlags statusFlags, void *userData );
 // Serialize main thread and PortAudio thread's attempts to pause and change
 // the state used by the third, Audio thread.
 wxMutex mSuspendAudioThread;
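
The relocated Doxygen comment in this header diff notes that userData goes unused because the state lives in the AudioIO object reached through gAudioIO. For comparison only: a common alternative, which this commit does not adopt, passes the object itself as userData when the stream is opened and casts it back inside a static trampoline, removing the global from the callback path entirely. A sketch with illustrative names (AudioEngine is a stand-in, not AudioIO):

// Alternative sketch, not Audacity's design: route the object through
// PortAudio's userData instead of a global pointer. Names are illustrative.
#include <portaudio.h>

class AudioEngine {
public:
   int AudioCallback(const void *in, void *out, unsigned long frames,
                     const PaStreamCallbackTimeInfo *, PaStreamCallbackFlags)
   {
      // Real work (mixing, metering, ...) would go here; emit silence.
      float *o = static_cast<float *>(out);
      for (unsigned long i = 0; i < frames * 2; ++i)   // 2 playback channels
         o[i] = 0.0f;
      (void)in;
      return paContinue;
   }

   // Static trampoline matching PaStreamCallback; recovers 'this' from userData.
   static int Trampoline(const void *in, void *out, unsigned long frames,
                         const PaStreamCallbackTimeInfo *timeInfo,
                         PaStreamCallbackFlags statusFlags, void *userData)
   {
      return static_cast<AudioEngine *>(userData)
         ->AudioCallback(in, out, frames, timeInfo, statusFlags);
   }
};

// Registration sketch: the last argument to Pa_OpenDefaultStream becomes the
// userData seen by every callback invocation.
//    Pa_OpenDefaultStream(&stream, 0, 2, paFloat32, 44100.0,
//                         paFramesPerBufferUnspecified,
//                         &AudioEngine::Trampoline, &engine);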