I don't see real-time audio processing without output in this example: http://teragonaudio.com/article/How-to-do-realtime-recording-with-effect-processing-on-iOS.html

I want to turn off my output. I tried changing kAudioSessionCategory_PlayAndRecord to kAudioSessionCategory_RecordAudio, but that doesn't work. I also tried to get rid of:

if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat, 
          kAudioUnitScope_Output, 1, &streamDescription, sizeof(streamDescription)) != noErr) { 
     return 1; 
    } 

because I want to get the sound from the microphone but not play it back. But no matter what I do, when my audio reaches the renderCallback method I get a -50 error. When the audio is automatically played back on the output, everything works fine...
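For reference, one common workaround keeps the output element running but hands silence back from the render callback, so the speaker stays quiet while the input is still processed. A minimal sketch, assuming the same audioUnit and renderCallback layout shown in the update below:

    // Sketch (assumption): render callback that pulls mic input, processes it, 
    // and returns silence so nothing audible reaches the speaker. 
    OSStatus silentRenderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags, 
             const AudioTimeStamp *audioTimeStamp, UInt32 busNumber, 
             UInt32 numFrames, AudioBufferList *buffers) { 
        // Pull the recorded samples from the input element (bus 1) 
        OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp, 
                 1, numFrames, buffers); 
        if(status != noErr) { 
            return status; 
        } 

        // ... analyse buffers->mBuffers[0].mData here ... 

        // Zero the buffers and flag them as silence before returning 
        for(UInt32 i = 0; i < buffers->mNumberBuffers; i++) { 
            memset(buffers->mBuffers[i].mData, 0, buffers->mBuffers[i].mDataByteSize); 
        } 
        *actionFlags |= kAudioUnitRenderAction_OutputIsSilence; 
        return noErr; 
    } 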

Update with code:

using namespace std; 

AudioUnit *audioUnit = NULL; 

float *convertedSampleBuffer = NULL; 

int initAudioSession() { 
    audioUnit = (AudioUnit*)malloc(sizeof(AudioUnit)); 

    if(AudioSessionInitialize(NULL, NULL, NULL, NULL) != noErr) { 
     return 1; 
    } 

    if(AudioSessionSetActive(true) != noErr) { 
     return 1; 
    } 

    UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord; 
    if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, 
           sizeof(UInt32), &sessionCategory) != noErr) { 
     return 1; 
    } 

    Float32 bufferSizeInSec = 0.02f; 
    if(AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, 
           sizeof(Float32), &bufferSizeInSec) != noErr) { 
     return 1; 
    } 

    UInt32 overrideCategory = 1; 
    if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, 
           sizeof(UInt32), &overrideCategory) != noErr) { 
     return 1; 
    } 

    // There are many properties you might want to provide callback functions for: 
    // kAudioSessionProperty_AudioRouteChange 
    // kAudioSessionProperty_OverrideCategoryEnableBluetoothInput 
    // etc. 
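    // Sketch (assumption): with the deprecated AudioSession C API, a route 
    // change listener would be registered roughly like this: 
    // 
    //   static void routeChanged(void *inClientData, AudioSessionPropertyID inID, 
    //                            UInt32 inDataSize, const void *inData) { 
    //       // react to headset plug/unplug and other route changes 
    //   } 
    //   AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, 
    //                                   routeChanged, NULL); 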

    return 0; 
} 

OSStatus renderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags, 
         const AudioTimeStamp *audioTimeStamp, UInt32 busNumber, 
         UInt32 numFrames, AudioBufferList *buffers) { 
    OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp, 
             1, numFrames, buffers); 

    int doOutput = 0; 

    if(status != noErr) { 
     return status; 
    } 

    if(convertedSampleBuffer == NULL) { 
     // Lazy initialization of this buffer is necessary because we don't 
     // know the frame count until the first callback 
     convertedSampleBuffer = (float*)malloc(sizeof(float) * numFrames); 
     baseTime = (float)QRealTimer::getUptimeInMilliseconds(); 
    } 

    SInt16 *inputFrames = (SInt16*)(buffers->mBuffers->mData); 

    // If your DSP code can use integers, then don't bother converting to 
    // floats here, as it just wastes CPU. However, most DSP algorithms rely 
    // on floating point, and this is especially true if you are porting a 
    // VST/AU to iOS. 

    int i; 

    for(i = numFrames; i < fftlength; i++)  // Shifting buffer 
     x_inbuf[i - numFrames] = x_inbuf[i]; 

    for( i = 0; i < numFrames; i++) { 
     x_inbuf[i + x_phase] = (float)inputFrames[i]/(float)32768; 
    } 

    if(x_phase + numFrames == fftlength) 
    { 
     x_alignment.SigProc_frontend(x_inbuf); // Signal processing front-end (FFT!) 
     doOutput = x_alignment.Align(); 


     /// Output as text! In the real-time version, 
     //  this is where we update visualisation callbacks and launch other services 
     if ((doOutput) & (x_netscore.isEvent(x_alignment.Position())) 
      &(x_alignment.lastAction()<x_alignment.Position())) 
     { 
      // here i want to do something with my input! 
     } 
    } 
    else 
     x_phase += numFrames; 


    return noErr; 
} 


int initAudioStreams(AudioUnit *audioUnit) { 
    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord; 
    if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, 
           sizeof(UInt32), &audioCategory) != noErr) { 
     return 1; 
    } 

    UInt32 overrideCategory = 1; 
    if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, 
           sizeof(UInt32), &overrideCategory) != noErr) { 
     // Less serious error, but you may want to handle it and bail here 
    } 

    AudioComponentDescription componentDescription; 
    componentDescription.componentType = kAudioUnitType_Output; 
    componentDescription.componentSubType = kAudioUnitSubType_RemoteIO; 
    componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple; 
    componentDescription.componentFlags = 0; 
    componentDescription.componentFlagsMask = 0; 
    AudioComponent component = AudioComponentFindNext(NULL, &componentDescription); 
    if(AudioComponentInstanceNew(component, audioUnit) != noErr) { 
     return 1; 
    } 

    UInt32 enable = 1; 
    if(AudioUnitSetProperty(*audioUnit, kAudioOutputUnitProperty_EnableIO, 
          kAudioUnitScope_Input, 1, &enable, sizeof(UInt32)) != noErr) { 
     return 1; 
    } 

    AURenderCallbackStruct callbackStruct; 
    callbackStruct.inputProc = renderCallback; // Render function 
    callbackStruct.inputProcRefCon = NULL; 
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback, 
          kAudioUnitScope_Input, 0, &callbackStruct, 
          sizeof(AURenderCallbackStruct)) != noErr) { 
     return 1; 
    } 

    AudioStreamBasicDescription streamDescription; 
    // You might want to replace this with a different value, but keep in mind that the 
    // iPhone does not support all sample rates. 8kHz, 22kHz, and 44.1kHz should all work. 
    streamDescription.mSampleRate = 44100; 
    // Yes, I know you probably want floating point samples, but the iPhone isn't going 
    // to give you floating point data. You'll need to make the conversion by hand from 
    // linear PCM <-> float. 
    streamDescription.mFormatID = kAudioFormatLinearPCM; 
    // This part is important! 
    streamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | 
    kAudioFormatFlagsNativeEndian | 
    kAudioFormatFlagIsPacked; 
    streamDescription.mBitsPerChannel = 16; 
    // 1 sample per frame; 2 bytes per frame as long as 16-bit mono samples are used 
    streamDescription.mBytesPerFrame = 2; 
    streamDescription.mChannelsPerFrame = 1; 
    streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame * 
    streamDescription.mChannelsPerFrame; 
    // Always should be set to 1 
    streamDescription.mFramesPerPacket = 1; 
    // Always set to 0, just to be sure 
    streamDescription.mReserved = 0; 

    // Set up input stream with above properties 
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat, 
          kAudioUnitScope_Input, 0, &streamDescription, sizeof(streamDescription)) != noErr) { 
     return 1; 
    } 

    // Ditto for the output stream, which we will be sending the processed audio to 
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat, 
          kAudioUnitScope_Output, 1, &streamDescription, sizeof(streamDescription)) != noErr) { 
     return 1; 
    } 

    return 0; 
} 


int startAudioUnit(AudioUnit *audioUnit) { 
    if(AudioUnitInitialize(*audioUnit) != noErr) { 
     return 1; 
    } 

    if(AudioOutputUnitStart(*audioUnit) != noErr) { 
     return 1; 
    } 

    return 0; 
} 

And calling it from my VC:

initAudioSession(); 
    initAudioStreams(audioUnit); 
    startAudioUnit(audioUnit); 
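For completeness, a sketch of the matching teardown when recording should stop, assuming the same audioUnit pointer:

    // Sketch (assumption): tear everything down in reverse order of the setup 
    AudioOutputUnitStop(*audioUnit); 
    AudioUnitUninitialize(*audioUnit); 
    AudioComponentInstanceDispose(*audioUnit); 
    AudioSessionSetActive(false); 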

Answer


If you only want recording and no playback, simply comment out the block that sets the renderCallback:

AURenderCallbackStruct callbackStruct; 
callbackStruct.inputProc = renderCallback; // Render function 
callbackStruct.inputProcRefCon = NULL; 
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback, 
    kAudioUnitScope_Input, 0, &callbackStruct, 
    sizeof(AURenderCallbackStruct)) != noErr) { 
    return 1; 
} 

Update after seeing the code:

As I suspected, you're missing the input callback. Add these lines:

// at top: 
#define kInputBus 1 

AURenderCallbackStruct callbackStruct; 
OSStatus status; 
callbackStruct.inputProc = &ALAudioUnit::recordingCallback; 
callbackStruct.inputProcRefCon = this; 
status = AudioUnitSetProperty(audioUnit, 
           kAudioOutputUnitProperty_SetInputCallback, 
           kAudioUnitScope_Global, 
           kInputBus, 
           &callbackStruct, 
           sizeof(callbackStruct)); 

Now, in your recordingCallback:

OSStatus ALAudioUnit::recordingCallback(void *inRefCon, 
             AudioUnitRenderActionFlags *ioActionFlags, 
             const AudioTimeStamp *inTimeStamp, 
             UInt32 inBusNumber, 
             UInt32 inNumberFrames, 
             AudioBufferList *ioData) 
{ 
    // TODO: Use inRefCon to access our interface object to do stuff 
    // Then, use inNumberFrames to figure out how much data is available, and make 
    // that much space available in buffers in an AudioBufferList. 

    // Then: 
    // Obtain recorded samples 

    OSStatus status; 

    ALAudioUnit *pThis = reinterpret_cast<ALAudioUnit*>(inRefCon); 
    if (!pThis) 
     return noErr; 

    //assert (pThis->m_nMaxSliceFrames >= inNumberFrames); 

    pThis->recorderBufferList->GetBufferList().mBuffers[0].mDataByteSize = inNumberFrames * pThis->m_recorderSBD.mBytesPerFrame; 

    status = AudioUnitRender(pThis->audioUnit, 
          ioActionFlags, 
          inTimeStamp, 
          inBusNumber, 
          inNumberFrames, 
          &pThis->recorderBufferList->GetBufferList()); 
    THROW_EXCEPTION_IF_ERROR(status, "error rendering audio unit"); 

    // If we're not playing, I don't care about the data, simply discard it 
    if (!pThis->playbackState || pThis->isSeeking) return noErr; 

    // Now, we have the samples we just read sitting in buffers in bufferList 
    pThis->DoStuffWithTheRecordedAudio(inNumberFrames, pThis->recorderBufferList, inTimeStamp); 

    return noErr; 
} 

Btw, I'm allocating my own buffer instead of using the one provided by the AudioUnit. You might want to change those parts if you want to use the AudioUnit-allocated buffer.

Update:

How to allocate your own buffer:

recorderBufferList = new AUBufferList(); 
recorderBufferList->Allocate(m_recorderSBD, m_nMaxSliceFrames); 
recorderBufferList->PrepareBuffer(m_recorderSBD, m_nMaxSliceFrames); 

Also, if you're doing this, tell the AudioUnit not to allocate its own buffers:

// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own) 
flag = 0; 
status = AudioUnitSetProperty(audioUnit, 
           kAudioUnitProperty_ShouldAllocateBuffer, 
           kAudioUnitScope_Input, 
           kInputBus, 
           &flag, 
           sizeof(flag)); 

You'll need to include the Core Audio utility classes.
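A minimal sketch of what that typically involves, assuming AUBufferList comes from Apple's Core Audio Public Utility sources added to the project:

    // Sketch (assumption): AUBufferList is declared in the Public Utility 
    // sources, not in the SDK frameworks; after adding them to the project: 
    #include "AUBuffer.h"   // AUBufferList, Allocate(), PrepareBuffer() 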


No, I need to render the input. I don't want it played back right away, but I do want the render method. The point here is to get the sound from the microphone and modify it in real time. That's what renderCallback does. – Kuba


I don't understand. I thought you only wanted microphone input, no output. Do you want input/output, and to modify the input in real time and send it to the output? – Mar0ux


I want to modify the input in real time, but not send it to the output. So I don't need output, but I do need a render method. – Kuba

Another answer

I'm building a similar application with the same code, and I found that you can stop playback by changing the kAudioSessionCategory_PlayAndRecord enum to kAudioSessionCategory_RecordAudio:

int initAudioStreams(AudioUnit *audioUnit) { 
UInt32 audioCategory = kAudioSessionCategory_RecordAudio; 
if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, 
          sizeof(UInt32), &audioCategory) != noErr) { 
    return 1; 
} 

This stopped the feedback between the microphone and the speaker on my hardware.
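One related caveat, stated as an assumption rather than something from the answer above: kAudioSessionProperty_OverrideCategoryDefaultToSpeaker is only meaningful for the PlayAndRecord category, so when recording only, the override call from initAudioStreams can be guarded or dropped. A sketch:

    // Sketch (assumption): only apply the speaker override when the session 
    // category is PlayAndRecord; it is not meaningful for RecordAudio. 
    if(audioCategory == kAudioSessionCategory_PlayAndRecord) { 
        UInt32 overrideCategory = 1; 
        AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, 
               sizeof(UInt32), &overrideCategory); 
    } 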