2016-05-01 26 views
8

In Objective-C, registrare e riprodurre audio contemporaneamente è abbastanza semplice. E ci sono tonnellate di codice di esempio su Internet. Ma voglio registrare e riprodurre l'audio simultaneamente usando Audio Unit/Core Audio in Swift. Ci sono diverse piccole quantità di aiuto e codice di esempio su questo utilizzando Swift. E non ho trovato alcun aiuto che potesse mostrare come ottenere ciò.Come registrare e riprodurre l'audio simultaneamente in iOS utilizzando Swift?

Sto lottando con il codice muggito.

let preferredIOBufferDuration = 0.005 
let kInputBus = AudioUnitElement(1) 
let kOutputBus = AudioUnitElement(0) 

init() { 
    // This is my Audio Unit settings code. 
    var status: OSStatus 

    do { 
     try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(preferredIOBufferDuration) 
    } catch let error as NSError { 
     print(error) 
    } 


    var desc: AudioComponentDescription = AudioComponentDescription() 
    desc.componentType = kAudioUnitType_Output 
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO 
    desc.componentFlags = 0 
    desc.componentFlagsMask = 0 
    desc.componentManufacturer = kAudioUnitManufacturer_Apple 

    let inputComponent: AudioComponent = AudioComponentFindNext(nil, &desc) 

    status = AudioComponentInstanceNew(inputComponent, &audioUnit) 
    checkStatus(status) 

    var flag = UInt32(1) 
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, UInt32(sizeof(UInt32))) 
    checkStatus(status) 

    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, UInt32(sizeof(UInt32))) 
    checkStatus(status) 

    var audioFormat: AudioStreamBasicDescription! = AudioStreamBasicDescription() 
    audioFormat.mSampleRate = 8000 
    audioFormat.mFormatID = kAudioFormatLinearPCM 
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked 
    audioFormat.mFramesPerPacket = 1 
    audioFormat.mChannelsPerFrame = 1 
    audioFormat.mBitsPerChannel = 16 
    audioFormat.mBytesPerPacket = 2 
    audioFormat.mBytesPerFrame = 2 

    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    try! AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord) 
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    // Set input/recording callback 
    var inputCallbackStruct: AURenderCallbackStruct! = AURenderCallbackStruct(inputProc: recordingCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self))) 
    inputCallbackStruct.inputProc = recordingCallback 
    inputCallbackStruct.inputProcRefCon = UnsafeMutablePointer(unsafeAddressOf(self)) 
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &inputCallbackStruct, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    // Set output/renderar/playback callback 
    var renderCallbackStruct: AURenderCallbackStruct! = AURenderCallbackStruct(inputProc: playbackCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self))) 
    renderCallbackStruct.inputProc = playbackCallback 
    renderCallbackStruct.inputProcRefCon = UnsafeMutablePointer(unsafeAddressOf(self)) 
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallbackStruct, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    flag = 0 
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Output, kInputBus, &flag, UInt32(sizeof(UInt32))) 
} 


func recordingCallback(inRefCon: UnsafeMutablePointer<Void>, 
         ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>, 
         inTimeStamp: UnsafePointer<AudioTimeStamp>, 
         inBufNumber: UInt32, 
         inNumberFrames: UInt32, 
         ioData: UnsafeMutablePointer<AudioBufferList>) -> OSStatus { 

    print("recordingCallback got fired >>>") 


    return noErr 
} 



func playbackCallback(inRefCon: UnsafeMutablePointer<Void>, 
         ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>, 
         inTimeStamp: UnsafePointer<AudioTimeStamp>, 
         inBufNumber: UInt32, 
         inNumberFrames: UInt32, 
         ioData: UnsafeMutablePointer<AudioBufferList>) -> OSStatus { 

    print("playbackCallback got fired <<<") 


    return noErr 
} 

Con quel codice, solo recordingCallback metodo è sempre chiamato. E il metodo playbackCallback non viene sparato affatto. Sono certo che sto sbagliando qualcosa qui. Qualcuno può aiutarmi per favore con questo. Sto sbattendo la testa per questo problema.

+0

dov'è l'audioUnit var? –

risposta

4

Si sta impostando il metodo e RenderCallback in modo errato. Altre impostazioni sembrano OK. Quindi il tuo metodo init dovrebbe essere così.

init() { 

    var status: OSStatus 

    do { 
     try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(preferredIOBufferDuration) 
    } catch let error as NSError { 
     print(error) 
    } 


    var desc: AudioComponentDescription = AudioComponentDescription() 
    desc.componentType = kAudioUnitType_Output 
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO 
    desc.componentFlags = 0 
    desc.componentFlagsMask = 0 
    desc.componentManufacturer = kAudioUnitManufacturer_Apple 

    let inputComponent: AudioComponent = AudioComponentFindNext(nil, &desc) 

    status = AudioComponentInstanceNew(inputComponent, &audioUnit) 
    checkStatus(status) 

    var flag = UInt32(1) 
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, UInt32(sizeof(UInt32))) 
    checkStatus(status) 

    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, UInt32(sizeof(UInt32))) 
    checkStatus(status) 

    var audioFormat: AudioStreamBasicDescription! = AudioStreamBasicDescription() 
    audioFormat.mSampleRate = 8000 
    audioFormat.mFormatID = kAudioFormatLinearPCM 
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked 
    audioFormat.mFramesPerPacket = 1 
    audioFormat.mChannelsPerFrame = 1 
    audioFormat.mBitsPerChannel = 16 
    audioFormat.mBytesPerPacket = 2 
    audioFormat.mBytesPerFrame = 2 

    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    try! AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord) 
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, UInt32(sizeof(UInt32))) 
    checkStatus(status) 


    // Set input/recording callback 
    var inputCallbackStruct = AURenderCallbackStruct(inputProc: recordingCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self))) 
    AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_SetInputCallback), AudioUnitScope(kAudioUnitScope_Global), 1, &inputCallbackStruct, UInt32(sizeof(AURenderCallbackStruct))) 


    // Set output/renderar/playback callback 
    var renderCallbackStruct = AURenderCallbackStruct(inputProc: playbackCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self))) 
    AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioUnitProperty_SetRenderCallback), AudioUnitScope(kAudioUnitScope_Global), 0, &renderCallbackStruct, UInt32(sizeof(AURenderCallbackStruct))) 


    flag = 0 
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Output, kInputBus, &flag, UInt32(sizeof(UInt32))) 
} 

prova con questo codice e di farci sapere se questo aiuta.

+1

come usare questo codice per registrare e riprodurre come un microfono mentre si parla riproduce negli altoparlanti –

+0

Dove si può specificare da quale dispositivo si desidera che la registrazione provenga? Cioè un iPhone contro un auricolare? – Surz