【问题标题】:iOS - 播放带有效果的流式 (mp3) 音频(原文:iOS - Play streaming (mp3) audio with effects)
【发布时间】:2011-07-26 08:26:54
【问题描述】:

我是 iOS 音频技术的新手。

我正在开发一个可以播放流媒体音频(mp3)的应用程序,计划添加一些效果,例如 iPod Equalizer 、 Pan Control。

实现这一目标的最佳方法是什么。

我尝试使用 Matt Gallagher 的 AudioStreamer API (http://cocoawithlove.com/2008/09/streaming-and-playing-live-mp3-stream.html)。我能够播放流媒体音频,但我不确定如何在 AudioQueue 的基础上添加效果。

从 Apple 文档中,我了解到 AudioUnit 可用于添加效果。但流格式应该是线性 PCM。

基本上我想添加效果并播放流音频。

我现在对前进的方向感到困惑。

有人能指出前进的方向吗?非常感谢任何帮助。

谢谢

萨西库玛

【问题讨论】:

    标签: iphone core-audio audio-streaming


    【解决方案1】:

    我认为你应该明确地使用 AudioUnits。

    看看它是多么简单:

    1) 创建音频单元

    // OUTPUT unit (Remote I/O — talks to the hardware; output bus 0, input bus 1)
    AudioComponentDescription iOUnitDescription;
    iOUnitDescription.componentType = kAudioUnitType_Output;
    iOUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    iOUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    iOUnitDescription.componentFlags = 0;
    iOUnitDescription.componentFlagsMask = 0;
    
    // MIXER unit (multichannel mixer — merges the player and mic/line-in paths)
    AudioComponentDescription MixerUnitDescription;
    MixerUnitDescription.componentType          = kAudioUnitType_Mixer;
    MixerUnitDescription.componentSubType       = kAudioUnitSubType_MultiChannelMixer;
    MixerUnitDescription.componentManufacturer  = kAudioUnitManufacturer_Apple;
    MixerUnitDescription.componentFlags         = 0;
    MixerUnitDescription.componentFlagsMask     = 0;
    
    // PLAYER unit (audio-file player generator)
    AudioComponentDescription playerUnitDescription;
    playerUnitDescription.componentType = kAudioUnitType_Generator;
    playerUnitDescription.componentSubType = kAudioUnitSubType_AudioFilePlayer;
    playerUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    // FIX: these two fields were previously left uninitialized. AudioComponentDescription
    // is a plain C struct on the stack, so garbage in componentFlags/componentFlagsMask
    // can make the component search (AUGraphAddNode / AudioComponentFindNext) fail.
    playerUnitDescription.componentFlags = 0;
    playerUnitDescription.componentFlagsMask = 0;
    
    // EQ unit (iPod-style equalizer effect)
    AudioComponentDescription EQUnitDescription;
    EQUnitDescription.componentType          = kAudioUnitType_Effect;
    EQUnitDescription.componentSubType       = kAudioUnitSubType_AUiPodEQ;
    EQUnitDescription.componentManufacturer  = kAudioUnitManufacturer_Apple;
    EQUnitDescription.componentFlags         = 0;
    EQUnitDescription.componentFlagsMask     = 0;
    

    等等

    2) 创建节点

    ////
    //// EQ NODE (iPod EQ effect)
    ////
    err = AUGraphAddNode(processingGraph, &EQUnitDescription, &eqNode);
    // FIX: OSStatus is a 32-bit SInt32; passing it to %ld (expects long) is a
    // format-specifier mismatch on 64-bit builds — cast explicitly to (long).
    if (err) { NSLog(@"eqNode err = %ld", (long)err); }
    
    ////
    //// FX NODE
    //// NOTE(review): FXUnitDescription is not shown in step 1 — presumably
    //// declared alongside the other descriptions; confirm it is zero-initialized.
    ////
    err = AUGraphAddNode(processingGraph, &FXUnitDescription, &fxNode);
    if (err) { NSLog(@"fxNode err = %ld", (long)err); }
    
    ////
    //// VFX NODE (effect on the mic/line-in path)
    ////
    err = AUGraphAddNode(processingGraph, &VFXUnitDescription, &vfxNode);
    if (err) { NSLog(@"vfxNode err = %ld", (long)err); }
    
    ///
    /// MIXER NODE
    ///
    err = AUGraphAddNode (processingGraph, &MixerUnitDescription, &mixerNode );
    if (err) { NSLog(@"mixerNode err = %ld", (long)err); }
    
    ///
    /// OUTPUT NODE (Remote I/O)
    ///
    err = AUGraphAddNode(processingGraph, &iOUnitDescription, &ioNode);
    if (err) { NSLog(@"outputNode err = %ld", (long)err); }
    
    ////
    /// PLAYER NODE (file player generator)
    ///
    err = AUGraphAddNode(processingGraph, &playerUnitDescription, &audioPlayerNode);
    if (err) { NSLog(@"audioPlayerNode err = %ld", (long)err); }
    
    

    3) 连接它们

    //// mic/lineIn (Remote I/O input bus 1) ----> vfx bus 0
    err =   AUGraphConnectNodeInput(processingGraph, ioNode, 1, vfxNode, 0);
    // FIX: cast OSStatus (SInt32) to (long) to match the %ld format specifier;
    // the mismatch is undefined behavior on 64-bit builds.
    if (err) { NSLog(@"vfxNode err = %ld", (long)err); }
    
    //// vfx ----> mixer (micBus: mixer input bus index — defined elsewhere)
    err =   AUGraphConnectNodeInput(processingGraph, vfxNode, 0, mixerNode, micBus );
    if (err) { NSLog(@"vfxNode err = %ld", (long)err); }
    
    //// player ----> fx
    err = AUGraphConnectNodeInput(processingGraph, audioPlayerNode, 0, fxNode, 0);
    if (err) { NSLog(@"audioPlayerNode err = %ld", (long)err); }
    
    //// fx ----> mixer (filePlayerBus: mixer input bus index — defined elsewhere)
    err = AUGraphConnectNodeInput(processingGraph, fxNode, 0, mixerNode, filePlayerBus);
    if (err) { NSLog(@"fxNode err = %ld", (long)err); }
    
    ///// mixer ----> eq
    err = AUGraphConnectNodeInput(processingGraph, mixerNode, 0, eqNode, 0);
    if (err) { NSLog(@"mixerNode err = %ld", (long)err); }
    
    //// eq ----> output
    err = AUGraphConnectNodeInput(processingGraph, eqNode, 0, ioNode, 0);
    if (err) { NSLog(@"eqNode err = %ld", (long)err); }
    

    4) 设置渲染回调

        // Register a render callback that feeds mic/line-in samples into the VFX unit.
        AURenderCallbackStruct lineInrCallbackStruct = {};
        lineInrCallbackStruct.inputProc = &micLineInCallback;
        lineInrCallbackStruct.inputProcRefCon = (void*)self;   // callback receives self as refCon
        err = AudioUnitSetProperty(
                               vfxUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Global,   // NOTE(review): Apple documents this property on kAudioUnitScope_Input — confirm Global works here
                               0,                        // input bus 0 of the VFX unit
                               &lineInrCallbackStruct,
                               sizeof(lineInrCallbackStruct));
        // FIX: the result was assigned to err but never checked — log failures
        // the same way every other call in this example does.
        if (err) { NSLog(@"SetRenderCallback err = %ld", (long)err); }
    

    5) 在回调中处理音频缓冲区

    // Input render callback for the mic/line-in path.
    // Pulls captured samples from the Remote I/O unit's input bus (bus 1) into
    // ioData, then converts the 8.24 fixed-point samples into SInt16 buffers
    // owned by the controller object for further processing.
    // NOTE(review): AudioUnitRender is invoked TWICE on the same bus in this
    // callback (once right after the locals, once after the stereo/mono
    // decision). Pulling the input twice per render cycle looks like a
    // copy/paste bug — confirm against the original MixerHost-derived code and
    // drop one of the two calls. (The snippet is also truncated: the closing
    // brace / return statement is missing from the answer.)
    static OSStatus micLineInCallback (void                 *inRefCon,
                                       AudioUnitRenderActionFlags   *ioActionFlags,
                                       const AudioTimeStamp         *inTimeStamp,
                                       UInt32                       inBusNumber,
                                       UInt32                       inNumberFrames,
                                       AudioBufferList              *ioData)
    {
        // refCon was registered as `self`; recover the owning controller.
        MixerHostAudio *THIS = (MixerHostAudio *)inRefCon;
        AudioUnit rioUnit = THIS.ioUnit;    // io unit which has the input data from mic/lineIn
        OSStatus renderErr;
        OSStatus err;
        UInt32 bus1 = 1;                    // input bus
        int i;
    
        // First pull of the input samples (see NOTE(review) above — duplicated below).
        renderErr = AudioUnitRender(
                                       rioUnit,
                                       ioActionFlags,
                                       inTimeStamp,
                                       bus1,
                                       inNumberFrames,
                                       ioData);
    
         //// do something with ioData like getting left and right channels
    
    AudioUnitSampleType *inSamplesLeft;         // convenience pointers to sample data
        AudioUnitSampleType *inSamplesRight;
    
        int isStereo;               // c boolean - for deciding how many channels to process.
        int numberOfChannels;       // 1 = mono, 2 = stereo
    
        // SInt16 buffers to hold sample data after fixed-point conversion
    
        SInt16 *sampleBufferLeft = THIS.conversionBufferLeft;
        SInt16 *sampleBufferRight = THIS.conversionBufferRight;
        SInt16 *sampleBuffer;
    
        // start the actual processing
    
        numberOfChannels = THIS.displayNumberOfInputChannels;
        isStereo = numberOfChannels > 1 ? 1 : 0;  // decide stereo or mono
    
    
        // copy all the input samples to the callback buffer - after this point we could bail and have a pass through
    
        // Second pull of the same bus (see NOTE(review) above).
        renderErr = AudioUnitRender(rioUnit, ioActionFlags,
                                    inTimeStamp, bus1, inNumberFrames, ioData);
        if (renderErr < 0) {
            return renderErr;
        }
    
        // Convert left channel from 8.24 fixed point to SInt16.
        inSamplesLeft = (AudioUnitSampleType *) ioData->mBuffers[0].mData; // left channel
        fixedPointToSInt16(inSamplesLeft, sampleBufferLeft, inNumberFrames);
    
        if(isStereo) {
            inSamplesRight = (AudioUnitSampleType *) ioData->mBuffers[1].mData; // right channel
            fixedPointToSInt16(inSamplesRight, sampleBufferRight, inNumberFrames);
        }
    

    我是通过研究 Apple 的优秀文档和示例代码了解到这些的,例如:

    Apple 的 MixerHost 音频单元示例应用(sample app)

    The Audio Unit Programming Guide 来自苹果

    AudioGraph 是您在现实世界的 AudioUnit 编程中可以拥有的最全面的示例代码/“非官方”文档。

    希望对您有所帮助,祝您好运!

    【讨论】:

      【解决方案2】:

      看看 PureData 音频过程 - libpd 是它的 ios 版本

      【讨论】:

        猜你喜欢
        • 1970-01-01
        • 2013-06-05
        • 2018-01-29
        • 1970-01-01
        • 1970-01-01
        • 1970-01-01
        • 1970-01-01
        • 1970-01-01
        • 2011-04-30
        相关资源
        最近更新 更多