【Title】: Play audio on iOS from a memory data stream
【Posted】: 2018-06-22 22:44:07
【Question】:
I am porting an audio library to iOS that plays audio streams coming from callbacks. The user provides a callback that returns raw PCM data, and I need to play that data. Moreover, the library must be able to play multiple streams at the same time.
I figured I need to use AVFoundation, but it seems AVAudioPlayer does not support streamed audio buffers, and all the streaming documentation I can find uses data coming directly from the network. Which API should I use here?
Thanks in advance!
By the way, I am not using the Apple libraries through Swift or Objective-C. However, I assume everything is still exposed, so an example in Swift would be greatly appreciated anyway!
【Discussion】:
【Tags】:
ios
audio
avfoundation
【Solution 1】:
You need to initialize:
- An audio session that uses the input and output audio units:
-(SInt32) audioSessionInitialization:(SInt32)preferred_sample_rate {
    // - - - - - - Audio session initialization
    NSError *audioSessionError = nil;
    session = [AVAudioSession sharedInstance];

    // Deactivate the session before reconfiguring it
    [session setActive:NO error:&audioSessionError];

    // Set the category (PlayAndRecord uses both the input and output AudioUnits)
    [session setCategory:AVAudioSessionCategoryPlayAndRecord
             withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
                   error:&audioSessionError];

    // Ask for the caller's preferred sample rate (e.g. 44100 Hz)
    [session setPreferredSampleRate:(double)preferred_sample_rate error:&audioSessionError];

    // Reactivate the session
    [session setActive:YES error:&audioSessionError];

    // Observe output route changes (speakers/headphones)
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(routeChange:)
                                                 name:AVAudioSessionRouteChangeNotification
                                               object:nil];

    // - - - - - - Create the audio engine
    [self audioEngineInitialization];

    return (SInt32)[session sampleRate];
}
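The routeChange: handler registered above is not shown here. A minimal sketch of what it could look like (the reaction to the change is an assumption on my part, not part of the code above):

-(void) routeChange:(NSNotification *)notification {
    // Why did the route change? (headphones unplugged, new device attached, ...)
    NSInteger reason = [notification.userInfo[AVAudioSessionRouteChangeReasonKey] integerValue];
    if (reason == AVAudioSessionRouteChangeReasonOldDeviceUnavailable) {
        // e.g. headphones were pulled; iOS has already moved output to the speaker
        NSLog(@"Audio output route changed, reason: %ld", (long)reason);
    }
}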
- The audio engine:
// Element 0 of the I/O unit is the output bus
static const AudioUnitElement kOutputBus = 0;

-(void) audioEngineInitialization {
    engine = [[AVAudioEngine alloc] init];
    inputNode = [engine inputNode];
    outputNode = [engine outputNode];
    [engine connect:inputNode to:outputNode format:[inputNode inputFormatForBus:0]];

    // Interleaved 16-bit signed stereo PCM at the session's sample rate
    AudioStreamBasicDescription asbd_player;
    asbd_player.mSampleRate       = session.sampleRate;
    asbd_player.mFormatID         = kAudioFormatLinearPCM;
    asbd_player.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    asbd_player.mFramesPerPacket  = 1;
    asbd_player.mChannelsPerFrame = 2;
    asbd_player.mBitsPerChannel   = 16;
    asbd_player.mBytesPerPacket   = 4;  // 2 channels * 2 bytes per sample
    asbd_player.mBytesPerFrame    = 4;

    OSStatus status;
    status = AudioUnitSetProperty(inputNode.audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  0,
                                  &asbd_player,
                                  sizeof(asbd_player));

    // Install the render callback on the ioUnit: it supplies the samples to play
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc       = engineInputCallback; ///CALLBACK///
    callbackStruct.inputProcRefCon = (__bridge void *)(self);
    status = AudioUnitSetProperty(inputNode.audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));

    [engine prepare];
}
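Note that audioEngineInitialization only prepares the engine; nothing plays until the engine is started, which is when the render callback begins firing. A minimal start call (my addition, not shown above):

-(void) startEngine {
    NSError *error = nil;
    // Starting the engine makes the I/O unit begin pulling data,
    // so engineInputCallback starts being invoked from here on
    if (![engine startAndReturnError:&error]) {
        NSLog(@"AVAudioEngine failed to start: %@", error);
    }
}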
- The audio engine callback:
static OSStatus engineInputCallback(void                       *inRefCon,
                                    AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp       *inTimeStamp,
                                    UInt32                      inBusNumber,
                                    UInt32                      inNumberFrames,
                                    AudioBufferList            *ioData)
{
    // Reference to the audio controller that owns the stream data
    MyAudioController *ac = (__bridge MyAudioController *)(inRefCon);

    // With interleaved stereo PCM there is only ever one buffer in practice
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
        AudioBuffer *buffer = &ioData->mBuffers[i];

        // Copy the stream data into the output buffer; take a pointer into
        // ioData so the size update below is visible to the caller
        UInt32 size = MIN(buffer->mDataByteSize, ac.streamBuffer.mDataByteSize);
        memcpy(buffer->mData, ac.streamBuffer.mData, size);
        buffer->mDataByteSize = size;  // report how much data we wrote
    }
    return noErr;
}
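As written, the callback replays whatever happens to sit in streamBuffer. To drive playback from the library user's callback, as the question describes, the buffer has to be refilled with fresh PCM on every render cycle. A hedged sketch of that producer side, assuming a hypothetical user callback (the PCMSourceCallback type and the userCallback/userData properties are my assumptions, not part of the code above):

// Hypothetical signature for the library user's PCM callback
typedef UInt32 (*PCMSourceCallback)(void *pcmOut, UInt32 maxBytes, void *userData);

// Inside engineInputCallback, pull fresh samples instead of copying a stale buffer:
for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
    AudioBuffer *buffer = &ioData->mBuffers[i];
    // Ask the user's callback to fill the output buffer with interleaved 16-bit PCM
    UInt32 written = ac.userCallback(buffer->mData, buffer->mDataByteSize, ac.userData);
    // Zero-fill any shortfall so we don't play garbage
    if (written < buffer->mDataByteSize) {
        memset((Byte *)buffer->mData + written, 0, buffer->mDataByteSize - written);
    }
}

For multiple simultaneous streams, each stream would supply its own callback and the render callback would mix (sum with clipping) their outputs into the same buffer.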