I'm trying to get audio to work with the video for an iOS application. The video is fine, but no audio is recorded to the file (my iPhone speaker works).
Here's the init setup:
session = [[AVCaptureSession alloc] init];
menu->session = session;
menu_open = NO;
session.sessionPreset = AVCaptureSessionPresetMedium;
camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
microphone = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
menu->camera = camera;
[session beginConfiguration];
[camera lockForConfiguration:nil];
if ([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
    camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
}
if ([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
    camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
}
if ([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
    camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
}
if ([camera hasTorch]) {
    if ([camera isTorchModeSupported:AVCaptureTorchModeOn]) {
        [camera setTorchMode:AVCaptureTorchModeOn];
    }
}
[camera unlockForConfiguration];
[session commitConfiguration];
AVCaptureDeviceInput * camera_input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:nil];
[session addInput:camera_input];
microphone_input = [[AVCaptureDeviceInput deviceInputWithDevice:microphone error:nil] retain];
AVCaptureVideoDataOutput * output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
output.videoSettings = [NSDictionary dictionaryWithObject: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[session addOutput:output];
output.minFrameDuration = CMTimeMake(1,30);
dispatch_queue_t queue = dispatch_queue_create("MY QUEUE", NULL);
[output setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
audio_output = [[[AVCaptureAudioDataOutput alloc] init] retain];
queue = dispatch_queue_create("MY QUEUE", NULL);
AudioOutputBufferDelegate * special_delegate = [[[AudioOutputBufferDelegate alloc] init] autorelease];
special_delegate->normal_delegate = self;
[special_delegate retain];
[audio_output setSampleBufferDelegate:special_delegate queue:queue];
dispatch_release(queue);
[session startRunning];
Here is the beginning and end of recording:
if (recording) { // Hence stop recording
    [video_button setTitle:@"Video" forState:UIControlStateNormal];
    recording = NO;
    [writer_input markAsFinished];
    [audio_writer_input markAsFinished];
    [video_writer endSessionAtSourceTime:CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:start_time], 30)];
    [video_writer finishWriting];
    UISaveVideoAtPathToSavedPhotosAlbum(temp_url, self, @selector(video:didFinishSavingWithError:contextInfo:), nil);
    [start_time release];
    [temp_url release];
    [av_adaptor release];
    [microphone lockForConfiguration:nil];
    [session beginConfiguration];
    [session removeInput:microphone_input];
    [session removeOutput:audio_output];
    [session commitConfiguration];
    [microphone unlockForConfiguration];
    [menu restateConfigiration];
    [vid_off play];
} else { // Start recording
    [vid_on play];
    [microphone lockForConfiguration:nil];
    [session beginConfiguration];
    [session addInput:microphone_input];
    [session addOutput:audio_output];
    [session commitConfiguration];
    [microphone unlockForConfiguration];
    [menu restateConfigiration];
    [video_button setTitle:@"Stop" forState:UIControlStateNormal];
    recording = YES;
    NSError *error = nil;
    NSFileManager *file_manager = [[NSFileManager alloc] init];
    temp_url = [[NSString alloc] initWithFormat:@"%@/%@", NSTemporaryDirectory(), @"temp.mp4"];
    [file_manager removeItemAtPath:temp_url error:NULL];
    [file_manager release];
    video_writer = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:temp_url] fileType:AVFileTypeMPEG4 error:&error];
    NSDictionary *video_settings = [NSDictionary dictionaryWithObjectsAndKeys:
                                    AVVideoCodecH264, AVVideoCodecKey,
                                    [NSNumber numberWithInt:360], AVVideoWidthKey,
                                    [NSNumber numberWithInt:480], AVVideoHeightKey,
                                    nil];
    writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:video_settings] retain];
    AudioChannelLayout acl;
    bzero(&acl, sizeof(acl));
    acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
    audio_writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                          outputSettings:[NSDictionary dictionaryWithObjectsAndKeys:
                              [NSNumber numberWithInt:kAudioFormatMPEG4AAC], AVFormatIDKey,
                              [NSNumber numberWithInt:1], AVNumberOfChannelsKey,
                              [NSNumber numberWithFloat:44100.0], AVSampleRateKey,
                              [NSNumber numberWithInt:64000], AVEncoderBitRateKey,
                              [NSData dataWithBytes:&acl length:sizeof(acl)], AVChannelLayoutKey,
                              nil]] retain];
    audio_writer_input.expectsMediaDataInRealTime = YES;
    av_adaptor = [[AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writer_input sourcePixelBufferAttributes:NULL] retain];
    [video_writer addInput:writer_input];
    [video_writer addInput:audio_writer_input];
    [video_writer startWriting];
    [video_writer startSessionAtSourceTime:CMTimeMake(0, 1)];
    start_time = [[NSDate alloc] init];
}
Here is the delegate for the audio:
@implementation AudioOutputBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if (normal_delegate->recording) {
        CMSampleBufferSetOutputPresentationTimeStamp(sampleBuffer, CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:normal_delegate->start_time], 30));
        [normal_delegate->audio_writer_input appendSampleBuffer:sampleBuffer];
    }
}
@end
The video delegate method doesn't matter because it works. "restateConfigiration" just sorts out the session configuration; otherwise the torch goes off, etc.:
[session beginConfiguration];
switch (quality) {
    case Low:
        session.sessionPreset = AVCaptureSessionPresetLow;
        break;
    case Medium:
        session.sessionPreset = AVCaptureSessionPreset640x480;
        break;
}
[session commitConfiguration];
[camera lockForConfiguration:nil];
if ([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
    camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
}
if ([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
    camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
}
if ([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
    camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
}
if ([camera hasTorch]) {
    if (torch) {
        if ([camera isTorchModeSupported:AVCaptureTorchModeOn]) {
            [camera setTorchMode:AVCaptureTorchModeOn];
        }
    } else {
        if ([camera isTorchModeSupported:AVCaptureTorchModeOff]) {
            [camera setTorchMode:AVCaptureTorchModeOff];
        }
    }
}
[camera unlockForConfiguration];
Thank you for any help.
This may be the same issue as mentioned in the linked post. Try commenting out these lines:
[writer_input markAsFinished];
[audio_writer_input markAsFinished];
[video_writer endSessionAtSourceTime: CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate: start_time],30)];
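With those calls commented out, the stop branch would reduce to something like the following. This is only a sketch reusing the variable names from your question; finishWriting on its own should finish any inputs that are still open and close the file:

if (recording) { // Stop recording
    [video_button setTitle:@"Video" forState:UIControlStateNormal];
    recording = NO;
    // [writer_input markAsFinished];
    // [audio_writer_input markAsFinished];
    // [video_writer endSessionAtSourceTime:CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:start_time], 30)];
    [video_writer finishWriting]; // closes the file and finishes any open inputs
    UISaveVideoAtPathToSavedPhotosAlbum(temp_url, self, @selector(video:didFinishSavingWithError:contextInfo:), nil);
    // ... releases and session teardown as before ...
}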
Edit
I don't know whether the way you are setting the presentation timestamp is necessarily wrong. The way I handle this is with a local variable that is set to 0 at the start. Then, when my delegate receives the first packet, I do:
if (_startTime.value == 0) {
    _startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
}
and then
[bufferWriter->writer startWriting];
[bufferWriter->writer startSessionAtSourceTime:_startTime];
Your code looks valid, since you are calculating the time difference for each received packet. However, AVFoundation calculates this for you, and it also optimizes the timestamps for placement in the interleaved container. Another thing I am unsure of: each audio CMSampleBufferRef can contain more than one data buffer, and each data buffer has its own PTS. I am not sure whether setting the PTS automatically adjusts all the other data buffers.
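If you did want to shift the timing of every sample in a buffer explicitly, rather than relying on CMSampleBufferSetOutputPresentationTimeStamp, one option is to copy the buffer with adjusted timing info. A rough sketch follows; the Core Media calls (CMSampleBufferGetSampleTimingInfoArray, CMSampleBufferCreateCopyWithNewTiming) are real, but the helper name and the idea of applying a fixed offset are only illustrative:

#import <CoreMedia/CoreMedia.h>

// Sketch: re-stamp every sample in a CMSampleBuffer by a fixed offset.
// Helper name and offset logic are illustrative, not from the code above.
static CMSampleBufferRef createOffsetCopy(CMSampleBufferRef sampleBuffer, CMTime offset) {
    CMItemCount count = 0;
    // First call asks how many timing entries the buffer holds.
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, 0, NULL, &count);
    CMSampleTimingInfo *timing = malloc(sizeof(CMSampleTimingInfo) * count);
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, count, timing, &count);
    for (CMItemCount i = 0; i < count; i++) {
        timing[i].presentationTimeStamp = CMTimeAdd(timing[i].presentationTimeStamp, offset);
        if (CMTIME_IS_VALID(timing[i].decodeTimeStamp)) {
            timing[i].decodeTimeStamp = CMTimeAdd(timing[i].decodeTimeStamp, offset);
        }
    }
    CMSampleBufferRef copy = NULL;
    CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault, sampleBuffer, count, timing, &copy);
    free(timing);
    return copy; // caller releases with CFRelease()
}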
Where my code differs from yours is that I use a single dispatch queue for both audio and video. In the callback I use (some code removed):
switch (bufferWriter->writer.status) {
    case AVAssetWriterStatusUnknown:
        if (_startTime.value == 0) {
            _startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        }
        [bufferWriter->writer startWriting];
        [bufferWriter->writer startSessionAtSourceTime:_startTime];
        // Break if not ready, otherwise fall through.
        if (bufferWriter->writer.status != AVAssetWriterStatusWriting) {
            break;
        }
    case AVAssetWriterStatusWriting:
        if (captureOutput == self.captureManager.audioOutput) {
            if (!bufferWriter->audioIn.readyForMoreMediaData) {
                break;
            }
            @try {
                if (![bufferWriter->audioIn appendSampleBuffer:sampleBuffer]) {
                    [self delegateMessage:@"Audio Writing Error" withType:ERROR];
                }
            }
            @catch (NSException *e) {
                NSLog(@"Audio Exception: %@", [e reason]);
            }
        }
        else if (captureOutput == self.captureManager.videoOutput) {
            if (!bufferWriter->videoIn.readyForMoreMediaData) {
                break;
            }
            @try {
                if (!frontCamera) {
                    if (![bufferWriter->videoIn appendSampleBuffer:sampleBuffer]) {
                        [self delegateMessage:@"Video Writing Error" withType:ERROR];
                    }
                }
                else {
                    CMTime pt = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
                    flipBuffer(sampleBuffer, pixelBuffer);
                    if (![bufferWriter->adaptor appendPixelBuffer:pixelBuffer withPresentationTime:pt]) {
                        [self delegateMessage:@"Video Writing Error" withType:ERROR];
                    }
                }
            }
            @catch (NSException *e) {
                NSLog(@"Video Exception: %@", [e reason]);
            }
        }
        break;
    case AVAssetWriterStatusCompleted:
        return;
    case AVAssetWriterStatusFailed:
        [self delegateMessage:@"Critical Error Writing Queues" withType:ERROR];
        bufferWriter->writer_failed = YES;
        _broadcastError = YES;
        [self stopCapture];
        return;
    case AVAssetWriterStatusCancelled:
        break;
    default:
        break;
}
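For reference, wiring both data outputs to one serial queue, as described above, might look roughly like this. Only the audioOutput/videoOutput roles come from the callback above; the queue name and the assumption of an already configured session are illustrative:

// Sketch: one serial queue feeding both the audio and video data outputs,
// so the delegate callback above receives buffers from both in order.
dispatch_queue_t captureQueue = dispatch_queue_create("capture.queue", NULL);

AVCaptureVideoDataOutput *videoOutput = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
AVCaptureAudioDataOutput *audioOutput = [[[AVCaptureAudioDataOutput alloc] init] autorelease];

[videoOutput setSampleBufferDelegate:self queue:captureQueue];
[audioOutput setSampleBufferDelegate:self queue:captureQueue];

if ([session canAddOutput:videoOutput]) [session addOutput:videoOutput];
if ([session canAddOutput:audioOutput]) [session addOutput:audioOutput];

dispatch_release(captureQueue); // the outputs retain the queue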