Search code examples
iosobjective-cavfoundationcore-audio

Initialising AVAudio​Session​Mode​Measurement


Hey there I'm definitely out of my depth here, however unfortunately its too late to turn back as I was provided the project by a lecturer.

I'm trying to disable the system supplied signal processing applied to my input by using AVAudio​Session​Mode​Measurement within my project.

However I'm struggling to find any sources on doing this.

My desired outcome is that by enabling this I will be able to make more accurate readings within my application.

Here is the code:

#import "ViewController.h"

@import AudioToolbox;
@import AVFoundation;


#define kOutputBus 0
#define kInputBus 1

@interface ViewController () 

@property (nonatomic, weak) IBOutlet UILabel *dBSPLView2;

@end



@implementation ViewController

static AudioComponentInstance audioUnit;

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.


    [self setupAudio];
}

- (void) setupAudio {


    AudioComponentDescription desc;


    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);

    OSStatus status = AudioComponentInstanceNew(comp, &audioUnit);
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

    AudioStreamBasicDescription audioFormat;

    audioFormat.mSampleRate = 96000.00;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel = 16;
    audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(SInt16);
    audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;


    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

    flag = 0;
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

    status = AudioUnitSetProperty(audioUnit , kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat));
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = (__bridge void*)self;

    status = AudioUnitSetProperty(audioUnit , kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct));
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

    status = AudioUnitInitialize(audioUnit);
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }

}

static OSStatus recordingCallback(

                                  void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData

                            ) {

    AudioBuffer buffer;

    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * sizeof(SInt16);
    buffer.mData = malloc(buffer.mDataByteSize);

    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;

    OSStatus status = AudioUnitRender(audioUnit , ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
    if (status != noErr) {
        printf("Error\n");
        return -1;
    }

    SInt16 *frameBuffer = buffer.mData;

    double totalAmplitude = 0;

    for (int i = 0; i < inNumberFrames; i++) {
       // printf("%i\n",frameBuffer[i]);

        totalAmplitude += frameBuffer[i] * frameBuffer[i];
    }

    totalAmplitude /= inNumberFrames;

    totalAmplitude = sqrt(totalAmplitude);

    //Creates a negative number that goes no higher than zero
    //float SPLFloat = totalAmplitude / (float)SHRT_MAX * 2;

    float dBFloat = (20 * log10(totalAmplitude)) + 11;



    dispatch_async(dispatch_get_main_queue(), ^{
        ViewController *viewController = (__bridge ViewController*)inRefCon;
        viewController.dBSPLView2.text = [NSString stringWithFormat:@"%.f", dBFloat];
    });

    return noErr;
}

- (IBAction)recordButtonPressed:(id)sender {
    NSError *error;
    [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (error != nil) {
        NSAssert(error == nil, @"Error");
    }
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryRecord error:&error];
    if (error != nil) {
        NSAssert(error == nil, @"Error");
    }

    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        if (granted) {
            OSStatus status = AudioOutputUnitStart(audioUnit);
            if (status != noErr) {
                NSAssert(status == noErr,@"Error");
            }
        } else {
            NSAssert(NO, @"Error");
        }
    }];
}



- (IBAction)stopButtonPressed:(id)sender {
    OSStatus status = AudioOutputUnitStop(audioUnit);
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }
    NSError *error;
    [[AVAudioSession sharedInstance] setActive:NO error:&error];
    if (error != nil) {
        NSAssert(error == nil, @"Error");
    }

}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

- (void) dealloc {
    OSStatus status = AudioComponentInstanceDispose(audioUnit);
    if (status != noErr) {
        NSAssert(status == noErr,@"Error");
    }
}

@end

Solution

  • After you configure the category for the session:

    [[AVAudioSession sharedInstance] setMode:AVAudioSessionModeMeasurement error:&error];
    if (error != nil) {
        NSAssert(error == nil, @"Error");
    }
    

    Also, your error handling doesn't follow the established pattern. You should check the return value of setMode:error:. The error in/out parameter is only guaranteed to be valid when the return value from the method is NO. (In practice, checking that error is nil probably works fine in most cases, but it's not documented to work that way - so you shouldn't rely on it.)