Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Merge pull request #49 from alexbw/ARC-Modernized

ARC compatibility and modernization of Obj-C interfaces
  • Loading branch information...
commit e9fa26e89f88ec5a6147139d173f326db8cb4547 2 parents 9f21e3c + b1d252c
@alexbw authored
View
13 Novocaine Mac Example/AppDelegate.h
@@ -28,13 +28,12 @@
#import "AudioFileWriter.h"
@interface AppDelegate : NSObject <NSApplicationDelegate>
-{
- RingBuffer *ringBuffer;
- Novocaine *audioManager;
- AudioFileReader *fileReader;
- AudioFileWriter *fileWriter;
-}
-@property (assign) IBOutlet NSWindow *window;
+@property (nonatomic, strong) Novocaine *audioManager;
+@property (nonatomic, strong) AudioFileReader *fileReader;
+@property (nonatomic, strong) AudioFileWriter *fileWriter;
+@property (nonatomic, assign) RingBuffer * ringBuffer;
+
+@property (nonatomic, weak) IBOutlet NSWindow *window;
@end
View
68 Novocaine Mac Example/AppDelegate.mm
@@ -26,42 +26,43 @@
@implementation AppDelegate
-@synthesize window = _window;
-
- (void)dealloc
{
- [super dealloc];
+ if (_ringBuffer){
+ delete _ringBuffer;
+ }
}
- (void)applicationDidFinishLaunching:(NSNotification *)aNotification
{
-
- audioManager = [Novocaine audioManager];
-// ringBuffer = new RingBuffer(32768, 2);
+ self.audioManager = [Novocaine audioManager];
+
+ self.ringBuffer = new RingBuffer(32768, 2);
+ __weak AppDelegate * wself = self;
// A simple delay that's hard to express without ring buffers
// ========================================
-//
-// [audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
-// ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
+
+// [self.audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
+// wself.ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
// }];
//
// int echoDelay = 11025;
// float *holdingBuffer = (float *)calloc(16384, sizeof(float));
-// [audioManager setOutputBlock:^(float *outData, UInt32 numFrames, UInt32 numChannels) {
+// [self.audioManager setOutputBlock:^(float *outData, UInt32 numFrames, UInt32 numChannels) {
//
// // Grab the play-through audio
-// ringBuffer->FetchInterleavedData(outData, numFrames, numChannels);
+// wself.ringBuffer->FetchInterleavedData(outData, numFrames, numChannels);
// float volume = 0.8;
// vDSP_vsmul(outData, 1, &volume, outData, 1, numFrames*numChannels);
//
//
// // Seek back, and grab some delayed audio
-// ringBuffer->SeekReadHeadPosition(-echoDelay-numFrames);
-// ringBuffer->FetchInterleavedData(holdingBuffer, numFrames, numChannels);
-// ringBuffer->SeekReadHeadPosition(echoDelay);
+// wself.ringBuffer->SeekReadHeadPosition(-echoDelay-numFrames);
+// wself.ringBuffer->FetchInterleavedData(holdingBuffer, numFrames, numChannels);
+// wself.ringBuffer->SeekReadHeadPosition(echoDelay);
//
// volume = 0.5;
// vDSP_vsmul(holdingBuffer, 1, &volume, holdingBuffer, 1, numFrames*numChannels);
@@ -73,27 +74,27 @@ - (void)applicationDidFinishLaunching:(NSNotification *)aNotification
// ========================================
NSURL *inputFileURL = [[NSBundle mainBundle] URLForResource:@"TLC" withExtension:@"mp3"];
- fileReader = [[AudioFileReader alloc]
- initWithAudioFileURL:inputFileURL
- samplingRate:audioManager.samplingRate
- numChannels:audioManager.numOutputChannels];
+ self.fileReader = [[AudioFileReader alloc]
+ initWithAudioFileURL:inputFileURL
+ samplingRate:self.audioManager.samplingRate
+ numChannels:self.audioManager.numOutputChannels];
- fileReader.currentTime = 5;
- [fileReader play];
+ self.fileReader.currentTime = 5;
+ [self.fileReader play];
__block int counter = 0;
- [audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+
+
+ [self.audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
{
- [fileReader retrieveFreshAudio:data numFrames:numFrames numChannels:numChannels];
+ [wself.fileReader retrieveFreshAudio:data numFrames:numFrames numChannels:numChannels];
counter++;
if (counter % 80 == 0)
- NSLog(@"Time: %f", fileReader.currentTime);
+ NSLog(@"Time: %f", wself.fileReader.currentTime);
}];
-
-
// AUDIO FILE WRITING YEAH!
// ========================================
@@ -103,19 +104,18 @@ - (void)applicationDidFinishLaunching:(NSNotification *)aNotification
// nil];
// NSURL *outputFileURL = [NSURL fileURLWithPathComponents:pathComponents];
//
-// fileWriter = [[AudioFileWriter alloc]
-// initWithAudioFileURL:outputFileURL
-// samplingRate:audioManager.samplingRate
-// numChannels:audioManager.numInputChannels];
+// self.fileWriter = [[AudioFileWriter alloc]
+// initWithAudioFileURL:outputFileURL
+// samplingRate:self.audioManager.samplingRate
+// numChannels:self.audioManager.numInputChannels];
//
//
// __block int counter = 0;
-// audioManager.inputBlock = ^(float *data, UInt32 numFrames, UInt32 numChannels) {
-// [fileWriter writeNewAudio:data numFrames:numFrames numChannels:numChannels];
+// self.audioManager.inputBlock = ^(float *data, UInt32 numFrames, UInt32 numChannels) {
+// [wself.fileWriter writeNewAudio:data numFrames:numFrames numChannels:numChannels];
// counter += 1;
-// if (counter > 10 * audioManager.samplingRate / numChannels) { // 10 seconds of recording
-// audioManager.inputBlock = nil;
-// [fileWriter release];
+// if (counter > 10 * wself.audioManager.samplingRate / numChannels) { // 10 seconds of recording
+// wself.audioManager.inputBlock = nil;
// }
// };
View
16 Novocaine iOS Example/AppDelegate.mm
@@ -28,24 +28,14 @@
@implementation AppDelegate
-@synthesize window = _window;
-@synthesize viewController = _viewController;
-
-- (void)dealloc
-{
- [_window release];
- [_viewController release];
- [super dealloc];
-}
-
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
- self.window = [[[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]] autorelease];
+ self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
// Override point for customization after application launch.
if ([[UIDevice currentDevice] userInterfaceIdiom] == UIUserInterfaceIdiomPhone) {
- self.viewController = [[[ViewController alloc] initWithNibName:@"ViewController_iPhone" bundle:nil] autorelease];
+ self.viewController = [[ViewController alloc] initWithNibName:@"ViewController_iPhone" bundle:nil];
} else {
- self.viewController = [[[ViewController alloc] initWithNibName:@"ViewController_iPad" bundle:nil] autorelease];
+ self.viewController = [[ViewController alloc] initWithNibName:@"ViewController_iPad" bundle:nil];
}
self.window.rootViewController = self.viewController;
[self.window makeKeyAndVisible];
View
11 Novocaine iOS Example/ViewController.h
@@ -29,10 +29,9 @@
#import "AudioFileWriter.h"
@interface ViewController : UIViewController
-{
- RingBuffer *ringBuffer;
- Novocaine *audioManager;
- AudioFileReader *fileReader;
- AudioFileWriter *fileWriter;
-}
+
+@property (nonatomic, strong) Novocaine *audioManager;
+@property (nonatomic, strong) AudioFileReader *fileReader;
+@property (nonatomic, strong) AudioFileWriter *fileWriter;
+
@end
View
81 Novocaine iOS Example/ViewController.mm
@@ -26,10 +26,17 @@
@interface ViewController ()
+@property (nonatomic, assign) RingBuffer *ringBuffer;
+
@end
@implementation ViewController
+- (void)dealloc
+{
+ delete self.ringBuffer;
+}
+
- (void)viewDidLoad
{
[super viewDidLoad];
@@ -45,27 +52,29 @@ - (void)viewDidUnload
- (void)viewWillAppear:(BOOL)animated
{
[super viewWillAppear:animated];
+
+ __weak ViewController * wself = self;
- ringBuffer = new RingBuffer(32768, 2);
- audioManager = [Novocaine audioManager];
+ self.ringBuffer = new RingBuffer(32768, 2);
+ self.audioManager = [Novocaine audioManager];
// Basic playthru example
-// [audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
+// [self.audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
// float volume = 0.5;
// vDSP_vsmul(data, 1, &volume, data, 1, numFrames*numChannels);
-// ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
+// wself.ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
// }];
//
//
-// [audioManager setOutputBlock:^(float *outData, UInt32 numFrames, UInt32 numChannels) {
-// ringBuffer->FetchInterleavedData(outData, numFrames, numChannels);
+// [self.audioManager setOutputBlock:^(float *outData, UInt32 numFrames, UInt32 numChannels) {
+// wself.ringBuffer->FetchInterleavedData(outData, numFrames, numChannels);
// }];
// MAKE SOME NOOOOO OIIIISSSEEE
// ==================================================
-// [audioManager setOutputBlock:^(float *newdata, UInt32 numFrames, UInt32 thisNumChannels)
+// [self.audioManager setOutputBlock:^(float *newdata, UInt32 numFrames, UInt32 thisNumChannels)
// {
// for (int i = 0; i < numFrames * thisNumChannels; i++) {
// newdata[i] = (rand() % 100) / 100.0f / 2;
@@ -76,7 +85,7 @@ - (void)viewWillAppear:(BOOL)animated
// MEASURE SOME DECIBELS!
// ==================================================
// __block float dbVal = 0.0;
-// [audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
+// [self.audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels) {
//
// vDSP_vsq(data, 1, data, 1, numFrames*numChannels);
// float meanVal = 0.0;
@@ -92,10 +101,10 @@ - (void)viewWillAppear:(BOOL)animated
// SIGNAL GENERATOR!
// __block float frequency = 40.0;
// __block float phase = 0.0;
-// [audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+// [self.audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
// {
//
-// float samplingRate = audioManager.samplingRate;
+// float samplingRate = wself.audioManager.samplingRate;
// for (int i=0; i < numFrames; ++i)
// {
// for (int iChannel = 0; iChannel < numChannels; ++iChannel)
@@ -112,18 +121,18 @@ - (void)viewWillAppear:(BOOL)animated
// DALEK VOICE!
// (aka Ring Modulator)
-// [audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+// [self.audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
// {
-// ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
+// wself.ringBuffer->AddNewInterleavedFloatData(data, numFrames, numChannels);
// }];
//
// __block float frequency = 100.0;
// __block float phase = 0.0;
-// [audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+// [self.audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
// {
-// ringBuffer->FetchInterleavedData(data, numFrames, numChannels);
+// wself.ringBuffer->FetchInterleavedData(data, numFrames, numChannels);
//
-// float samplingRate = audioManager.samplingRate;
+// float samplingRate = wself.audioManager.samplingRate;
// for (int i=0; i < numFrames; ++i)
// {
// for (int iChannel = 0; iChannel < numChannels; ++iChannel)
@@ -135,23 +144,23 @@ - (void)viewWillAppear:(BOOL)animated
// if (phase > 1.0) phase = -1;
// }
// }];
-
+//
// VOICE-MODULATED OSCILLATOR
// __block float magnitude = 0.0;
-// [audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+// [self.audioManager setInputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
// {
// vDSP_rmsqv(data, 1, &magnitude, numFrames*numChannels);
// }];
//
// __block float frequency = 100.0;
// __block float phase = 0.0;
-// [audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+// [self.audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
// {
//
// printf("Magnitude: %f\n", magnitude);
-// float samplingRate = audioManager.samplingRate;
+// float samplingRate = wself.audioManager.samplingRate;
// for (int i=0; i < numFrames; ++i)
// {
// for (int iChannel = 0; iChannel < numChannels; ++iChannel)
@@ -169,18 +178,19 @@ - (void)viewWillAppear:(BOOL)animated
// ========================================
NSURL *inputFileURL = [[NSBundle mainBundle] URLForResource:@"TLC" withExtension:@"mp3"];
- fileReader = [[AudioFileReader alloc]
- initWithAudioFileURL:inputFileURL
- samplingRate:audioManager.samplingRate
- numChannels:audioManager.numOutputChannels];
+ self.fileReader = [[AudioFileReader alloc]
+ initWithAudioFileURL:inputFileURL
+ samplingRate:self.audioManager.samplingRate
+ numChannels:self.audioManager.numOutputChannels];
+
+ [self.fileReader play];
+ self.fileReader.currentTime = 30.0;
- [fileReader play];
- fileReader.currentTime = 30.0;
- [audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
+ [self.audioManager setOutputBlock:^(float *data, UInt32 numFrames, UInt32 numChannels)
{
- [fileReader retrieveFreshAudio:data numFrames:numFrames numChannels:numChannels];
- NSLog(@"Time: %f", fileReader.currentTime);
+ [wself.fileReader retrieveFreshAudio:data numFrames:numFrames numChannels:numChannels];
+ NSLog(@"Time: %f", wself.fileReader.currentTime);
}];
@@ -193,19 +203,18 @@ - (void)viewWillAppear:(BOOL)animated
// NSURL *outputFileURL = [NSURL fileURLWithPathComponents:pathComponents];
// NSLog(@"URL: %@", outputFileURL);
//
-// fileWriter = [[AudioFileWriter alloc]
-// initWithAudioFileURL:outputFileURL
-// samplingRate:audioManager.samplingRate
-// numChannels:audioManager.numInputChannels];
+// self.fileWriter = [[AudioFileWriter alloc]
+// initWithAudioFileURL:outputFileURL
+// samplingRate:self.audioManager.samplingRate
+// numChannels:self.audioManager.numInputChannels];
//
//
// __block int counter = 0;
-// audioManager.inputBlock = ^(float *data, UInt32 numFrames, UInt32 numChannels) {
-// [fileWriter writeNewAudio:data numFrames:numFrames numChannels:numChannels];
+// self.audioManager.inputBlock = ^(float *data, UInt32 numFrames, UInt32 numChannels) {
+// [wself.fileWriter writeNewAudio:data numFrames:numFrames numChannels:numChannels];
// counter += 1;
// if (counter > 400) { // roughly 5 seconds of audio
-// audioManager.inputBlock = nil;
-// [fileWriter release];
+// wself.audioManager.inputBlock = nil;
// }
// };
View
8 Novocaine.xcodeproj/project.pbxproj
@@ -7,6 +7,7 @@
objects = {
/* Begin PBXBuildFile section */
+ 9AE2143E1768D0C60068A4DC /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 9AE2143D1768D0C60068A4DC /* AVFoundation.framework */; };
FA0B92E9154ACB100079834E /* AudioFileReader.mm in Sources */ = {isa = PBXBuildFile; fileRef = FA0B92E8154ACB100079834E /* AudioFileReader.mm */; };
FA0B92EA154ACB100079834E /* AudioFileReader.mm in Sources */ = {isa = PBXBuildFile; fileRef = FA0B92E8154ACB100079834E /* AudioFileReader.mm */; };
FA2015DD154B02CC00F8D3AC /* TLC.mp3 in Resources */ = {isa = PBXBuildFile; fileRef = FA2015DC154B02CC00F8D3AC /* TLC.mp3 */; };
@@ -42,6 +43,7 @@
/* End PBXBuildFile section */
/* Begin PBXFileReference section */
+ 9AE2143D1768D0C60068A4DC /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };
FA0B92E7154ACB100079834E /* AudioFileReader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioFileReader.h; sourceTree = "<group>"; };
FA0B92E8154ACB100079834E /* AudioFileReader.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AudioFileReader.mm; sourceTree = "<group>"; };
FA2015DC154B02CC00F8D3AC /* TLC.mp3 */ = {isa = PBXFileReference; lastKnownFileType = audio.mp3; path = TLC.mp3; sourceTree = "<group>"; };
@@ -105,6 +107,7 @@
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
+ 9AE2143E1768D0C60068A4DC /* AVFoundation.framework in Frameworks */,
FA5526EF152A6466003D9601 /* Accelerate.framework in Frameworks */,
FA5526F0152A6466003D9601 /* AudioToolbox.framework in Frameworks */,
FA5526F2152A6466003D9601 /* CoreAudio.framework in Frameworks */,
@@ -146,6 +149,7 @@
FA55269E152A6405003D9601 /* AudioToolbox.framework */,
FA5526ED152A6466003D9601 /* AudioUnit.framework */,
FA55269F152A6405003D9601 /* AudioUnit.framework */,
+ 9AE2143D1768D0C60068A4DC /* AVFoundation.framework */,
FA5526EE152A6466003D9601 /* CoreAudio.framework */,
FA5526A0152A6405003D9601 /* CoreAudio.framework */,
FA55263B152A63A4003D9601 /* CoreGraphics.framework */,
@@ -451,6 +455,7 @@
isa = XCBuildConfiguration;
buildSettings = {
ARCHS = "$(ARCHS_STANDARD_64_BIT)";
+ CLANG_ENABLE_OBJC_ARC = YES;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"\"$(SYSTEM_APPS_DIR)/Xcode.app/Contents/Developer/Library/Frameworks\"",
@@ -472,6 +477,7 @@
isa = XCBuildConfiguration;
buildSettings = {
ARCHS = "$(ARCHS_STANDARD_64_BIT)";
+ CLANG_ENABLE_OBJC_ARC = YES;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
@@ -492,6 +498,7 @@
FA5526E9152A6452003D9601 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_ENABLE_OBJC_ARC = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
GCC_PRECOMPILE_PREFIX_HEADER = YES;
GCC_PREFIX_HEADER = "Novocaine iOS Example/Novocaine iOS Example-Prefix.pch";
@@ -504,6 +511,7 @@
FA5526EA152A6452003D9601 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_ENABLE_OBJC_ARC = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
GCC_PRECOMPILE_PREFIX_HEADER = YES;
GCC_PREFIX_HEADER = "Novocaine iOS Example/Novocaine iOS Example-Prefix.pch";
View
36 Novocaine/AudioFileReader.h
@@ -27,35 +27,29 @@
@interface AudioFileReader : NSObject
-{
- float currentTime;
- float duration;
- float samplingRate;
- float latency;
- UInt32 numChannels;
- NSURL *audioFileURL;
-
- InputBlock readerBlock;
-
- BOOL playing;
-}
-@property (getter=getCurrentTime, setter=setCurrentTime:) float currentTime;
-@property (readonly, getter=getDuration) float duration;
-@property float samplingRate;
-@property UInt32 numChannels;
-@property float latency;
-@property (nonatomic, copy) NSURL *audioFileURL;
-@property (nonatomic, copy) InputBlock readerBlock;
-@property BOOL playing;
+// ----- Read-write ------
+
+@property (nonatomic, assign, getter=getCurrentTime, setter=setCurrentTime:) float currentTime;
+@property (nonatomic, copy) NovocaineInputBlock readerBlock;
+@property (nonatomic, assign) float latency;
+
+// ----- Read-only ------
+
+@property (nonatomic, copy, readonly) NSURL *audioFileURL;
+@property (nonatomic, assign, readonly, getter=getDuration) float duration;
+@property (nonatomic, assign, readonly) float samplingRate;
+@property (nonatomic, assign, readonly) UInt32 numChannels;
+@property (nonatomic, assign, readonly) BOOL playing;
- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels;
// You use this method to grab audio if you have your own callback.
// The buffer'll fill at the speed the audio is normally being played.
+
- (void)retrieveFreshAudio:(float *)buffer numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels;
-//- (float)getCurrentTime;
+
- (void)play;
- (void)pause;
- (void)stop;
View
51 Novocaine/AudioFileReader.mm
@@ -32,42 +32,30 @@ @interface AudioFileReader ()
RingBuffer *ringBuffer;
}
-@property AudioStreamBasicDescription outputFormat;
-@property ExtAudioFileRef inputFile;
-@property UInt32 outputBufferSize;
-@property float *outputBuffer;
-@property float *holdingBuffer;
-@property UInt32 numSamplesReadPerPacket;
-@property UInt32 desiredPrebufferedSamples;
-@property SInt64 currentFileTime;
-@property dispatch_source_t callbackTimer;
-
+// redeclaration as readwrite in class continuation
+@property (nonatomic, copy, readwrite) NSURL *audioFileURL;
+@property (nonatomic, assign, readwrite, getter=getDuration) float duration;
+@property (nonatomic, assign, readwrite) float samplingRate;
+@property (nonatomic, assign, readwrite) UInt32 numChannels;
+@property (nonatomic, assign, readwrite) BOOL playing;
+
+@property (nonatomic, assign) AudioStreamBasicDescription outputFormat;
+@property (nonatomic, assign) ExtAudioFileRef inputFile;
+@property (nonatomic, assign) UInt32 outputBufferSize;
+@property (nonatomic, assign) float *outputBuffer;
+@property (nonatomic, assign) float *holdingBuffer;
+@property (nonatomic, assign) UInt32 numSamplesReadPerPacket;
+@property (nonatomic, assign) UInt32 desiredPrebufferedSamples;
+@property (nonatomic, assign) SInt64 currentFileTime;
+@property (nonatomic, assign) dispatch_source_t callbackTimer;
- (void)bufferNewAudio;
@end
-
@implementation AudioFileReader
-@synthesize outputFormat = _outputFormat;
-@synthesize inputFile = _inputFile;
-@synthesize outputBuffer = _outputBuffer;
-@synthesize holdingBuffer = _holdingBuffer;
-@synthesize outputBufferSize = _outputBufferSize;
-@synthesize numSamplesReadPerPacket = _numSamplesReadPerPacket;
-@synthesize desiredPrebufferedSamples = _desiredPrebufferedSamples;
-@synthesize currentFileTime = _currentFileTime;
-@synthesize callbackTimer = _callbackTimer;
-@synthesize currentTime = _currentTime;
-@synthesize duration = _duration;
-@synthesize samplingRate = _samplingRate;
-@synthesize latency = _latency;
-@synthesize numChannels = _numChannels;
-@synthesize audioFileURL = _audioFileURL;
-@synthesize readerBlock = _readerBlock;
-@synthesize playing = _playing;
- (void)dealloc
{
@@ -85,7 +73,6 @@ - (void)dealloc
delete ringBuffer;
- [super dealloc];
}
@@ -98,10 +85,9 @@ - (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSampl
// Zero-out our timer, so we know we're not using our callback yet
self.callbackTimer = nil;
-
// Open a reference to the audio file
self.audioFileURL = urlToAudioFile;
- CFURLRef audioFileRef = (CFURLRef)self.audioFileURL;
+ CFURLRef audioFileRef = (__bridge CFURLRef)self.audioFileURL;
CheckError(ExtAudioFileOpenURL(audioFileRef, &_inputFile), "Opening file URL (ExtAudioFileOpenURL)");
@@ -228,7 +214,6 @@ - (float)getDuration
- (void)configureReaderCallback
{
-
if (!self.callbackTimer)
{
self.callbackTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue());
@@ -266,7 +251,7 @@ - (void)retrieveFreshAudio:(float *)buffer numFrames:(UInt32)thisNumFrames numCh
}
-- (void)play;
+- (void)play
{
// Configure (or if necessary, create and start) the timer for retrieving audio
View
31 Novocaine/AudioFileWriter.h
@@ -30,27 +30,18 @@
@interface AudioFileWriter : NSObject
-{
- float currentTime;
- float duration;
- float samplingRate;
- float latency;
- UInt32 numChannels;
- NSURL *audioFileURL;
-
- OutputBlock writerBlock;
-
- BOOL recording;
-}
-@property (getter=getDuration, readonly) float currentTime;
-@property (getter=getDuration) float duration;
-@property float samplingRate;
-@property UInt32 numChannels;
-@property float latency;
-@property (nonatomic, copy) NSURL *audioFileURL;
-@property (nonatomic, copy) InputBlock writerBlock;
-@property BOOL recording;
+// ----- Read-write ------
+@property (nonatomic, copy) NovocaineInputBlock writerBlock;
+
+// ----- Read-only ------
+@property (nonatomic, assign, getter=getDuration, readonly) float currentTime;
+@property (nonatomic, assign, getter=getDuration, readonly) float duration;
+@property (nonatomic, assign, readonly) float samplingRate;
+@property (nonatomic, assign, readonly) UInt32 numChannels;
+@property (nonatomic, assign, readonly) float latency;
+@property (nonatomic, copy, readonly) NSURL *audioFileURL;
+@property (nonatomic, assign, readonly) BOOL recording;
- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels;
View
45 Novocaine/AudioFileWriter.m
@@ -29,40 +29,30 @@
@interface AudioFileWriter()
-@property AudioStreamBasicDescription outputFormat;
-@property ExtAudioFileRef outputFile;
-@property UInt32 outputBufferSize;
-@property float *outputBuffer;
-@property float *holdingBuffer;
-@property SInt64 currentFileTime;
-@property dispatch_source_t callbackTimer;
-@property (readwrite) float currentTime;
+// redeclare as readwrite in class continuation
+@property (nonatomic, assign, getter=getDuration, readwrite) float currentTime;
+@property (nonatomic, assign, getter=getDuration, readwrite) float duration;
+@property (nonatomic, assign, readwrite) float samplingRate;
+@property (nonatomic, assign, readwrite) UInt32 numChannels;
+@property (nonatomic, assign, readwrite) float latency;
+@property (nonatomic, copy, readwrite) NSURL *audioFileURL;
+@property (nonatomic, assign, readwrite) BOOL recording;
+
+@property (nonatomic, assign) AudioStreamBasicDescription outputFormat;
+@property (nonatomic, assign) ExtAudioFileRef outputFile;
+@property (nonatomic, assign) UInt32 outputBufferSize;
+@property (nonatomic, assign) float *outputBuffer;
+@property (nonatomic, assign) float *holdingBuffer;
+@property (nonatomic, assign) SInt64 currentFileTime;
+@property (nonatomic, assign) dispatch_source_t callbackTimer;
@end
-
@implementation AudioFileWriter
static pthread_mutex_t outputAudioFileLock;
-@synthesize outputFormat = _outputFormat;
-@synthesize outputFile = _outputFile;
-@synthesize outputBuffer = _outputBuffer;
-@synthesize holdingBuffer = _holdingBuffer;
-@synthesize outputBufferSize = _outputBufferSize;
-@synthesize currentFileTime = _currentFileTime;
-@synthesize callbackTimer = _callbackTimer;
-
-@synthesize currentTime = _currentTime;
-@synthesize duration = _duration;
-@synthesize samplingRate = _samplingRate;
-@synthesize latency = _latency;
-@synthesize numChannels = _numChannels;
-@synthesize audioFileURL = _audioFileURL;
-@synthesize writerBlock = _writerBlock;
-@synthesize recording = _recording;
-
- (void)dealloc
{
[self stop];
@@ -70,7 +60,6 @@ - (void)dealloc
free(self.outputBuffer);
free(self.holdingBuffer);
- [super dealloc];
}
- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels
@@ -85,7 +74,7 @@ - (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSampl
// Open a reference to the audio file
self.audioFileURL = urlToAudioFile;
- CFURLRef audioFileRef = (CFURLRef)self.audioFileURL;
+ CFURLRef audioFileRef = (__bridge CFURLRef)self.audioFileURL;
AudioStreamBasicDescription outputFileDesc = {44100.0, kAudioFormatMPEG4AAC, 0, 0, 1024, 0, thisNumChannels, 0, 0};
View
105 Novocaine/Novocaine.h
@@ -30,6 +30,7 @@
#include <CoreAudio/CoreAudio.h>
#else
#define USING_IOS
+ #include <AVFoundation/AVFoundation.h>
#endif
#include <Block.h>
@@ -89,91 +90,61 @@ void sessionInterruptionListener(void *inClientData, UInt32 inInterruption);
}
#endif
-typedef void (^OutputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
-typedef void (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
+typedef void (^NovocaineOutputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
+typedef void (^NovocaineInputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
#if defined (USING_IOS)
@interface Novocaine : NSObject <UIAlertViewDelegate>
#elif defined (USING_OSX)
@interface Novocaine : NSObject
#endif
-{
- // Audio Handling
- AudioUnit inputUnit;
- AudioUnit outputUnit;
- AudioBufferList *inputBuffer;
-
- // Session Properties
- BOOL inputAvailable;
- NSString *inputRoute;
- UInt32 numInputChannels;
- UInt32 numOutputChannels;
- Float64 samplingRate;
- BOOL isInterleaved;
- UInt32 numBytesPerSample;
- AudioStreamBasicDescription inputFormat;
- AudioStreamBasicDescription outputFormat;
-
- // Audio Processing
- OutputBlock outputBlock;
- InputBlock inputBlock;
-
- float *inData;
- float *outData;
-
- BOOL playing;
- // BOOL playThroughEnabled;
-
-
-#if defined (USING_OSX)
- AudioDeviceID *deviceIDs;
- NSMutableArray *deviceNames;
- AudioDeviceID defaultInputDeviceID;
- NSString *defaultDeviceName;
+
+// ------ These properties/methods are used for configuration -------
+
+@property (nonatomic, copy) NSString *inputRoute;
+
+// TODO: Not yet implemented. No effect right now.
+//@property (nonatomic, assign) BOOL inputEnabled;
+
+#ifdef USING_IOS
+@property (nonatomic, assign) BOOL forceOutputToSpeaker;
#endif
-
-}
-@property AudioUnit inputUnit;
-@property AudioUnit outputUnit;
-@property AudioBufferList *inputBuffer;
-@property (nonatomic, copy) OutputBlock outputBlock;
-@property (nonatomic, copy) InputBlock inputBlock;
-@property BOOL inputAvailable;
-@property (nonatomic, retain) NSString *inputRoute;
-@property UInt32 numInputChannels;
-@property UInt32 numOutputChannels;
-@property Float64 samplingRate;
-@property BOOL isInterleaved;
-@property UInt32 numBytesPerSample;
-@property AudioStreamBasicDescription inputFormat;
-@property AudioStreamBasicDescription outputFormat;
+// ND: Exposing the block setters this way will create the correct block signature for auto-complete.
+// These will map to "copy" property setters in class continuation in source file
+- (void)setInputBlock:(NovocaineInputBlock)block;
+- (void)setOutputBlock:(NovocaineOutputBlock)block;
+
+// ND: Not sure if there is a need to reference these elsewhere, but here are the getters just in case
+// These will also map to the property getters in the class continuation.
+- (NovocaineInputBlock)inputBlock;
+- (NovocaineOutputBlock)outputBlock;
+
+// ------------------------------------------------------------------
+
+// these should be readonly in public interface - no need for public write access
+@property (nonatomic, assign, readonly) AudioUnit inputUnit;
+@property (nonatomic, assign, readonly) AudioUnit outputUnit;
+@property (nonatomic, assign, readonly) AudioBufferList *inputBuffer;
+@property (nonatomic, assign, readonly) BOOL inputAvailable;
+@property (nonatomic, assign, readonly) UInt32 numInputChannels;
+@property (nonatomic, assign, readonly) UInt32 numOutputChannels;
+@property (nonatomic, assign, readonly) Float64 samplingRate;
+@property (nonatomic, assign, readonly) BOOL isInterleaved;
+@property (nonatomic, assign, readonly) UInt32 numBytesPerSample;
+@property (nonatomic, assign, readonly) AudioStreamBasicDescription inputFormat;
+@property (nonatomic, assign, readonly) AudioStreamBasicDescription outputFormat;
+@property (nonatomic, assign, readonly) BOOL playing;
// @property BOOL playThroughEnabled;
-@property BOOL playing;
-@property float *inData;
-@property float *outData;
-
-#if defined (USING_OSX)
-@property AudioDeviceID *deviceIDs;
-@property (nonatomic, retain) NSMutableArray *deviceNames;
-@property AudioDeviceID defaultInputDeviceID;
-@property (nonatomic, retain) NSString *defaultInputDeviceName;
-@property AudioDeviceID defaultOutputDeviceID;
-@property (nonatomic, retain) NSString *defaultOutputDeviceName;
-- (void)enumerateAudioDevices;
-#endif
// Singleton methods
+ (Novocaine *) audioManager;
-
// Audio Unit methods
- (void)play;
- (void)pause;
-- (void)setupAudio;
-- (void)ifAudioInputIsAvailableThenSetupAudioSession;
#if defined ( USING_IOS )
- (void)checkSessionProperties;
View
567 Novocaine/Novocaine.m
@@ -44,39 +44,48 @@
static Novocaine *audioManager = nil;
@interface Novocaine()
-- (void)setupAudio;
+
+// redeclare readwrite for class continuation
+@property (nonatomic, assign, readwrite) AudioUnit inputUnit;
+@property (nonatomic, assign, readwrite) AudioUnit outputUnit;
+@property (nonatomic, assign, readwrite) AudioBufferList *inputBuffer;
+@property (nonatomic, assign, readwrite) BOOL inputAvailable;
+@property (nonatomic, assign, readwrite) UInt32 numInputChannels;
+@property (nonatomic, assign, readwrite) UInt32 numOutputChannels;
+@property (nonatomic, assign, readwrite) Float64 samplingRate;
+@property (nonatomic, assign, readwrite) BOOL isInterleaved;
+@property (nonatomic, assign, readwrite) UInt32 numBytesPerSample;
+@property (nonatomic, assign, readwrite) AudioStreamBasicDescription inputFormat;
+@property (nonatomic, assign, readwrite) AudioStreamBasicDescription outputFormat;
+@property (nonatomic, assign, readwrite) BOOL playing;
+@property (nonatomic, assign, readwrite) float *inData;
+@property (nonatomic, assign, readwrite) float *outData;
+
+@property (nonatomic, copy) NovocaineOutputBlock outputBlock;
+@property (nonatomic, copy) NovocaineInputBlock inputBlock;
+
+#if defined (USING_OSX)
+@property (nonatomic, assign) AudioDeviceID *deviceIDs;
+@property (nonatomic, strong) NSMutableArray *deviceNames;
+@property (nonatomic, assign) AudioDeviceID defaultInputDeviceID;
+@property (nonatomic, strong) NSString *defaultInputDeviceName;
+@property (nonatomic, assign) AudioDeviceID defaultOutputDeviceID;
+@property (nonatomic, strong) NSString *defaultOutputDeviceName;
+- (void)enumerateAudioDevices;
+#endif
+
+// must be called prior to playing audio
+- (void)setupAudioSession;
+- (void)setupAudioUnits;
- (NSString *)applicationDocumentsDirectory;
+- (void)freeBuffers;
+
@end
@implementation Novocaine
-@synthesize inputUnit;
-@synthesize outputUnit;
-@synthesize inputBuffer;
-@synthesize inputRoute, inputAvailable;
-@synthesize numInputChannels, numOutputChannels;
-@synthesize inputBlock, outputBlock;
-@synthesize samplingRate;
-@synthesize isInterleaved;
-@synthesize numBytesPerSample;
-@synthesize inData;
-@synthesize outData;
-@synthesize playing;
-
-@synthesize outputFormat;
-@synthesize inputFormat;
-// @synthesize playThroughEnabled;
-
-#if defined( USING_OSX )
-@synthesize deviceIDs;
-@synthesize deviceNames;
-@synthesize defaultInputDeviceID;
-@synthesize defaultInputDeviceName;
-@synthesize defaultOutputDeviceID;
-@synthesize defaultOutputDeviceName;
-#endif
#pragma mark - Singleton Methods
+ (Novocaine *) audioManager
@@ -100,31 +109,17 @@ + (id)allocWithZone:(NSZone *)zone {
return nil; // on subsequent allocation attempts return nil
}
-- (id)copyWithZone:(NSZone *)zone
-{
- return self;
-}
+// ND: If NSCopying protocol is to be supported, it should be declared with class and done correctly. Disabled for now.
-- (id)retain {
- return self;
-}
-
-- (unsigned)retainCount {
- return UINT_MAX; // denotes an object that cannot be released
-}
-
-- (oneway void)release {
- //do nothing
-}
+//- (id)copyWithZone:(NSZone *)zone
+//{
+// return self;
+//}
- (id)init
{
if (self = [super init])
{
-
- // Initialize some stuff k?
- outputBlock = nil;
- inputBlock = nil;
// Initialize a float buffer to hold audio
self.inData = (float *)calloc(8192, sizeof(float)); // probably more than we'll need
@@ -141,7 +136,10 @@ - (id)init
// self.playThroughEnabled = NO;
// Fire up the audio session ( with steady error checking ... )
- [self ifAudioInputIsAvailableThenSetupAudioSession];
+ [self setupAudioSession];
+
+ // start audio units
+ [self setupAudioUnits];
return self;
@@ -150,55 +148,88 @@ - (id)init
return nil;
}
+- (void)dealloc
+{
+ free(self.inData);
+ free(self.outData);
+
+#if defined (USING_OSX)
+ if (self.deviceIDs){
+ free(self.deviceIDs);
+ }
+#endif
+
+ [self freeBuffers];
+}
+
+- (void)freeBuffers
+{
+ if (self.inputBuffer){
+
+ for(UInt32 i =0; i< self.inputBuffer->mNumberBuffers ; i++) {
+
+ if(self.inputBuffer->mBuffers[i].mData){
+ free(self.inputBuffer->mBuffers[i].mData);
+ }
+ }
+
+ free(self.inputBuffer);
+ self.inputBuffer = NULL;
+ }
+}
+
+#pragma mark - Properties
+
+// TODO: Implement this.
+//- (void)setInputEnabled:(BOOL)inputEnabled
+//{
+// _inputEnabled = inputEnabled;
+//}
+
+#ifdef USING_IOS
+- (void)setForceOutputToSpeaker:(BOOL)forceOutputToSpeaker
+{
+ UInt32 value = forceOutputToSpeaker ? 1 : 0;
+
+#if !TARGET_IPHONE_SIMULATOR
+ // should not be fatal error
+ OSStatus err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, sizeof(UInt32), &value);
+ if (err != noErr){
+ NSLog(@"Could not override audio output route to speaker");
+ }
+ else{
+ _forceOutputToSpeaker = forceOutputToSpeaker;
+ }
+#else
+ _forceOutputToSpeaker = forceOutputToSpeaker;
+#endif
+}
+#endif
#pragma mark - Audio Methods
-- (void)ifAudioInputIsAvailableThenSetupAudioSession {
- // Initialize and configure the audio session, and add an interuption listener
+- (void)setupAudioSession
+{
+ // Initialize and configure the audio session, and add an interuption listener
#if defined ( USING_IOS )
- CheckError( AudioSessionInitialize(NULL, NULL, sessionInterruptionListener, self), "Couldn't initialize audio session");
+ NSError *err = nil;
+ if (![[AVAudioSession sharedInstance] setActive:YES error:&err]){
+ NSLog(@"Could not activate audio session: %@", err);
+ }
[self checkAudioSource];
#elif defined ( USING_OSX )
// TODO: grab the audio device
[self enumerateAudioDevices];
self.inputAvailable = YES;
#endif
-
- // Check the session properties (available input routes, number of channels, etc)
-
-
-
- // If we do have input, then let's rock 'n roll.
- if (self.inputAvailable) {
- [self setupAudio];
- [self play];
- }
-
- // If we don't have input, then ask the user to provide some
- else
- {
-#if defined ( USING_IOS )
- UIAlertView *noInputAlert =
- [[UIAlertView alloc] initWithTitle:@"No Audio Input"
- message:@"Couldn't find any audio input. Plug in your Apple headphones or another microphone."
- delegate:self
- cancelButtonTitle:@"OK"
- otherButtonTitles:nil];
-
- [noInputAlert show];
- [noInputAlert release];
-#endif
-
- }
}
-- (void)setupAudio
+- (void)setupAudioUnits
{
-
// --- Audio Session Setup ---
// ---------------------------
@@ -211,7 +242,7 @@ - (void)setupAudio
// Add a property listener, to listen to changes to the session
- CheckError( AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, sessionPropertyListener, self), "Couldn't add audio session property listener");
+ CheckError( AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, sessionPropertyListener, (__bridge void*)self), "Couldn't add audio session property listener");
// Set the buffer size, this will affect the number of samples that get rendered every time the audio callback is fired
// A small number will get you lower latency audio, but will make your processor work harder
@@ -222,7 +253,11 @@ - (void)setupAudio
// Set the audio session active
- CheckError( AudioSessionSetActive(YES), "Couldn't activate the audio session");
+ NSError *err = nil;
+ if (![[AVAudioSession sharedInstance] setActive:YES error:&err]){
+ NSLog(@"Couldn't activate audio session: %@", err);
+ }
+
[self checkSessionProperties];
@@ -263,28 +298,19 @@ - (void)setupAudio
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &inputDescription);
- CheckError( AudioComponentInstanceNew(inputComponent, &inputUnit), "Couldn't create the output audio unit");
+ CheckError( AudioComponentInstanceNew(inputComponent, &_inputUnit), "Couldn't create the output audio unit");
#if defined ( USING_OSX )
AudioComponent outputComponent = AudioComponentFindNext(NULL, &outputDescription);
- CheckError( AudioComponentInstanceNew(outputComponent, &outputUnit), "Couldn't create the output audio unit");
+ CheckError( AudioComponentInstanceNew(outputComponent, &_outputUnit), "Couldn't create the output audio unit");
#endif
- // Enable input
- UInt32 one = 1;
- CheckError( AudioUnitSetProperty(inputUnit,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- kInputBus,
- &one,
- sizeof(one)), "Couldn't enable IO on the input scope of output unit");
-
#if defined ( USING_OSX )
// Disable output on the input unit
// (only on Mac, since on the iPhone, the input unit is also the output unit)
UInt32 zero = 0;
- CheckError( AudioUnitSetProperty(inputUnit,
+ CheckError( AudioUnitSetProperty(_inputUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
@@ -292,7 +318,7 @@ - (void)setupAudio
sizeof(UInt32)), "Couldn't disable output on the audio unit");
// Enable output
- CheckError( AudioUnitSetProperty(outputUnit,
+ CheckError( AudioUnitSetProperty(_outputUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
@@ -300,7 +326,7 @@ - (void)setupAudio
sizeof(one)), "Couldn't enable IO on the input scope of output unit");
// Disable input
- CheckError( AudioUnitSetProperty(outputUnit,
+ CheckError( AudioUnitSetProperty(_outputUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
@@ -315,36 +341,36 @@ - (void)setupAudio
# if defined ( USING_IOS )
UInt32 size;
size = sizeof( AudioStreamBasicDescription );
- CheckError( AudioUnitGetProperty( inputUnit,
+ CheckError( AudioUnitGetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
1,
- &inputFormat,
- &size ),
+ &_inputFormat,
+ &size ),
"Couldn't get the hardware input stream format");
// Check the output stream format
size = sizeof( AudioStreamBasicDescription );
- CheckError( AudioUnitGetProperty( inputUnit,
+ CheckError( AudioUnitGetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
1,
- &outputFormat,
+ &_outputFormat,
&size ),
"Couldn't get the hardware output stream format");
// TODO: check this works on iOS!
- inputFormat.mSampleRate = 44100.0;
- outputFormat.mSampleRate = 44100.0;
- self.samplingRate = inputFormat.mSampleRate;
- self.numBytesPerSample = inputFormat.mBitsPerChannel / 8;
+ _inputFormat.mSampleRate = 44100.0;
+ _outputFormat.mSampleRate = 44100.0;
+ self.samplingRate = _inputFormat.mSampleRate;
+ self.numBytesPerSample = _inputFormat.mBitsPerChannel / 8;
size = sizeof(AudioStreamBasicDescription);
- CheckError(AudioUnitSetProperty(inputUnit,
+ CheckError(AudioUnitSetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
- &outputFormat,
+ &_outputFormat,
size),
"Couldn't set the ASBD on the audio unit (after setting its sampling rate)");
@@ -353,73 +379,83 @@ - (void)setupAudio
UInt32 size = sizeof(AudioDeviceID);
if(self.defaultInputDeviceID == kAudioDeviceUnknown)
- {
+ {
+ AudioObjectPropertyAddress propertyAddress;
+ propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propertyAddress.mElement = kAudioObjectPropertyElementMaster;
+
AudioDeviceID thisDeviceID;
UInt32 propsize = sizeof(AudioDeviceID);
- CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propsize, &thisDeviceID), "Could not get the default device");
+ CheckError(AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propsize, &thisDeviceID), "Could not get the default device");
self.defaultInputDeviceID = thisDeviceID;
}
if (self.defaultOutputDeviceID == kAudioDeviceUnknown)
{
+ AudioObjectPropertyAddress propertyAddress;
+ propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propertyAddress.mElement = kAudioObjectPropertyElementMaster;
+
AudioDeviceID thisDeviceID;
UInt32 propsize = sizeof(AudioDeviceID);
- CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &propsize, &thisDeviceID), "Could not get the default device");
+ CheckError(AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propsize, &thisDeviceID), "Could not get the default device");
self.defaultOutputDeviceID = thisDeviceID;
}
// Set the current device to the default input unit.
- CheckError( AudioUnitSetProperty( inputUnit,
+ CheckError( AudioUnitSetProperty( _inputUnit,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global,
kOutputBus,
- &defaultInputDeviceID,
+ &_defaultInputDeviceID,
sizeof(AudioDeviceID) ), "Couldn't set the current input audio device");
- CheckError( AudioUnitSetProperty( outputUnit,
+ CheckError( AudioUnitSetProperty( _outputUnit,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global,
kOutputBus,
- &defaultOutputDeviceID,
+ &_defaultOutputDeviceID,
sizeof(AudioDeviceID) ), "Couldn't set the current output audio device");
UInt32 propertySize = sizeof(AudioStreamBasicDescription);
- CheckError(AudioUnitGetProperty(inputUnit,
+ CheckError(AudioUnitGetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
- &outputFormat,
+ &_outputFormat,
&propertySize),
"Couldn't get ASBD from input unit");
// 9/6/10 - check the input device's stream format
- CheckError(AudioUnitGetProperty(inputUnit,
+ CheckError(AudioUnitGetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kInputBus,
- &inputFormat,
+ &_inputFormat,
&propertySize),
"Couldn't get ASBD from input unit");
- outputFormat.mSampleRate = inputFormat.mSampleRate;
+ _outputFormat.mSampleRate = _inputFormat.mSampleRate;
// outputFormat.mFormatFlags = kAudioFormatFlagsCanonical;
- self.samplingRate = inputFormat.mSampleRate;
- self.numBytesPerSample = inputFormat.mBitsPerChannel / 8;
+ self.samplingRate = _inputFormat.mSampleRate;
+ self.numBytesPerSample = _inputFormat.mBitsPerChannel / 8;
- self.numInputChannels = inputFormat.mChannelsPerFrame;
- self.numOutputChannels = outputFormat.mChannelsPerFrame;
+ self.numInputChannels = _inputFormat.mChannelsPerFrame;
+ self.numOutputChannels = _outputFormat.mChannelsPerFrame;
propertySize = sizeof(AudioStreamBasicDescription);
- CheckError(AudioUnitSetProperty(inputUnit,
+ CheckError(AudioUnitSetProperty(_inputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
- &outputFormat,
+ &_outputFormat,
propertySize),
"Couldn't set the ASBD on the audio unit (after setting its sampling rate)");
@@ -431,7 +467,7 @@ - (void)setupAudio
#if defined ( USING_IOS )
UInt32 numFramesPerBuffer;
size = sizeof(UInt32);
- CheckError(AudioUnitGetProperty(inputUnit,
+ CheckError(AudioUnitGetProperty(_inputUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
kOutputBus,
@@ -439,7 +475,7 @@ - (void)setupAudio
&size),
"Couldn't get the number of frames per callback");
- UInt32 bufferSizeBytes = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket * numFramesPerBuffer;
+ UInt32 bufferSizeBytes = _outputFormat.mBytesPerFrame * _outputFormat.mFramesPerPacket * numFramesPerBuffer;
#elif defined ( USING_OSX )
// Get the size of the IO buffer(s)
@@ -457,17 +493,17 @@ - (void)setupAudio
- if (outputFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
+ if (_outputFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
// The audio is non-interleaved
printf("Not interleaved!\n");
self.isInterleaved = NO;
// allocate an AudioBufferList plus enough space for array of AudioBuffers
- UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * outputFormat.mChannelsPerFrame);
+ UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * _outputFormat.mChannelsPerFrame);
//malloc buffer lists
self.inputBuffer = (AudioBufferList *)malloc(propsize);
- self.inputBuffer->mNumberBuffers = outputFormat.mChannelsPerFrame;
+ self.inputBuffer->mNumberBuffers = _outputFormat.mChannelsPerFrame;
//pre-malloc buffers for AudioBufferLists
for(UInt32 i =0; i< self.inputBuffer->mNumberBuffers ; i++) {
@@ -489,7 +525,7 @@ - (void)setupAudio
self.inputBuffer->mNumberBuffers = 1;
//pre-malloc buffers for AudioBufferLists
- self.inputBuffer->mBuffers[0].mNumberChannels = outputFormat.mChannelsPerFrame;
+ self.inputBuffer->mBuffers[0].mNumberChannels = _outputFormat.mChannelsPerFrame;
self.inputBuffer->mBuffers[0].mDataByteSize = bufferSizeBytes;
self.inputBuffer->mBuffers[0].mData = malloc(bufferSizeBytes);
memset(self.inputBuffer->mBuffers[0].mData, 0, bufferSizeBytes);
@@ -500,9 +536,9 @@ - (void)setupAudio
// Slap a render callback on the unit
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = inputCallback;
- callbackStruct.inputProcRefCon = self;
+ callbackStruct.inputProcRefCon = (__bridge void *)(self);
- CheckError( AudioUnitSetProperty(inputUnit,
+ CheckError( AudioUnitSetProperty(_inputUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
0,
@@ -511,9 +547,9 @@ - (void)setupAudio
callbackStruct.inputProc = renderCallback;
- callbackStruct.inputProcRefCon = self;
+ callbackStruct.inputProcRefCon = (__bridge void *)(self);
# if defined ( USING_OSX )
- CheckError( AudioUnitSetProperty(outputUnit,
+ CheckError( AudioUnitSetProperty(_outputUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
@@ -522,7 +558,7 @@ - (void)setupAudio
"Couldn't set the render callback on the input unit");
#elif defined ( USING_IOS )
- CheckError( AudioUnitSetProperty(inputUnit,
+ CheckError( AudioUnitSetProperty(_inputUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
@@ -534,45 +570,67 @@ - (void)setupAudio
- CheckError(AudioUnitInitialize(inputUnit), "Couldn't initialize the output unit");
+ CheckError(AudioUnitInitialize(_inputUnit), "Couldn't initialize the output unit");
#if defined ( USING_OSX )
- CheckError(AudioUnitInitialize(outputUnit), "Couldn't initialize the output unit");
+ CheckError(AudioUnitInitialize(_outputUnit), "Couldn't initialize the output unit");
#endif
-
-
}
#if defined (USING_OSX)
- (void)enumerateAudioDevices
{
- UInt32 propSize;
+ AudioObjectPropertyAddress propertyAddress;
+ propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propertyAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ UInt32 propSize = sizeof(AudioDeviceID);
+ CheckError(AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propSize, &_defaultInputDeviceID), "Could not get the default device");
- UInt32 propsize = sizeof(AudioDeviceID);
- CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propsize, &defaultInputDeviceID), "Could not get the default device");
+ propertyAddress.mSelector = kAudioHardwarePropertyDevices;
+ propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propertyAddress.mElement = kAudioObjectPropertyElementMaster;
- AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices, &propSize, NULL );
+ AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propSize);
uint32_t deviceCount = ( propSize / sizeof(AudioDeviceID) );
// Allocate the device IDs
- self.deviceIDs = (AudioDeviceID *)calloc(deviceCount, sizeof(AudioDeviceID));
- [deviceNames removeAllObjects];
+ _deviceIDs = (AudioDeviceID *)calloc(deviceCount, sizeof(AudioDeviceID));
+ [_deviceNames removeAllObjects];
// Get all the device IDs
- CheckError( AudioHardwareGetProperty( kAudioHardwarePropertyDevices, &propSize, self.deviceIDs ), "Could not get device IDs");
+ CheckError( AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propSize, _deviceIDs ), "Could not get device IDs");
+
// Get the names of all the device IDs
- for( int i = 0; i < deviceCount; i++ )
+ // 256 chars should be big enough for pretty much any name
+ char deviceNameBuffer[256];
+ char mfrNameBuffer[256];
+ UInt32 nameBufSize;
+ AudioObjectPropertyAddress deviceAddress;
+
+ for( int i = 0; i < deviceCount; i++ )
{
- UInt32 size = sizeof(AudioDeviceID);
- CheckError( AudioDeviceGetPropertyInfo( self.deviceIDs[i], 0, true, kAudioDevicePropertyDeviceName, &size, NULL ), "Could not get device name length");
+ deviceAddress.mSelector = kAudioDevicePropertyDeviceName;
+ deviceAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ deviceAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ nameBufSize = sizeof(deviceNameBuffer);
+
+ CheckError( AudioObjectGetPropertyData(self.deviceIDs[i], &deviceAddress, 0, NULL, &nameBufSize, deviceNameBuffer), "Could not get device name");
+
+ deviceAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
+ deviceAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ deviceAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ nameBufSize = sizeof(mfrNameBuffer);
- char cStringOfDeviceName[size];
- CheckError( AudioDeviceGetProperty( self.deviceIDs[i], 0, true, kAudioDevicePropertyDeviceName, &size, cStringOfDeviceName ), "Could not get device name");
- NSString *thisDeviceName = [NSString stringWithCString:cStringOfDeviceName encoding:NSUTF8StringEncoding];
+ CheckError( AudioObjectGetPropertyData(self.deviceIDs[i], &deviceAddress, 0, NULL, &nameBufSize, mfrNameBuffer), "Could not get device manufacturer");
+ NSString *thisDeviceName = [NSString stringWithFormat:@"%@: %@", [NSString stringWithUTF8String:mfrNameBuffer], [NSString stringWithUTF8String:deviceNameBuffer]];
NSLog(@"Device: %@, ID: %d", thisDeviceName, self.deviceIDs[i]);
- [deviceNames addObject:thisDeviceName];
+ [self.deviceNames addObject:thisDeviceName];
}
}
@@ -583,12 +641,12 @@ - (void)enumerateAudioDevices
- (void)pause {
- if (playing) {
- CheckError( AudioOutputUnitStop(inputUnit), "Couldn't stop the output unit");
+ if (self.playing) {
+ CheckError( AudioOutputUnitStop(_inputUnit), "Couldn't stop the output unit");
#if defined ( USING_OSX )
- CheckError( AudioOutputUnitStop(outputUnit), "Couldn't stop the output unit");
+ CheckError( AudioOutputUnitStop(_outputUnit), "Couldn't stop the output unit");
#endif
- playing = NO;
+ self.playing = NO;
}
}
@@ -596,9 +654,9 @@ - (void)pause {
- (void)play {
UInt32 isInputAvailable=0;
- UInt32 size = sizeof(isInputAvailable);
#if defined ( USING_IOS )
+ UInt32 size = sizeof(isInputAvailable);
CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable,
&size,
&isInputAvailable), "Couldn't check if input was available");
@@ -608,15 +666,14 @@ - (void)play {
#endif
-
self.inputAvailable = isInputAvailable;
if ( self.inputAvailable ) {
// Set the audio session category for simultaneous play and record
- if (!playing) {
- CheckError( AudioOutputUnitStart(inputUnit), "Couldn't start the output unit");
+ if (!self.playing) {
+ CheckError( AudioOutputUnitStart(_inputUnit), "Couldn't start the output unit");
#if defined ( USING_OSX )
- CheckError( AudioOutputUnitStart(outputUnit), "Couldn't start the output unit");
+ CheckError( AudioOutputUnitStart(_outputUnit), "Couldn't start the output unit");
#endif
self.playing = YES;
@@ -635,64 +692,66 @@ OSStatus inputCallback (void *inRefCon,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
+ @autoreleasepool {
-
- Novocaine *sm = (Novocaine *)inRefCon;
-
- if (!sm.playing)
- return noErr;
- if (sm.inputBlock == nil)
- return noErr;
-
-
- // Check the current number of channels
- // Let's actually grab the audio
+ Novocaine *sm = (__bridge Novocaine *)inRefCon;
+
+ if (!sm.playing)
+ return noErr;
+ if (sm.inputBlock == nil)
+ return noErr;
+
+
+ // Check the current number of channels
+ // Let's actually grab the audio
#if TARGET_IPHONE_SIMULATOR
- // this is a workaround for an issue with core audio on the simulator, //
- // likely due to 44100 vs 48000 difference in OSX //
- if( inNumberFrames == 471 )
- inNumberFrames = 470;
+ // this is a workaround for an issue with core audio on the simulator, //
+ // likely due to 44100 vs 48000 difference in OSX //
+ if( inNumberFrames == 471 )
+ inNumberFrames = 470;
#endif
- CheckError( AudioUnitRender(sm.inputUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, sm.inputBuffer), "Couldn't render the output unit");
-
-
- // Convert the audio in something manageable
- // For Float32s ...
- if ( sm.numBytesPerSample == 4 ) // then we've already got flaots
- {
+ CheckError( AudioUnitRender(sm.inputUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, sm.inputBuffer), "Couldn't render the output unit");
- float zero = 0.0f;
- if ( ! sm.isInterleaved ) { // if the data is in separate buffers, make it interleaved
- for (int i=0; i < sm.numInputChannels; ++i) {
- vDSP_vsadd((float *)sm.inputBuffer->mBuffers[i].mData, 1, &zero, sm.inData+i,
- sm.numInputChannels, inNumberFrames);
+
+ // Convert the audio in something manageable
+ // For Float32s ...
+ if ( sm.numBytesPerSample == 4 ) // then we've already got flaots
+ {
+
+ float zero = 0.0f;
+ if ( ! sm.isInterleaved ) { // if the data is in separate buffers, make it interleaved
+ for (int i=0; i < sm.numInputChannels; ++i) {
+ vDSP_vsadd((float *)sm.inputBuffer->mBuffers[i].mData, 1, &zero, sm.inData+i,
+ sm.numInputChannels, inNumberFrames);
+ }
+ }
+ else { // if the data is already interleaved, copy it all in one happy block.
+ // TODO: check mDataByteSize is proper
+ memcpy(sm.inData, (float *)sm.inputBuffer->mBuffers[0].mData, sm.inputBuffer->mBuffers[0].mDataByteSize);
}
- }
- else { // if the data is already interleaved, copy it all in one happy block.
- // TODO: check mDataByteSize is proper
- memcpy(sm.inData, (float *)sm.inputBuffer->mBuffers[0].mData, sm.inputBuffer->mBuffers[0].mDataByteSize);
- }
- }
-
- // For SInt16s ...
- else if ( sm.numBytesPerSample == 2 ) // then we're dealing with SInt16's
- {
- if ( ! sm.isInterleaved ) {
- for (int i=0; i < sm.numInputChannels; ++i) {
- vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[i].mData, 1, sm.inData+i, sm.numInputChannels, inNumberFrames);
- }
}
- else {
- vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[0].mData, 1, sm.inData, 1, inNumberFrames*sm.numInputChannels);
+
+ // For SInt16s ...
+ else if ( sm.numBytesPerSample == 2 ) // then we're dealing with SInt16's
+ {
+ if ( ! sm.isInterleaved ) {
+ for (int i=0; i < sm.numInputChannels; ++i) {
+ vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[i].mData, 1, sm.inData+i, sm.numInputChannels, inNumberFrames);
+ }
+ }
+ else {
+ vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[0].mData, 1, sm.inData, 1, inNumberFrames*sm.numInputChannels);
+ }
+
+ float scale = 1.0 / (float)INT16_MAX;
+ vDSP_vsmul(sm.inData, 1, &scale, sm.inData, 1, inNumberFrames*sm.numInputChannels);
}
- float scale = 1.0 / (float)INT16_MAX;
- vDSP_vsmul(sm.inData, 1, &scale, sm.inData, 1, inNumberFrames*sm.numInputChannels);
+ // Now do the processing!
+ sm.inputBlock(sm.inData, inNumberFrames, sm.numInputChannels);
+
}
- // Now do the processing!
- sm.inputBlock(sm.inData, inNumberFrames, sm.numInputChannels);
-
return noErr;
@@ -705,54 +764,56 @@ OSStatus renderCallback (void *inRefCon,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
-
-
- Novocaine *sm = (Novocaine *)inRefCon;
- float zero = 0.0;
-
-
- for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
- memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
- }
-
- if (!sm.playing)
- return noErr;
- if (!sm.outputBlock)
- return noErr;
-
-
- // Collect data to render from the callbacks
- sm.outputBlock(sm.outData, inNumberFrames, sm.numOutputChannels);
-
-
- // Put the rendered data into the output buffer
- // TODO: convert SInt16 ranges to float ranges.
- if ( sm.numBytesPerSample == 4 ) // then we've already got floats
- {
+ // autorelease pool for much faster ARC performance on repeated calls from separate thread
+ @autoreleasepool {
- for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
-
- int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels;
-
- for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) {
- vDSP_vsadd(sm.outData+iChannel, sm.numOutputChannels, &zero, (float *)ioData->mBuffers[iBuffer].mData, thisNumChannels, inNumberFrames);
- }
+ Novocaine *sm = (__bridge Novocaine *)inRefCon;
+ float zero = 0.0;
+
+
+ for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
+ memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
}
- }
- else if ( sm.numBytesPerSample == 2 ) // then we need to convert SInt16 -> Float (and also scale)
- {
- float scale = (float)INT16_MAX;
- vDSP_vsmul(sm.outData, 1, &scale, sm.outData, 1, inNumberFrames*sm.numOutputChannels);
- for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
+ if (!sm.playing)
+ return noErr;
+ if (!sm.outputBlock)
+ return noErr;
+
+
+ // Collect data to render from the callbacks
+ sm.outputBlock(sm.outData, inNumberFrames, sm.numOutputChannels);
+
+
+ // Put the rendered data into the output buffer
+ // TODO: convert SInt16 ranges to float ranges.
+ if ( sm.numBytesPerSample == 4 ) // then we've already got floats
+ {
- int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels;
+ for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
+
+ int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels;
+
+ for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) {
+ vDSP_vsadd(sm.outData+iChannel+iBuffer, sm.numOutputChannels, &zero, (float *)ioData->mBuffers[iBuffer].mData, thisNumChannels, inNumberFrames);
+ }
+ }
+ }
+ else if ( sm.numBytesPerSample == 2 ) // then we need to convert SInt16 -> Float (and also scale)
+ {
+ float scale = (float)INT16_MAX;
+ vDSP_vsmul(sm.outData, 1, &scale, sm.outData, 1, inNumberFrames*sm.numOutputChannels);
- for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) {
- vDSP_vfix16(sm.outData+iChannel, sm.numOutputChannels, (SInt16 *)ioData->mBuffers[iBuffer].mData+iChannel, thisNumChannels, inNumberFrames);
+ for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
+
+ int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels;
+
+ for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) {
+ vDSP_vfix16(sm.outData+iChannel, sm.numOutputChannels, (SInt16 *)ioData->mBuffers[iBuffer].mData+iChannel, thisNumChannels, inNumberFrames);
+ }
}
+
}
-
}
return noErr;
@@ -774,7 +835,7 @@ void sessionPropertyListener(void * inClientData,
if (inID == kAudioSessionProperty_AudioRouteChange && routeChangeReason != kAudioSessionRouteChangeReason_CategoryChange)
{
- Novocaine *sm = (Novocaine *)inClientData;
+ Novocaine *sm = (__bridge Novocaine *)inClientData;
[sm checkSessionProperties];
}
@@ -785,7 +846,7 @@ - (void)checkAudioSource {
UInt32 propertySize = sizeof(CFStringRef);
CFStringRef route;
CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &route), "Couldn't check the audio route");
- self.inputRoute = (NSString *)route;
+ self.inputRoute = (__bridge NSString *)route;
CFRelease(route);
NSLog(@"AudioRoute: %@", self.inputRoute);
@@ -840,7 +901,7 @@ - (void)checkSessionProperties
void sessionInterruptionListener(void *inClientData, UInt32 inInterruption) {
- Novocaine *sm = (Novocaine *)inClientData;
+ Novocaine *sm = (__bridge Novocaine *)inClientData;
if (inInterruption == kAudioSessionBeginInterruption) {
NSLog(@"Begin interuption");
Please sign in to comment.
Something went wrong with that request. Please try again.