diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..e223300 Binary files /dev/null and b/.DS_Store differ diff --git a/AudioDSPUtils/AudioFileReader.mm b/AudioDSPUtils/AudioFileReader.mm deleted file mode 100644 index 9f5c0bd..0000000 --- a/AudioDSPUtils/AudioFileReader.mm +++ /dev/null @@ -1,296 +0,0 @@ -// -// AudioFileReader.m -// Novocaine -// -// Copyright (c) 2012 Alex Wiltschko -// -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without -// restriction, including without limitation the rights to use, -// copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. - -#import "AudioFileReader.h" - -@interface AudioFileReader () -{ - RingBuffer *ringBuffer; -} - -@property AudioStreamBasicDescription outputFormat; -@property ExtAudioFileRef inputFile; -@property UInt32 outputBufferSize; -@property float *outputBuffer; -@property float *holdingBuffer; -@property UInt32 numSamplesReadPerPacket; -@property UInt32 desiredPrebufferedSamples; -@property SInt64 currentFileTime; -@property dispatch_source_t callbackTimer; - - -- (void)bufferNewAudio; - -@end - - - -@implementation AudioFileReader - -@synthesize outputFormat = _outputFormat; -@synthesize inputFile = _inputFile; -@synthesize outputBuffer = _outputBuffer; -@synthesize holdingBuffer = _holdingBuffer; -@synthesize outputBufferSize = _outputBufferSize; -@synthesize numSamplesReadPerPacket = _numSamplesReadPerPacket; -@synthesize desiredPrebufferedSamples = _desiredPrebufferedSamples; -@synthesize currentFileTime = _currentFileTime; -@synthesize callbackTimer = _callbackTimer; -@synthesize currentTime = _currentTime; -@synthesize duration = _duration; -@synthesize samplingRate = _samplingRate; -@synthesize latency = _latency; -@synthesize numChannels = _numChannels; -@synthesize audioFileURL = _audioFileURL; -@synthesize readerBlock = _readerBlock; -@synthesize playing = _playing; - -- (void)dealloc -{ - // If the dispatch timer is active, close it off - if (self.playing) - [self pause]; - - self.readerBlock = nil; - - // Close the ExtAudioFile - ExtAudioFileDispose(self.inputFile); - - free(self.outputBuffer); - free(self.holdingBuffer); - - delete ringBuffer; - - [super dealloc]; -} - - -- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels -{ - self = [super init]; - if (self) - { - - // Zero-out our timer, so we know we're not using our callback yet - self.callbackTimer = nil; - - - // Open a reference to the audio file - self.audioFileURL = urlToAudioFile; - CFURLRef audioFileRef = (CFURLRef)self.audioFileURL; - 
CheckError(ExtAudioFileOpenURL(audioFileRef, &_inputFile), "Opening file URL (ExtAudioFileOpenURL)"); - - - // Set a few defaults and presets - self.samplingRate = thisSamplingRate; - self.numChannels = thisNumChannels; - self.latency = .011609977; // 512 samples / ( 44100 samples / sec ) default - - - // We're going to impose a format upon the input file - // Single-channel float does the trick. - _outputFormat.mSampleRate = self.samplingRate; - _outputFormat.mFormatID = kAudioFormatLinearPCM; - _outputFormat.mFormatFlags = kAudioFormatFlagIsFloat; - _outputFormat.mBytesPerPacket = 4*self.numChannels; - _outputFormat.mFramesPerPacket = 1; - _outputFormat.mBytesPerFrame = 4*self.numChannels; - _outputFormat.mChannelsPerFrame = self.numChannels; - _outputFormat.mBitsPerChannel = 32; - - // Apply the format to our file - ExtAudioFileSetProperty(_inputFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &_outputFormat); - - - // Arbitrary buffer sizes that don't matter so much as long as they're "big enough" - self.outputBufferSize = 65536; - self.numSamplesReadPerPacket = 8192; - self.desiredPrebufferedSamples = self.numSamplesReadPerPacket*2; - self.outputBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); - self.holdingBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); - - - // Allocate a ring buffer (this is what's going to buffer our audio) - ringBuffer = new RingBuffer(self.outputBufferSize, self.numChannels); - - - // Fill up the buffers, so we're ready to play immediately - [self bufferNewAudio]; - - } - return self; -} - -- (void)clearBuffer -{ - ringBuffer->Clear(); -} - -- (void)bufferNewAudio -{ - - if (ringBuffer->NumUnreadFrames() > self.desiredPrebufferedSamples) - return; - - memset(self.outputBuffer, 0, sizeof(float)*self.desiredPrebufferedSamples); - - AudioBufferList incomingAudio; - incomingAudio.mNumberBuffers = 1; - incomingAudio.mBuffers[0].mNumberChannels = self.numChannels; - incomingAudio.mBuffers[0].mDataByteSize = self.outputBufferSize; - incomingAudio.mBuffers[0].mData = self.outputBuffer; - - // Figure out where we are in the file - SInt64 frameOffset = 0; - ExtAudioFileTell(self.inputFile, &frameOffset); - self.currentFileTime = (float)frameOffset / self.samplingRate; - - // Read the audio - UInt32 framesRead = self.numSamplesReadPerPacket; - ExtAudioFileRead(self.inputFile, &framesRead, &incomingAudio); - - // Update where we are in the file - ExtAudioFileTell(self.inputFile, &frameOffset); - self.currentFileTime = (float)frameOffset / self.samplingRate; - - // Add the new audio to the ring buffer - ringBuffer->AddNewInterleavedFloatData(self.outputBuffer, framesRead, self.numChannels); - - if ((self.currentFileTime - self.duration) < 0.01 && framesRead == 0) { - // modified to allow for auto-stopping. // - // Need to change your output block to check for [fileReader playing] and nuke your fileReader if it is // - // not playing and not paused, on the next frame. Otherwise, the sound clip's final buffer is not played. 
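For context on the `latency` default set in `init` above: the reader (and the writer later in this patch) wakes a dispatch timer every `latency` seconds and moves `latency * samplingRate` frames per tick, so the 0.011609977 default is exactly 512 frames at 44.1 kHz. A minimal sketch of that arithmetic — the names here are illustrative, not from the patch:

```swift
// Sketch: callback cadence used by the reader/writer dispatch timers.
// `latency` seconds per tick -> latency * samplingRate frames per tick.
let samplingRate: Float = 44100.0
let latency: Float = 512.0 / samplingRate                       // ≈ 0.011609977 s, the default above
let numSamplesPerCallback = UInt32((latency * samplingRate).rounded())  // 512 frames per tick
```

Note the rounding: the product of the two floats may land a hair below 512, so a plain truncating conversion (as the Objective-C code does) can yield 511.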
// -// self.currentTime = 0.0f; - [self stop]; - ringBuffer->Clear(); - } - - -} - -- (float)getCurrentTime -{ - return self.currentFileTime - ringBuffer->NumUnreadFrames()/self.samplingRate; -} - - -- (void)setCurrentTime:(float)thisCurrentTime -{ - dispatch_async(dispatch_get_main_queue(), ^{ - [self pause]; - ExtAudioFileSeek(self.inputFile, thisCurrentTime*self.samplingRate); - - [self clearBuffer]; - [self bufferNewAudio]; - - [self play]; - }); -} - -- (float)getDuration -{ - // We're going to directly calculate the duration of the audio file (in seconds) - SInt64 framesInThisFile; - UInt32 propertySize = sizeof(framesInThisFile); - ExtAudioFileGetProperty(self.inputFile, kExtAudioFileProperty_FileLengthFrames, &propertySize, &framesInThisFile); - - AudioStreamBasicDescription fileStreamFormat; - propertySize = sizeof(AudioStreamBasicDescription); - ExtAudioFileGetProperty(self.inputFile, kExtAudioFileProperty_FileDataFormat, &propertySize, &fileStreamFormat); - - return (float)framesInThisFile/(float)fileStreamFormat.mSampleRate; - -} - -- (void)configureReaderCallback -{ - - if (!self.callbackTimer) - { - self.callbackTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); - UInt32 numSamplesPerCallback = (UInt32)( self.latency * self.samplingRate ); - dispatch_source_set_timer(self.callbackTimer, dispatch_walltime(NULL, 0), self.latency*NSEC_PER_SEC, 0); - dispatch_source_set_event_handler(self.callbackTimer, ^{ - - if (self.playing) { - - if (self.readerBlock) { - // Suck some audio down from our ring buffer - [self retrieveFreshAudio:self.holdingBuffer numFrames:numSamplesPerCallback numChannels:self.numChannels]; - - // Call out with the audio that we've got. - self.readerBlock(self.holdingBuffer, numSamplesPerCallback, self.numChannels); - } - - // Asynchronously fill up the buffer (if it needs filling) - dispatch_async(dispatch_get_main_queue(), ^{ - [self bufferNewAudio]; - }); - - } - - }); - - dispatch_resume(self.callbackTimer); - } -} - - -- (void)retrieveFreshAudio:(float *)buffer numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels -{ - ringBuffer->FetchInterleavedData(buffer, thisNumFrames, thisNumChannels); -} - - -- (void)play; -{ - - // Configure (or if necessary, create and start) the timer for retrieving audio - if (!self.playing) { - [self configureReaderCallback]; - self.playing = TRUE; - } - -} - -- (void)pause -{ - // Pause the dispatch timer for retrieving the MP3 audio - self.playing = FALSE; -} - -- (void)stop -{ - // Release the dispatch timer because it holds a reference to this class instance - [self pause]; - if (self.callbackTimer) { - dispatch_release(self.callbackTimer); - } -} - - -@end diff --git a/AudioDSPUtils/AudioFileWriter.h b/AudioDSPUtils/AudioFileWriter.h deleted file mode 100644 index 62c5c3f..0000000 --- a/AudioDSPUtils/AudioFileWriter.h +++ /dev/null @@ -1,66 +0,0 @@ -// -// AudioFileWriter.h -// Novocaine -// -// Copyright (c) 2012 Alex Wiltschko -// -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without -// restriction, including without limitation the rights to use, -// copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice shall be -// 
included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. - -#import -#import "Novocaine.h" - - -@interface AudioFileWriter : NSObject -{ - float currentTime; - float duration; - float samplingRate; - float latency; - UInt32 numChannels; - NSURL *audioFileURL; - - OutputBlock writerBlock; - - BOOL recording; -} - -@property (nonatomic, getter=getDuration, readonly) float currentTime; -@property (nonatomic, getter=getDuration) float duration; -@property float samplingRate; -@property UInt32 numChannels; -@property float latency; -@property (nonatomic, copy) NSURL *audioFileURL; -@property (nonatomic, copy) InputBlock writerBlock; -@property BOOL recording; - - -- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels; - -// You use this method to grab audio if you have your own callback. -// The buffer'll fill at the speed the audio is normally being played. -- (void)writeNewAudio:(float *)newData numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels; - -- (void)record; -- (void)pause; - - -@end diff --git a/AudioDSPUtils/AudioFileWriter.m b/AudioDSPUtils/AudioFileWriter.m deleted file mode 100644 index 5fe5b1d..0000000 --- a/AudioDSPUtils/AudioFileWriter.m +++ /dev/null @@ -1,243 +0,0 @@ -// -// AudioFileWriter.m -// Novocaine -// -// Copyright (c) 2012 Alex Wiltschko -// -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without -// restriction, including without limitation the rights to use, -// copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. 
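Both the reader above and the writer below impose the same interleaved 32-bit-float client format on their `ExtAudioFile`. A minimal Swift sketch of that `AudioStreamBasicDescription`, with the byte-size arithmetic spelled out (the function name and `channels` parameter are illustrative):

```swift
import AudioToolbox

// Sketch: the interleaved 32-bit-float linear-PCM client format both
// file classes set via kExtAudioFileProperty_ClientDataFormat.
func floatClientFormat(sampleRate: Float64, channels: UInt32) -> AudioStreamBasicDescription {
    return AudioStreamBasicDescription(
        mSampleRate: sampleRate,
        mFormatID: kAudioFormatLinearPCM,
        mFormatFlags: kAudioFormatFlagIsFloat,
        mBytesPerPacket: 4 * channels,   // one packet = one frame for uncompressed PCM
        mFramesPerPacket: 1,
        mBytesPerFrame: 4 * channels,    // 4 bytes per 32-bit float sample, interleaved
        mChannelsPerFrame: channels,
        mBitsPerChannel: 32,
        mReserved: 0)
}
```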
-#import "AudioFileWriter.h" -#import - -@interface AudioFileWriter() - -@property AudioStreamBasicDescription outputFormat; -@property ExtAudioFileRef outputFile; -@property UInt32 outputBufferSize; -@property float *outputBuffer; -@property float *holdingBuffer; -@property SInt64 currentFileTime; -@property dispatch_source_t callbackTimer; -@property (readwrite) float currentTime; - -@end - - - -@implementation AudioFileWriter - -static pthread_mutex_t outputAudioFileLock; - -@synthesize outputFormat = _outputFormat; -@synthesize outputFile = _outputFile; -@synthesize outputBuffer = _outputBuffer; -@synthesize holdingBuffer = _holdingBuffer; -@synthesize outputBufferSize = _outputBufferSize; -@synthesize currentFileTime = _currentFileTime; -@synthesize callbackTimer = _callbackTimer; - -@synthesize currentTime = _currentTime; -@synthesize duration = _duration; -@synthesize samplingRate = _samplingRate; -@synthesize latency = _latency; -@synthesize numChannels = _numChannels; -@synthesize audioFileURL = _audioFileURL; -@synthesize writerBlock = _writerBlock; -@synthesize recording = _recording; - -- (void)dealloc -{ - [self stop]; - - free(self.outputBuffer); - free(self.holdingBuffer); - - [super dealloc]; -} - -- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels -{ - self = [super init]; - if (self) - { - - // Zero-out our timer, so we know we're not using our callback yet - self.callbackTimer = nil; - - - // Open a reference to the audio file - self.audioFileURL = urlToAudioFile; - CFURLRef audioFileRef = (CFURLRef)self.audioFileURL; - - AudioStreamBasicDescription outputFileDesc = {44100.0, kAudioFormatMPEG4AAC, 0, 0, 1024, 0, thisNumChannels, 0, 0}; - - CheckError(ExtAudioFileCreateWithURL(audioFileRef, kAudioFileM4AType, &outputFileDesc, NULL, kAudioFileFlags_EraseFile, &_outputFile), "Creating file"); - - - // Set a few defaults and presets - self.samplingRate = thisSamplingRate; - self.numChannels = thisNumChannels; - self.currentTime = 0.0; - self.latency = .011609977; // 512 samples / ( 44100 samples / sec ) default - - - // We're going to impose a format upon the input file - // Single-channel float does the trick. 
- _outputFormat.mSampleRate = self.samplingRate; - _outputFormat.mFormatID = kAudioFormatLinearPCM; - _outputFormat.mFormatFlags = kAudioFormatFlagIsFloat; - _outputFormat.mBytesPerPacket = 4*self.numChannels; - _outputFormat.mFramesPerPacket = 1; - _outputFormat.mBytesPerFrame = 4*self.numChannels; - _outputFormat.mChannelsPerFrame = self.numChannels; - _outputFormat.mBitsPerChannel = 32; - - // Apply the format to our file - ExtAudioFileSetProperty(_outputFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &_outputFormat); - - - // Arbitrary buffer sizes that don't matter so much as long as they're "big enough" - self.outputBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); - self.holdingBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); - - pthread_mutex_init(&outputAudioFileLock, NULL); - - // mutex here // - if( 0 == pthread_mutex_trylock( &outputAudioFileLock ) ) - { - CheckError( ExtAudioFileWriteAsync(self.outputFile, 0, NULL), "Initializing audio file"); - } - pthread_mutex_unlock( &outputAudioFileLock ); - - } - return self; -} - -- (void)writeNewAudio:(float *)newData numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels -{ - UInt32 numIncomingBytes = thisNumFrames*thisNumChannels*sizeof(float); - memcpy(self.outputBuffer, newData, numIncomingBytes); - - AudioBufferList outgoingAudio; - outgoingAudio.mNumberBuffers = 1; - outgoingAudio.mBuffers[0].mNumberChannels = thisNumChannels; - outgoingAudio.mBuffers[0].mDataByteSize = numIncomingBytes; - outgoingAudio.mBuffers[0].mData = self.outputBuffer; - - if( 0 == pthread_mutex_trylock( &outputAudioFileLock ) ) - { - ExtAudioFileWriteAsync(self.outputFile, thisNumFrames, &outgoingAudio); - } - pthread_mutex_unlock( &outputAudioFileLock ); - - // Figure out where we are in the file - SInt64 frameOffset = 0; - ExtAudioFileTell(self.outputFile, &frameOffset); - self.currentTime = (float)frameOffset / self.samplingRate; - -} - - -- (float)getDuration -{ - // We're going to directly calculate the duration of the audio file (in seconds) - SInt64 framesInThisFile; - UInt32 propertySize = sizeof(framesInThisFile); - ExtAudioFileGetProperty(self.outputFile, kExtAudioFileProperty_FileLengthFrames, &propertySize, &framesInThisFile); - - AudioStreamBasicDescription fileStreamFormat; - propertySize = sizeof(AudioStreamBasicDescription); - ExtAudioFileGetProperty(self.outputFile, kExtAudioFileProperty_FileDataFormat, &propertySize, &fileStreamFormat); - - return (float)framesInThisFile/(float)fileStreamFormat.mSampleRate; - -} - - - -- (void)configureWriterCallback -{ - - if (!self.callbackTimer) - { - self.callbackTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); - } - - if (self.callbackTimer) - { - UInt32 numSamplesPerCallback = (UInt32)( self.latency * self.samplingRate ); - dispatch_source_set_timer(self.callbackTimer, dispatch_walltime(NULL, 0), self.latency*NSEC_PER_SEC, 0); - dispatch_source_set_event_handler(self.callbackTimer, ^{ - - - if (self.writerBlock) { - // Call out with the audio that we've got. 
- self.writerBlock(self.outputBuffer, numSamplesPerCallback, self.numChannels); - - // Get audio from the block supplier - [self writeNewAudio:self.outputBuffer numFrames:numSamplesPerCallback numChannels:self.numChannels]; - - } - - }); - - } - -} - - - -- (void)record; -{ - - // Configure (or if necessary, create and start) the timer for retrieving MP3 audio - [self configureWriterCallback]; - - if (!self.recording) - { - dispatch_resume(self.callbackTimer); - self.recording = TRUE; - } - -} - -- (void)stop -{ - // Close the - pthread_mutex_lock( &outputAudioFileLock ); - ExtAudioFileDispose(self.outputFile); - pthread_mutex_unlock( &outputAudioFileLock ); - self.recording = FALSE; -} - -- (void)pause -{ - // Pause the dispatch timer for retrieving the MP3 audio - if (self.callbackTimer) { - dispatch_suspend(self.callbackTimer); - self.recording = FALSE; - } -} - - - -@end - diff --git a/AudioDSPUtils/BufferedOverlapQueue.h b/AudioDSPUtils/BufferedOverlapQueue.h deleted file mode 100644 index 1e780ba..0000000 --- a/AudioDSPUtils/BufferedOverlapQueue.h +++ /dev/null @@ -1,38 +0,0 @@ -// -// BufferedOverlapQueue.h -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. -// - -#import -#import "DataBufferBlock.h" - -typedef void (^ConsumeBlock)(); //TODO: only processing through delegation is currently supported - -// delegation code manipulated from: http://stackoverflow.com/questions/626898/how-do-i-create-delegates-in-objective-c -@protocol DataBufferProcessDelegate -@optional --(void)didFillBuffer:(DataBufferBlock*)block; // optional so that consume blocks can be used (BUT NOT BOTH!!) --(void)didFinishProcessingAllBuffers; -@end - -@interface BufferedOverlapQueue : NSObject - -@property (nonatomic, weak) id delegate; // best way to process the data on demand in a separate concurrent queue - -@property (nonatomic,readonly) NSUInteger numOverlapSamples; -@property (nonatomic,readonly) NSUInteger numSamplesPerBuffer; -@property (nonatomic,readonly) NSUInteger numFullBuffers; - --(id)initWithBufferLength:(NSUInteger)buffLength andOverlapLength:(NSUInteger)overlapLength; --(void)addFreshFloatData:(float*)data withLength:(NSUInteger)numSamples; --(void)addFreshInterleavedFloatData:(float*)data withLength:(NSUInteger)numSamples fromChannel:(NSUInteger)whichChannel withNumChannels:(NSUInteger)numChannels; // can only add one channel at a time --(DataBufferBlock*)dequeueAndTakeOwnership; // TODO: you are responsible for freeing memory, more versatile --(void)consumeBufferWithBlock:ConsumeBlock; // TODO: process buffer and free it, less versatile --(void)deleteAt:(NSUInteger)indexToDelete; --(void)clear; --(void)processRemainingBlocks; - -@end diff --git a/AudioDSPUtils/BufferedOverlapQueue.m b/AudioDSPUtils/BufferedOverlapQueue.m deleted file mode 100644 index 7f2858f..0000000 --- a/AudioDSPUtils/BufferedOverlapQueue.m +++ /dev/null @@ -1,175 +0,0 @@ -// -// BufferedOverlapQueue.m -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. 
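The queue implemented below slices a sample stream into fixed-length buffers that share `overlapLength` samples with their neighbors, i.e. consecutive windows advance by a hop of `bufferLength - overlapLength`. A self-contained sketch of that windowing, offline rather than the streaming version the class implements:

```swift
// Sketch: split `samples` into windows of `bufferLength` samples whose
// start positions advance by the hop size bufferLength - overlapLength.
func overlappingWindows(_ samples: [Float], bufferLength: Int, overlapLength: Int) -> [[Float]] {
    let hop = bufferLength - overlapLength
    precondition(hop > 0, "overlap must be smaller than the buffer")
    var windows: [[Float]] = []
    var start = 0
    while start + bufferLength <= samples.count {
        windows.append(Array(samples[start..<start + bufferLength]))
        start += hop
    }
    return windows
}
```

The streaming implementation below has to do the same bookkeeping incrementally, which is why it keeps several partially-filled `DataBufferBlock`s alive at once.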
-// - -#import "BufferedOverlapQueue.h" - - -@interface BufferedOverlapQueue() - -@property (strong, atomic) NSMutableArray* overlapQueue; -@property (atomic) NSUInteger currentFillQueueIndex; - -@property (nonatomic,readwrite) NSUInteger numOverlapSamples; -@property (nonatomic,readwrite) NSUInteger numSamplesPerBuffer; -@property (nonatomic,readwrite) NSUInteger numFullBuffers; - -@end - -@implementation BufferedOverlapQueue { - struct { - unsigned int didFillBuffer:1; - unsigned int didFinishProcessingAllBuffers:1; - } delegateRespondsTo; -} - -- (void)setDelegate:(id )aDelegate { - if (_delegate != aDelegate) { - _delegate = aDelegate; - delegateRespondsTo.didFillBuffer = [_delegate respondsToSelector:@selector(didFillBuffer:)]; - delegateRespondsTo.didFinishProcessingAllBuffers = [_delegate respondsToSelector:@selector(didFinishProcessingAllBuffers)]; - } -} - --(id)initWithBufferLength:(NSUInteger)buffLength andOverlapLength:(NSUInteger)overlapLength{ - if(self = [super init]){ - _numFullBuffers = 0; - _currentFillQueueIndex = 0; - _numSamplesPerBuffer = buffLength; - _numOverlapSamples = overlapLength; - _overlapQueue = [[NSMutableArray alloc]init]; - // init the first member of the queue - [_overlapQueue addObject:[[DataBufferBlock alloc]initWithCapacity:_numSamplesPerBuffer]]; - return self; - } - return nil; -} - --(id) init{ - // probably not what you want. Use the designated init above - return [self initWithBufferLength:512 andOverlapLength:256]; -} - --(void)addFreshFloatData:(float*)data withLength:(NSUInteger)numSamples{ - [self addFreshInterleavedFloatData:data withLength:numSamples - fromChannel:0 withNumChannels:1]; -} - - --(void)addFreshInterleavedFloatData:(float*)data withLength:(NSUInteger)numSamples fromChannel:(NSUInteger)whichChannel withNumChannels:(NSUInteger)numChannels{ - // copy data in a hurry, this block likely occurs in a streaming process - NSUInteger increment = self.numSamplesPerBuffer - self.numOverlapSamples; - NSUInteger dataCopyLength = numSamples; - - NSUInteger idx = 0; - - @synchronized(self){ - - if(numSamples > self.numSamplesPerBuffer){ - // this only works for input data greater than BufferSize - for(int i=0; i= [self.overlapQueue count]){ // add object if we need it - [self.overlapQueue addObject:[[DataBufferBlock alloc]initWithCapacity:self.numSamplesPerBuffer]]; - } - [self addData:&data[i] withSize:dataCopyLength fromChannel:whichChannel withNumChannels:numChannels - toBufferBlock:self.overlapQueue[self.currentFillQueueIndex+idx]]; - } - }else{ - - // this only works for input data fewer than BufferSize - DataBufferBlock* block = [self.overlapQueue lastObject]; - if(block.writePosition+numSamples>increment){ // need new entry - [self.overlapQueue addObject:[[DataBufferBlock alloc]initWithCapacity:self.numSamplesPerBuffer]]; - } - - // add given data to each block - for(DataBufferBlock* block in self.overlapQueue){ - if(!block.isFull){ - [self addData:data withSize:numSamples fromChannel:whichChannel withNumChannels:numChannels - toBufferBlock:block]; - } - } - } - - // Update write position - idx = 0; - for(DataBufferBlock* block in self.overlapQueue){ - if(block.isFull){ - idx++; - } - } - self.currentFillQueueIndex = idx; - self.numFullBuffers = idx; - - if(idx>0) { //at least one buffer to process - [self didFillDataWrapper]; - } - } -} - --(void)addData:(float*)data withSize:(NSUInteger)length fromChannel:(NSUInteger)whichChannel withNumChannels:(NSUInteger)numChannels toBufferBlock:(DataBufferBlock*)block { - if(block) - [block 
addInterleavedFloatData:data fromChannel:whichChannel withNumChannels:numChannels withLength:length]; - else - NSLog(@"Could not add data to block"); -} - --(DataBufferBlock*)dequeueAndTakeOwnership{ - //TODO -// self.overlapQueue - return nil; -} - --(void)consumeBufferWithBlock:ConsumeBlock{ - //TODO -} - --(void)deleteAt:(NSUInteger)indexToDelete{ - [self.overlapQueue removeObjectAtIndex:indexToDelete]; -} - --(void)didFillDataWrapper{ - if([self.overlapQueue count]>0){ - __block DataBufferBlock* block = [self.overlapQueue firstObject]; - - //TODO: mutex protection? might be too slow to use here - [self.overlapQueue removeObjectAtIndex:0]; - self.currentFillQueueIndex--; - self.numFullBuffers--; - - if(delegateRespondsTo.didFillBuffer){ - // spin off the data into high priority queue, assuming that this data analysis needs run UI - dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0),^{ - [self.delegate didFillBuffer:block]; // call delegate, and it does whatever it wants with data - //after this block executes ARC will clean up the data block for us! - // if we have processed all buffers, set status - if(self.numFullBuffers<=0 && delegateRespondsTo.didFinishProcessingAllBuffers){ - [self.delegate didFinishProcessingAllBuffers]; - } - - }); - } - } -} - --(void)processRemainingBlocks{ - @synchronized(self){ - while(self.numFullBuffers >0){ - // process until done - [self didFillDataWrapper]; - } - //TODO: block here until completion? - [self clear]; // and clear any blocks that are not full - } - -} - --(void)clear{ - [self.overlapQueue removeAllObjects]; -} - -@end diff --git a/AudioDSPUtils/CubicSpline.h b/AudioDSPUtils/CubicSpline.h deleted file mode 100644 index acf9773..0000000 --- a/AudioDSPUtils/CubicSpline.h +++ /dev/null @@ -1,31 +0,0 @@ -// -// CubicSpline.h -// CubicSpline -// -// Created by Sam Soffes on 12/16/13. -// Copyright (c) 2013-2014 Sam Soffes. All rights reserved. -// Manipulated from https://github.com/soffes/SAMCubicSpline -// Updated by Dr. Eric Larson July 2015 - -#import - - -@interface CubicSpline : NSObject - -/** - Initialize a new cubic spline. - - @param points An array of `NSValue` objects containing `x and y` structs. These points are the control points of the curve. - - @return A new cubic spline. - */ -- (instancetype)initWithPointsX:(NSArray *)x andY:(NSArray *)y; - -/** - Input an X value between 0 and 1. - - @return The corresponding Y value. - */ -- (float)interpolateX:(float)x; - -@end diff --git a/AudioDSPUtils/CubicSpline.m b/AudioDSPUtils/CubicSpline.m deleted file mode 100644 index b2a2ef2..0000000 --- a/AudioDSPUtils/CubicSpline.m +++ /dev/null @@ -1,119 +0,0 @@ -// -// CubicSpline.m -// CubicSpline -// -// Created by Sam Soffes on 12/16/13. -// Copyright (c) 2013-2014 Sam Soffes. All rights reserved. 
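The implementation below fits a natural cubic spline: for knots (xᵢ, yᵢ) it solves for per-segment coefficients b, c, d so that each interval is the cubic aᵢ + bᵢ·Δx + cᵢ·Δx² + dᵢ·Δx³ with Δx measured from the left knot. Evaluation then reduces to locating the segment and applying that polynomial; this sketch mirrors `interpolateX:` below:

```swift
// Sketch: evaluate a solved cubic spline at `input`.
// x = knot abscissae, a = knot ordinates, b/c/d = solved coefficients.
func splineValue(at input: Float, x: [Float], a: [Float],
                 b: [Float], c: [Float], d: [Float]) -> Float {
    // find the last knot at or below input (clamping to the first segment)
    var i = x.count - 1
    while i > 0 && x[i] > input { i -= 1 }
    let dx = input - x[i]
    return a[i] + b[i]*dx + c[i]*dx*dx + d[i]*dx*dx*dx
}
```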
-// - -#import "CubicSpline.h" - -@interface CubicSpline () -@property (nonatomic, strong) NSArray *x; -@property (nonatomic, strong) NSArray *y; -@property (nonatomic, strong) NSArray *b; -@property (nonatomic, strong) NSArray *c; -@property (nonatomic, strong) NSArray *d; -@end - -@implementation CubicSpline - -- (instancetype)initWithPointsX:(NSArray *)x andY:(NSArray *)y { - if ((self = [super init])) { - self.x = x; - self.y = y; - - if (x.count > 0) { - NSUInteger count = x.count; - NSUInteger n = count; // - 1; - float x[count]; - float a[count]; - float h[count]; - float y[count]; - float l[count]; - float u[count]; - float z[count]; - float k[count]; - float s[count]; - - for (NSUInteger i = 0; i < self.x.count; i++) { - x[i] = [self.x[i] floatValue]; - a[i] = [self.y[i] floatValue]; - } - - for (NSUInteger i = 0; i < n; i++) { - h[i] = x[i + 1] - x[i]; - k[i] = a[i + 1] - a[i]; - s[i] = k[i] / h[i]; - } - - for (NSUInteger i = 1; i < n; i++) { - y[i] = 3 / h[i] * (a[i + 1] - a[i]) - 3 / h[i - 1] * (a[i] - a[i - 1]); - } - - l[0] = 1; - u[0] = 0; - z[0] = 0; - - for (NSUInteger i = 1; i < n; i++) { - l[i] = 2 * (x[i + 1] - x[i - 1]) - h[i - 1] * u[i - 1]; - u[i] = h[i] / l[i]; - z[i] = (y[i] - h[i - 1] * z[i - 1]) / l[i]; - } - - l[n] = 1; - z[n] = 0; - - NSMutableArray *b = [[NSMutableArray alloc] initWithCapacity:n]; - NSMutableArray *c = [[NSMutableArray alloc] initWithCapacity:n]; - NSMutableArray *d = [[NSMutableArray alloc] initWithCapacity:n]; - - for (NSUInteger i = 0; i <= n; i++) { - b[i] = @0; - c[i] = @0; - d[i] = @0; - } - - for (NSInteger i = n - 1; i >= 0; i--) { - c[i] = @(z[i] - u[i] * [c[i + 1] floatValue]); - b[i] = @((a[i + 1] - a[i]) / h[i] - h[i] * ([c[i + 1] floatValue] + 2.0f * [c[i] floatValue]) / 3.0f); - d[i] = @(([c[i + 1] floatValue] - [c[i] floatValue]) / (3 * h[i])); - } - - c[n] = @0; - - self.b = b; - self.c = c; - self.d = d; - } - } - return self; -} - - -- (float)interpolateX:(float)input { - if (self.x.count == 0) { - // No points. Return identity. - return input; - } - - float x[self.x.count]; - float a[self.x.count]; - - for (NSUInteger i = 0; i < self.x.count; i++) { - x[i] = [self.x[i] floatValue]; - a[i] = [self.y[i] floatValue]; - } - - NSInteger i = 0; - for (i = self.x.count - 1; i > 0; i--) { - if (x[i] <= input) { - break; - } - } - - float deltaX = input - x[i]; - return a[i] + [self.b[i] floatValue] * deltaX + [self.c[i] floatValue] * pow(deltaX, 2) + [self.d[i] floatValue] * pow(deltaX, 3); -} - -@end diff --git a/AudioDSPUtils/DataBufferBlock.h b/AudioDSPUtils/DataBufferBlock.h deleted file mode 100644 index 003dd4a..0000000 --- a/AudioDSPUtils/DataBufferBlock.h +++ /dev/null @@ -1,23 +0,0 @@ -// -// DataBufferBlock.h -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. 
-// - -#import - -@interface DataBufferBlock : NSObject - -@property (nonatomic) float *data; -@property (nonatomic,readonly) NSUInteger writePosition; -@property (nonatomic,readonly) NSUInteger length; -@property (nonatomic,readonly) CFTimeInterval timeCreated; -@property (nonatomic,readonly) BOOL isFull; - --(id)initWithCapacity:(NSUInteger)numItems; --(void)addFloatData:(float*)data withLength:(NSUInteger)dataLength; // if this goes over length, only partial data copy --(void)addInterleavedFloatData:(float*)data fromChannel:(NSUInteger)whichChannel withNumChannels:(NSUInteger)numChannels withLength:(NSUInteger)dataLength; - -@end diff --git a/AudioDSPUtils/DataBufferBlock.m b/AudioDSPUtils/DataBufferBlock.m deleted file mode 100644 index a9b8ae6..0000000 --- a/AudioDSPUtils/DataBufferBlock.m +++ /dev/null @@ -1,91 +0,0 @@ -// -// DataBufferBlock.m -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. -// - -#import "DataBufferBlock.h" -#import - -@interface DataBufferBlock() - -@property (nonatomic,readwrite) NSUInteger writePosition; -@property (nonatomic,readwrite) NSUInteger length; -@property (nonatomic,readwrite) CFTimeInterval timeCreated; -@property (nonatomic,readwrite) BOOL isFull; - -@end - -@implementation DataBufferBlock - --(float*)data{ // on demand in case never used - if(!_data){ - _data = (float *)calloc(self.length,sizeof(float)); - } - return _data; -} - --(id)initWithCapacity:(NSUInteger)numItems{ - if(self = [super init]){ - //set backing variables - _length = numItems; - _writePosition = 0; - _timeCreated = CACurrentMediaTime(); - _isFull = NO; - return self; - } - return nil; -} - --(id)init{ - return [self initWithCapacity:512]; //probably not what you want, use the designated init above -} - --(void)addFloatData:(float*)data withLength:(NSUInteger)dataLength{ - if(self.writePosition+dataLength <= self.length){ // wont go off the end, just copy - memcpy(&self.data[self.writePosition], data, dataLength*sizeof(float)); - self.writePosition += dataLength; - }else{ // we will go over the end, only copy some - NSUInteger floatsToCopy = self.length - self.writePosition; - memcpy(&self.data[self.writePosition], data, floatsToCopy*sizeof(float)); - self.writePosition += floatsToCopy; - } - - if(self.writePosition >= self.length) - self.isFull = YES; -} - --(void)addInterleavedFloatData:(float*)data fromChannel:(NSUInteger)whichChannel - withNumChannels:(NSUInteger)numChannels withLength:(NSUInteger)dataLength -{ - if(self.writePosition+dataLength <= self.length){ // wont go off the end, just copy - float *p = &data[whichChannel]; - for(int i=0;i= self.length) - self.isFull = YES; - -} - - --(void)dealloc{ - if(_data){ - free(_data); - _data = nil; - } -} - -@end diff --git a/AudioDSPUtils/PeakFinder.h b/AudioDSPUtils/PeakFinder.h deleted file mode 100644 index 9814b67..0000000 --- a/AudioDSPUtils/PeakFinder.h +++ /dev/null @@ -1,37 +0,0 @@ -// -// PeakFinder.h -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. 
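The peak finder declared below runs a sliding window over the magnitude spectrum and accepts a bin only when it is both the maximum of the window centred on it and above a magnitude floor — a dilation-style local-max test (bin index k then maps to Hz as k·fs/N, which is what `getFrequencyFromIndex:` encapsulates). A compact sketch of the test in pure Swift; the original uses `vDSP_maxvi` for the inner maximum and additionally skips bins below a minimum frequency, which this simplified version omits:

```swift
// Sketch: dilation-style local-max search over a magnitude spectrum.
// A bin is a peak if it is the max of its centred window and loud enough.
func peakIndices(in mag: [Float], windowSize: Int, minMagnitude: Float) -> [Int] {
    var peaks: [Int] = []
    guard mag.count > windowSize else { return peaks }
    for i in 0...(mag.count - windowSize) {
        let mid = i + windowSize / 2
        let window = mag[i..<i + windowSize]
        if let maxVal = window.max(), maxVal > minMagnitude, mag[mid] == maxVal {
            peaks.append(mid)
        }
    }
    return peaks
}
```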
-// - -#import - -// helper class for the analysis (used like a struct, but allows ARC and Obj-C objects) -@interface Peak : NSObject - -@property (nonatomic) NSUInteger index; -@property (nonatomic) float frequency; -@property (strong, nonatomic) NSMutableArray *harmonics; -@property (nonatomic) NSUInteger multiple; -@property (nonatomic) float magnitude; - -@end - -@interface PeakFinder : NSObject - --(id)initWithFrequencyResolution:(float)res; - --(NSArray*)getFundamentalPeaksFromBuffer:(float *)magBuffer - withLength:(NSUInteger)length - usingWindowSize:(NSUInteger)windowSize - andPeakMagnitudeMinimum:(float)peakMagnitude - aboveFrequency:(float)minimumFrequency; - -- (float)getFrequencyFromIndex:(NSUInteger)index - usingData:(float*)data; - -@end - - diff --git a/AudioDSPUtils/PeakFinder.m b/AudioDSPUtils/PeakFinder.m deleted file mode 100644 index 7a0684a..0000000 --- a/AudioDSPUtils/PeakFinder.m +++ /dev/null @@ -1,156 +0,0 @@ -// -// PeakFinder.m -// OpenSpirometry -// -// Created by Eric Larson -// Copyright (c) 2015 Eric Larson. All rights reserved. -// - -#import "PeakFinder.h" -#import - - - - -@implementation Peak - --(id)initWithIndex:(NSUInteger)index andMagnitude:(float)mag andFreq:(float)freq{ - if(self = [super init]){ - _index = index; - _harmonics = [[NSMutableArray alloc]init]; - _frequency = freq; - _multiple = 1; - _magnitude = mag; - return self; - } - return nil; -} - -@end - -@interface PeakFinder() - -@property (nonatomic) float frequencyResolution; - -@end - -@implementation PeakFinder - --(id)initWithFrequencyResolution:(float)res{ - if(self = [super init]){ - _frequencyResolution = res; - return self; - } - return nil; -} - - -// Use dilation to find local max peaks (use harmonics to refine the peak estimation) -// Using starter code from Candie Solis, Charlie Albright, and Spencer Kaiser, MSLC 2015 -// this returns an array of peaks fundamental frequencies in the spectrum (if any) -// return type: index, frequency, magnitude, and list of harmonics --(NSArray*)getFundamentalPeaksFromBuffer:(float *)magBuffer - withLength:(NSUInteger)length - usingWindowSize:(NSUInteger)windowSize - andPeakMagnitudeMinimum:(float)peakMagnitude - aboveFrequency:(float)minimumFrequency -{ - NSMutableArray* peaks = [[NSMutableArray alloc] init]; - int startIndex = minimumFrequency / self.frequencyResolution; // must be above X Hz - - for (int i = startIndex; i < length-windowSize; i++) { - unsigned long mid = (i + windowSize/2); - - // find maximum of spectrum in window - // (this is a nested for loop, but parallelized in hardware) - float maxValue; - unsigned long maxIndex; - vDSP_maxvi(&(magBuffer[i]), 1, &maxValue, &maxIndex, windowSize); - maxIndex += i; - - if ((maxValue > peakMagnitude) && (mid == maxIndex)) { // Local max AND large enough magnitude - - Peak *peakFound = [[Peak alloc]initWithIndex:mid - andMagnitude:maxValue - andFreq:[self getFrequencyFromIndex:mid usingData:magBuffer]]; - - if ([peaks count] == 0) { // nothing to check, just add in - - [peaks addObject:peakFound]; - } - else { // Check if harmonic multiple exists below the peak - - BOOL unique = YES; - - for (Peak* peakInPeaks in peaks) { - NSUInteger numVal = peakInPeaks.index; // index of peak - NSUInteger multiple = mid / numVal; // integer value of harmonic multiple - NSUInteger modulus = mid % numVal; // num frequency bins above multiple - if (modulus > numVal/multiple) { - modulus = numVal - modulus; // num frequency bins below multiple, if closer - multiple++; // multiple is next harmonic up - } - 
float freqInHzAway = self.frequencyResolution * modulus; // deviation in Hz from harmonic (1 Hz tolerance) - - if (freqInHzAway <= self.frequencyResolution * multiple) { // scale difference by harmonic multiple (to account for mis-estimation of the fundamental up to 1Hz) - unique = false; - peakFound.multiple = multiple; // remember the multiple - [peakInPeaks.harmonics addObject:peakFound]; - - break; // found least common multiple and it is within deviation, add it in - } - } - - if (unique) { // it was not a harmonic - [peaks addObject:peakFound]; - } - } - } - } - - if ([peaks count] == 0) { - return nil; - } - else { - // go through and fix the frequencies - for (Peak* peak in peaks){ - if([peak.harmonics count]>0){ // only fundamental, just add in - float frequency = peak.frequency; - int numFrequenciesToAverage = 1; - for(Peak *harmonic in peak.harmonics){ - frequency += (harmonic.frequency / ((float)harmonic.multiple)); - numFrequenciesToAverage++; - } - peak.frequency= frequency/((float)numFrequenciesToAverage); - } - } - - NSArray* returnArraySortedByMagnitude = [peaks sortedArrayUsingComparator: - ^NSComparisonResult(Peak* a, Peak* b) { - return a.magnitude + + + + SchemeUserState + + AudioLabSwift.xcscheme_^#shared#^_ + + orderHint + 0 + + + + diff --git a/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist b/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist new file mode 100644 index 0000000..d7b4649 --- /dev/null +++ b/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist @@ -0,0 +1,24 @@ + + + + + + + + + diff --git a/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcschemes/xcschememanagement.plist b/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 0000000..8acb524 --- /dev/null +++ b/AudioLabSwift.xcodeproj/xcuserdata/naimalibhai.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,14 @@ + + + + + SchemeUserState + + AudioLabSwift.xcscheme_^#shared#^_ + + orderHint + 0 + + + + diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/Contents.json b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/Contents.json index 9221b9b..0384086 100644 --- a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/Contents.json +++ b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -1,91 +1,109 @@ { "images" : [ { + "filename" : "peruna40 2.png", "idiom" : "iphone", "scale" : "2x", "size" : "20x20" }, { + "filename" : "peruna60.png", "idiom" : "iphone", "scale" : "3x", "size" : "20x20" }, { + "filename" : "peruna58 1.png", "idiom" : "iphone", "scale" : "2x", "size" : "29x29" }, { + "filename" : "peruna87.png", "idiom" : "iphone", "scale" : "3x", "size" : "29x29" }, { + "filename" : "peruna80 1.png", "idiom" : "iphone", "scale" : "2x", "size" : "40x40" }, { + "filename" : "peruna120.png", "idiom" : "iphone", "scale" : "3x", "size" : "40x40" }, { + "filename" : "peruna120 1.png", "idiom" : "iphone", "scale" : "2x", "size" : "60x60" }, { + "filename" : "peruna180.png", "idiom" : "iphone", "scale" : "3x", "size" : "60x60" }, { + "filename" : "peruna20.png", "idiom" : "ipad", "scale" : "1x", "size" : "20x20" }, { + "filename" : "peruna40.png", "idiom" : "ipad", "scale" : "2x", "size" : "20x20" }, { + "filename" : "peruna29.png", "idiom" : "ipad", "scale" : "1x", "size" : "29x29" }, { + "filename" : "peruna58.png", "idiom" : "ipad", "scale" : "2x", "size" : 
"29x29" }, { + "filename" : "peruna40 1.png", "idiom" : "ipad", "scale" : "1x", "size" : "40x40" }, { + "filename" : "peruna80.png", "idiom" : "ipad", "scale" : "2x", "size" : "40x40" }, { + "filename" : "peruna76.png", "idiom" : "ipad", "scale" : "1x", "size" : "76x76" }, { + "filename" : "peruna152.png", "idiom" : "ipad", "scale" : "2x", "size" : "76x76" }, { + "filename" : "peruna167.png", "idiom" : "ipad", "scale" : "2x", "size" : "83.5x83.5" }, { + "filename" : "peruna1024.png", "idiom" : "ios-marketing", "scale" : "1x", "size" : "1024x1024" diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna1024.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna1024.png new file mode 100644 index 0000000..3aa7026 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna1024.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120 1.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120 1.png new file mode 100644 index 0000000..582eac8 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120 1.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120.png new file mode 100644 index 0000000..582eac8 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna120.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna152.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna152.png new file mode 100644 index 0000000..57da765 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna152.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna167.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna167.png new file mode 100644 index 0000000..c9edec2 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna167.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna180.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna180.png new file mode 100644 index 0000000..5257526 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna180.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna20.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna20.png new file mode 100644 index 0000000..1435f18 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna20.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna29.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna29.png new file mode 100644 index 0000000..cf50176 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna29.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 1.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 1.png new file mode 100644 index 0000000..bde2084 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 1.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 2.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 2.png new file mode 100644 index 0000000..bde2084 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40 2.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40.png new file mode 
100644 index 0000000..bde2084 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna40.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58 1.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58 1.png new file mode 100644 index 0000000..c744e9e Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58 1.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58.png new file mode 100644 index 0000000..c744e9e Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna58.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna60.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna60.png new file mode 100644 index 0000000..a754565 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna60.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna76.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna76.png new file mode 100644 index 0000000..f78c2ff Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna76.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80 1.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80 1.png new file mode 100644 index 0000000..7897a05 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80 1.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80.png new file mode 100644 index 0000000..7897a05 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna80.png differ diff --git a/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna87.png b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna87.png new file mode 100644 index 0000000..1fc6242 Binary files /dev/null and b/AudioLabSwift/Assets.xcassets/AppIcon.appiconset/peruna87.png differ diff --git a/AudioLabSwift/AudioModel.swift b/AudioLabSwift/AudioModel.swift index b176d20..27cd98b 100644 --- a/AudioLabSwift/AudioModel.swift +++ b/AudioLabSwift/AudioModel.swift @@ -1,198 +1,126 @@ -// -// AudioModel.swift -// AudioLabSwift -// -// Created by Eric Larson -// Copyright © 2020 Eric Larson. All rights reserved. 
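The rewritten `AudioModel` below generates its sine output by accumulating a per-sample phase of 2π·f/fs and wrapping at 2π, exactly the arithmetic in `handleSpeakerQueryWithSinusoids`. A minimal sketch of that generator (function name and parameters are illustrative; the model interleaves the same value across stereo channels):

```swift
import Foundation

// Sketch: render a sine at `frequency` Hz into a mono buffer at
// `samplingRate` Hz, carrying `phase` across calls and wrapping at 2π.
func renderSine(into buffer: inout [Float], frequency: Float,
                samplingRate: Float, phase: inout Float) {
    let twoPi = Float(2 * Double.pi)
    let increment = twoPi * frequency / samplingRate  // radians advanced per sample
    for i in buffer.indices {
        buffer[i] = sin(phase)
        phase += increment
        if phase >= twoPi { phase -= twoPi }          // keep the accumulator bounded
    }
}
```

Carrying the phase accumulator across callbacks (rather than resetting it) is what keeps the output click-free when the slider changes `sineFrequency1`.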
-// - import Foundation import Accelerate class AudioModel { // MARK: Properties - private var BUFFER_SIZE:Int - var timeData:[Float] - var fftData:[Float] + private var BUFFER_SIZE: Int + var timeData: [Float] + var fftData: [Float] + lazy var samplingRate: Int = { + return Int(self.audioManager!.samplingRate) + }() + + var sineFrequency1: Float = 300.0 // This frequency will be controlled by the slider + private var phase1: Float = 0.0 + private var phaseIncrement1: Float = 0.0 + private var sineWaveRepeatMax: Float = Float(2 * Double.pi) // MARK: Public Methods - init(buffer_size:Int) { + init(buffer_size: Int) { BUFFER_SIZE = buffer_size - // anything not lazily instatntiated should be allocated here timeData = Array.init(repeating: 0.0, count: BUFFER_SIZE) - fftData = Array.init(repeating: 0.0, count: BUFFER_SIZE/2) - } - - // public function for starting processing of microphone data - func startMicrophoneProcessing(withFps:Double){ - self.audioManager?.inputBlock = self.handleMicrophone - - // repeat this fps times per second using the timer class - Timer.scheduledTimer(timeInterval: 1.0/withFps, target: self, - selector: #selector(self.runEveryInterval), - userInfo: nil, - repeats: true) + fftData = Array.init(repeating: 0.0, count: BUFFER_SIZE / 2) } - // public function for playing from a file reader file - func startProcesingAudioFileForPlayback(){ - self.audioManager?.outputBlock = self.handleSpeakerQueryWithAudioFile - self.fileReader?.play() + func startMicrophoneProcessing(withFps: Double) { + if let manager = self.audioManager { + manager.inputBlock = self.handleMicrophone +// manager.outputBlock = self.handleSpeakerQueryWithSinusoids + + // Repeat this fps times per second using the timer class + Timer.scheduledTimer(withTimeInterval: 1.0 / withFps, repeats: true) { _ in + self.runEveryInterval() + } + } } - func startProcessingSinewaveForPlayback(withFreq:Float=330.0){ - sineFrequency = withFreq - // Two examples are given that use either objective c or that use swift - // the swift code for loop is slightly slower thatn doing this in c, - // but the implementations are very similar - //self.audioManager?.outputBlock = self.handleSpeakerQueryWithSinusoid // swift for loop - self.audioManager?.setOutputBlockToPlaySineWave(sineFrequency) // c for loop + func startMicrophoneProcessingOne(withFps: Double) { + if let manager = self.audioManager { + manager.inputBlock = self.handleMicrophone + manager.outputBlock = self.handleSpeakerQueryWithSinusoids + + // Repeat this fps times per second using the timer class + Timer.scheduledTimer(withTimeInterval: 1.0 / withFps, repeats: true) { _ in + self.runEveryInterval() + } + } } - // You must call this when you want the audio to start being handled by our model - func play(){ - self.audioManager?.play() + func play() { + if let manager = self.audioManager { + manager.play() + } } - - // Here is an example function for getting the maximum frequency - func getMaxFrequencyMagnitude() -> (Float,Float){ - // this is the slow way of getting the maximum... 
- // you might look into the Accelerate framework to make things more efficient - var max:Float = -1000.0 - var maxi:Int = 0 + + func stop(){ + if let manager = self.audioManager{ + manager.pause() + manager.inputBlock = nil + manager.outputBlock = nil + } - if inputBuffer != nil { - for i in 0..max){ - max = fftData[i] - maxi = i - } - } + if let buffer = self.inputBuffer{ + buffer.clear() // just makes zeros } - let frequency = Float(maxi) / Float(BUFFER_SIZE) * Float(self.audioManager!.samplingRate) - return (max,frequency) + inputBuffer = nil + fftHelper = nil + } - // for sliding max windows, you might be interested in the following: vDSP_vswmax - //========================================== + // MARK: Private Properties - private lazy var audioManager:Novocaine? = { + private lazy var audioManager: Novocaine? = { return Novocaine.audioManager() }() - private lazy var fftHelper:FFTHelper? = { + private lazy var fftHelper: FFTHelper? = { return FFTHelper.init(fftSize: Int32(BUFFER_SIZE)) }() - private lazy var outputBuffer:CircularBuffer? = { - return CircularBuffer.init(numChannels: Int64(self.audioManager!.numOutputChannels), - andBufferSize: Int64(BUFFER_SIZE)) - }() - - private lazy var inputBuffer:CircularBuffer? = { + private lazy var inputBuffer: CircularBuffer? = { return CircularBuffer.init(numChannels: Int64(self.audioManager!.numInputChannels), andBufferSize: Int64(BUFFER_SIZE)) }() - //========================================== - // MARK: Private Methods - private lazy var fileReader:AudioFileReader? = { - - if let url = Bundle.main.url(forResource: "satisfaction", withExtension: "mp3"){ - var tmpFileReader:AudioFileReader? = AudioFileReader.init(audioFileURL: url, - samplingRate: Float(audioManager!.samplingRate), - numChannels: audioManager!.numOutputChannels) - - tmpFileReader!.currentTime = 0.0 - print("Audio file succesfully loaded for \(url)") - return tmpFileReader - }else{ - print("Could not initialize audio input file") - return nil - } - }() - - //========================================== // MARK: Model Callback Methods - @objc - private func runEveryInterval(){ + private func runEveryInterval() { if inputBuffer != nil { - // copy data to swift array self.inputBuffer!.fetchFreshData(&timeData, withNumSamples: Int64(BUFFER_SIZE)) - - // now take FFT and display it - fftHelper!.performForwardFFT(withData: &timeData, - andCopydBMagnitudeToBuffer: &fftData) - - + fftHelper!.performForwardFFT(withData: &timeData, andCopydBMagnitudeToBuffer: &fftData) } } - - - //========================================== - // MARK: Audiocard Callbacks - // in obj-C it was (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels) - // and in swift this translates to: - private func handleMicrophone (data:Optional>, numFrames:UInt32, numChannels: UInt32) { -// var max:Float = 0.0 -// if let arrayData = data{ -// for i in 0..max){ -// max = abs(arrayData[i]) -// } -// } -// } -// // can this max operation be made faster?? 
-// print(max) - - // copy samples from the microphone into circular buffer + private func handleMicrophone(data: Optional>, numFrames: UInt32, numChannels: UInt32) { self.inputBuffer?.addNewFloatData(data, withNumSamples: Int64(numFrames)) } - private func handleSpeakerQueryWithAudioFile(data:Optional>, numFrames:UInt32, numChannels: UInt32){ - if let file = self.fileReader{ + private func handleSpeakerQueryWithSinusoids(data: Optional>, numFrames: UInt32, numChannels: UInt32) { + if let arrayData = data, let manager = self.audioManager { + // Calculate the phase increment for the current frequency + phaseIncrement1 = Float(2 * Double.pi * Double(sineFrequency1) / Double(manager.samplingRate)) - // read from file, loaidng into data (a float pointer) - file.retrieveFreshAudio(data, - numFrames: numFrames, - numChannels: numChannels) - - // set samples to output speaker buffer - self.outputBuffer?.addNewFloatData(data, - withNumSamples: Int64(numFrames)) - } - } - - // _ _ _ _ _ _ _ _ _ _ - // / \ / \ / \ / \ / \ / \ / \ / \ / \ / - // / \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ - var sineFrequency:Float = 0.0 { // frequency in Hz (changeable by user) - didSet{ - // if using swift for generating the sine wave: when changed, we need to update our increment - //phaseIncrement = Float(2*Double.pi*sineFrequency/audioManager!.samplingRate) - - // if using objective c: this changes the frequency in the novocain block - self.audioManager?.sineFrequency = sineFrequency - } - } - private var phase:Float = 0.0 - private var phaseIncrement:Float = 0.0 - private var sineWaveRepeatMax:Float = Float(2*Double.pi) - - private func handleSpeakerQueryWithSinusoid(data:Optional>, numFrames:UInt32, numChannels: UInt32){ - // while pretty fast, this loop is still not quite as fast as - // writing the code in c, so I placed a function in Novocaine to do it for you - // use setOutputBlockToPlaySineWave() in Novocaine - if let arrayData = data{ var i = 0 - while i= sineWaveRepeatMax) { phase -= sineWaveRepeatMax } - i+=1 + let chan = Int(numChannels) + let frame = Int(numFrames) + + if chan == 1 { + while i < frame { + arrayData[i] = sin(phase1) // Generate the sine wave based on the frequency + phase1 += phaseIncrement1 + if phase1 >= sineWaveRepeatMax { phase1 -= sineWaveRepeatMax } + i += 1 + } + } else if chan == 2 { + let len = frame * chan + while i < len { + arrayData[i] = sin(phase1) + arrayData[i + 1] = arrayData[i] // Stereo output, same on both channels + phase1 += phaseIncrement1 + if phase1 >= sineWaveRepeatMax { phase1 -= sineWaveRepeatMax } + i += 2 + } } } } diff --git a/AudioLabSwift/Base.lproj/LaunchScreen.storyboard b/AudioLabSwift/Base.lproj/LaunchScreen.storyboard deleted file mode 100644 index 865e932..0000000 --- a/AudioLabSwift/Base.lproj/LaunchScreen.storyboard +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/AudioLabSwift/Base.lproj/Main.storyboard b/AudioLabSwift/Base.lproj/Main.storyboard index 31ce0c9..80b4232 100644 --- a/AudioLabSwift/Base.lproj/Main.storyboard +++ b/AudioLabSwift/Base.lproj/Main.storyboard @@ -1,9 +1,11 @@ - - + + - + + + @@ -12,15 +14,153 @@ - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/AudioLabSwift/Info.plist b/AudioLabSwift/Info.plist index b8c43bf..6f35bb8 100644 --- a/AudioLabSwift/Info.plist +++ b/AudioLabSwift/Info.plist @@ -53,14 +53,16 @@ 
UISupportedInterfaceOrientations + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight UIInterfaceOrientationPortrait UISupportedInterfaceOrientations~ipad - UIInterfaceOrientationPortrait - UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown diff --git a/AudioLabSwift/MetalGraph.swift b/AudioLabSwift/MetalGraph.swift deleted file mode 100644 index b20b8a0..0000000 --- a/AudioLabSwift/MetalGraph.swift +++ /dev/null @@ -1,202 +0,0 @@ -// -// MetalGraph.swift -// AudioLabSwift -// -// Created by Eric Larson -// Copyright © 2020 Eric Larson. All rights reserved. -// - -import Foundation -import UIKit -import Metal -import Accelerate - -class MetalGraph { - - var device: MTLDevice! - var metalLayer: CAMetalLayer! - var pipelineState: MTLRenderPipelineState! - var commandQueue: MTLCommandQueue! - var timer: CADisplayLink! - - - var vertexData: [String:[Float]] = [String: [Float]]() - var vertexBuffer: [String:MTLBuffer] = [String:MTLBuffer]() - var vertexColorBuffer: [String:MTLBuffer] = [String:MTLBuffer]() - var vertexPointer: [String:UnsafeMutablePointer] = [String:UnsafeMutablePointer]() - var vertexNormalize: [String:Bool] = [String:Bool]() - var vertexNum: [String:Int] = [String:Int]() - var dsFactor: [String:Int] = [String:Int]() - - let maxPointsPerGraph = 512 // you can increase this or decrease for different GPU speeds - var needsRender = false - let numShaderFloats = 4 - - //iOS color palette with gradients - let R = [0xFF,0xFF, 0x52,0x5A, 0xFF,0xFF, 0x1A,0x1D, 0xEF,0xC6, 0xDB,0x89, 0x87,0x0B, 0xFF,0xFF] - let G = [0x5E,0x2A, 0xED,0xC8, 0xDB,0xCD, 0xD6,0x62, 0x4D,0x43, 0xDD,0x8C, 0xFC,0xD3, 0x95,0x5E] - let B = [0x3A,0x68, 0xC7,0xFB, 0x4C,0x02, 0xFD,0xF0, 0xB6,0xFC, 0xDE,0x90, 0x70,0x18, 0x00,0x3A] - - init(mainView:UIView) - { - // get device - guard let device = MTLCreateSystemDefaultDevice() else { fatalError("GPU not available") } - self.device = device - - //setup layer (in the back of the views) - metalLayer = CAMetalLayer() - metalLayer.device = self.device - metalLayer.pixelFormat = .bgra8Unorm - metalLayer.framebufferOnly = true - metalLayer.frame = mainView.layer.frame - mainView.layer.insertSublayer(metalLayer, at:0) - - commandQueue = self.device.makeCommandQueue() - - timer = CADisplayLink(target: self, selector: #selector(gameloop)) - timer.add(to: RunLoop.main, forMode: .default) - - guard let defaultLibrary = device.makeDefaultLibrary(), - let fragmentProgram = defaultLibrary.makeFunction(name: "passThroughFragment"), - let vertexProgram = defaultLibrary.makeFunction(name: "passThroughVertex") else { fatalError() } - - let pipelineStateDescriptor = MTLRenderPipelineDescriptor() - pipelineStateDescriptor.vertexFunction = vertexProgram - pipelineStateDescriptor.fragmentFunction = fragmentProgram - pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm - pipelineStateDescriptor.colorAttachments[0].isBlendingEnabled = false - - pipelineState = try! 
-        pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor)
-    }
-
-    func addGraph(withName:String,
-                  shouldNormalize:Bool,
-                  numPointsInGraph:Int){
-
-        //setup graph
-        let key = withName
-        let numGraphs = Int(vertexData.count)
-
-        dsFactor[key] = Int(numPointsInGraph/maxPointsPerGraph) // downsample factor for each graph
-        if dsFactor[key]!<1 { dsFactor[key] = 1 }
-
-        vertexData[key] = Array.init(repeating: 0.0, count: (numPointsInGraph/dsFactor[key]!)*numShaderFloats)
-        vertexNormalize[key] = shouldNormalize
-        vertexNum[key] = numGraphs
-
-        // we use a 4D location, so copy over the right things
-        let maxIdx = Int(vertexData[key]!.count/numShaderFloats)
-        for j in 0..
[extraction gap: the remainder of the deleted MetalGraph.swift (roughly 130 lines) and the diff header plus opening of the file that adds the gesture-handling code below, including the left-hand side of this comparison, are missing from this extraction]
+        ... > 0 {
+            gestureLabel.text = "Gesture Toward"
+        } else {
+            gestureLabel.text = "Gesture Away"
+        }
+
+        previousPeakFrequency = currentPeakFrequency
+    }
+
+    // Helper function to find the peak frequency from the FFT data
+    func findPeakFrequency(fftData: [Float], frequencyResolution: Float) -> Float {
+        var maxMagnitude: Float = -Float.infinity
+        var maxIndex: Int = 0
+
+        let targetFrequency = currentFrequency
+        let frequencyTolerance: Float = 1000.0
+        let startIndex = max(Int((targetFrequency - frequencyTolerance) / frequencyResolution), 0)
+        let endIndex = min(Int((targetFrequency + frequencyTolerance) / frequencyResolution), fftData.count - 1)
+
+        for i in startIndex...endIndex {
+            if fftData[i] > maxMagnitude {
+                maxMagnitude = fftData[i]
+                maxIndex = i
+            }
+        }
+
+        return Float(maxIndex) * frequencyResolution
+    }
+}
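The handler above turns the sign of the change in peak frequency into a gesture label: reflections off a hand moving toward the device are Doppler-shifted up, and away shifts them down. One practical wrinkle is bin-to-bin jitter in the FFT peak; a small dead-band keeps the label from flickering. A sketch of that decision rule (the Gesture enum and the 2 Hz threshold are illustrative assumptions, not from the diff):

// Classify gesture direction from successive peak-frequency estimates.
enum Gesture { case toward, away, none }

func classifyGesture(current: Float, previous: Float,
                     minShiftHz: Float = 2.0) -> Gesture {
    let shift = current - previous
    if abs(shift) < minShiftHz { return .none }   // ignore FFT-bin jitter
    return shift > 0 ? .toward : .away
}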
diff --git a/AudioDSPUtils/AudioFileReader.h b/AudioLabSwift/Utilities/AudioFileReader.h
similarity index 100%
rename from AudioDSPUtils/AudioFileReader.h
rename to AudioLabSwift/Utilities/AudioFileReader.h
diff --git a/AudioDSPUtils/AudioFileReader.m b/AudioLabSwift/Utilities/AudioFileReader.m
similarity index 97%
rename from AudioDSPUtils/AudioFileReader.m
rename to AudioLabSwift/Utilities/AudioFileReader.m
index 1f7097e..9bdd848 100644
--- a/AudioDSPUtils/AudioFileReader.m
+++ b/AudioLabSwift/Utilities/AudioFileReader.m
@@ -73,17 +73,21 @@ - (void)dealloc
     if (self.playing)
         [self pause];
 
-    self.readerBlock = nil;
+    _readerBlock = nil;
 
     // Close the ExtAudioFile
-    ExtAudioFileDispose(self.inputFile);
+    ExtAudioFileDispose(_inputFile);
 
-    free(self.outputBuffer);
-    free(self.holdingBuffer);
+    free(_outputBuffer);
+    free(_holdingBuffer);
+    _outputBuffer = nil;
+    _holdingBuffer = nil;
+    _callbackTimer = nil;
 
     // [ringBuffer dealloc];
     // [super dealloc];
+    printf("AudioFileReader object was deallocated\n");
 }
 
@@ -298,6 +302,7 @@ - (void)stop
     // Release the dispatch timer because it holds a reference to this class instance
     [self pause];
     if (self.callbackTimer) {
+        //dispatch_release(self.callbackTimer);
         self.callbackTimer = nil;
     }
 
diff --git a/AudioDSPUtils/CircularBuffer.h b/AudioLabSwift/Utilities/CircularBuffer.h
similarity index 100%
rename from AudioDSPUtils/CircularBuffer.h
rename to AudioLabSwift/Utilities/CircularBuffer.h
diff --git a/AudioDSPUtils/CircularBuffer.m b/AudioLabSwift/Utilities/CircularBuffer.m
similarity index 98%
rename from AudioDSPUtils/CircularBuffer.m
rename to AudioLabSwift/Utilities/CircularBuffer.m
index 3190a6a..303716f 100644
--- a/AudioDSPUtils/CircularBuffer.m
+++ b/AudioLabSwift/Utilities/CircularBuffer.m
@@ -77,10 +77,12 @@ -(id)initWithNumChannels:(SInt64)numChannels
 -(void) dealloc{
     for (int i=0; i
[extraction gap: the rest of the CircularBuffer.m hunk, the diff header for the new AudioLabSwift/MetalGraph.swift, and that file's opening declarations are missing from this extraction; the new file resumes mid declaration below]
+    private var vertexPointer: [String:UnsafeMutablePointer<Float>] = [String:UnsafeMutablePointer<Float>]()
+    private var vertexNum: [String:Int] = [String:Int]()
+    private var vertexShowGrid: [String:Bool] = [String:Bool]()
+    private var dsFactor: [String:Int] = [String:Int]()
+    private var vertexGain: [String:Float32] = [String:Float32]()
+    private var vertexBias: [String:Float32] = [String:Float32]()
+    private var boxBuffer:[String:MTLBuffer] = [String:MTLBuffer]()
+    private var boxColorBuffer:[String:MTLBuffer] = [String:MTLBuffer]()
+    private var needsRender = false
+
+    //MARK: iOS color palette with gradients
+    private let R = [0xFF,0xFF, 0x52,0x5A, 0xFF,0xFF, 0x1A,0x1D, 0xEF,0xC6, 0xDB,0x89, 0x87,0x0B, 0xFF,0xFF]
+    private let G = [0x5E,0x2A, 0xED,0xC8, 0xDB,0xCD, 0xD6,0x62, 0x4D,0x43, 0xDD,0x8C, 0xFC,0xD3, 0x95,0x5E]
+    private let B = [0x3A,0x68, 0xC7,0xFB, 0x4C,0x02, 0xFD,0xF0, 0xB6,0xFC, 0xDE,0x90, 0x70,0x18, 0x00,0x3A]
+
+    //MARK: Constants
+    private struct GraphConstants{
+        static let fftNormalizer:Float = 64.0
+        static let fftAddition:Float = 40.0
+        static let maxPointsPerGraph = 512 // you can increase this or decrease for different GPU speeds
+        static let numShaderFloats = 4
+    }
+
+    //MARK: Initialization and Rendering Functions
+    // Initialize the class, setup where this view will be drawing to
+    init(userView:UIView)
+    {
+        // get device
+        guard let device = MTLCreateSystemDefaultDevice() else { fatalError("GPU not available") }
+        self.device = device
+
+        //setup layer (in the back of the views)
+        metalLayer = CAMetalLayer()
+        metalLayer.device = self.device
+        metalLayer.pixelFormat = .bgra8Unorm
+        metalLayer.framebufferOnly = true
+        metalLayer.contentsScale = 2.0
+        metalLayer.frame = userView.bounds
+
+        userView.layer.insertSublayer(metalLayer, at:0)
+
+        // register for orientation changes; the layer bounds are updated in onOrientationChange below
+        NotificationCenter.default.addObserver(self, selector: #selector(self.onOrientationChange), name: UIDevice.orientationDidChangeNotification, object: nil)
+
+        commandQueue = self.device.makeCommandQueue()
+
+        // setup a repeating render function
+        timer = CADisplayLink(target: self, selector: #selector(gameloop))
+        timer.add(to: RunLoop.main, forMode: .default)
+
+        // add in shaders to the program
+        guard let defaultLibrary = device.makeDefaultLibrary(),
+              let fragmentProgram = defaultLibrary.makeFunction(name: "passThroughFragment"),
+              let vertexProgram = defaultLibrary.makeFunction(name: "passThroughVertex") else { fatalError("Could not find Shaders.metal file.") }
+
+        let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
+        pipelineStateDescriptor.vertexFunction = vertexProgram
+        pipelineStateDescriptor.fragmentFunction = fragmentProgram
+        pipelineStateDescriptor.colorAttachments[0].pixelFormat = metalLayer.pixelFormat
+        pipelineStateDescriptor.colorAttachments[0].isBlendingEnabled = false
+
+        pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor)
+
+    }
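One detail worth flagging in the initializer above: metalLayer.contentsScale is hard-coded to 2.0, which under-samples on 3x-scale devices. A device-independent alternative (an assumption, not what the diff does) would be:

metalLayer.contentsScale = UIScreen.main.scale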
+
+    @objc
+    func onOrientationChange(){
+        DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) {
+            if let userView = self.metalLayer.superlayer{
+                self.metalLayer.frame = userView.bounds
+            }
+        }
+    }
+
+    deinit {
+        NotificationCenter.default.removeObserver(self)
+        print("\(Self.self) object was deallocated")
+    }
+
+    func teardown(){
+
+        if let commandBuffer = commandQueue.makeCommandBuffer(){
+            commandBuffer.commit()
+            // do not try to deallocate when the command buffer is rendering
+            commandBuffer.waitUntilCompleted()
+            // this was causing errors before teardown
+        }
+
+        timer.invalidate()
+        metalLayer.removeFromSuperlayer()
+
+        for (key,_) in vertexBuffer{
+            if let buff = vertexBuffer[key]{
+                buff.setPurgeableState(MTLPurgeableState.empty)
+            }
+            vertexBuffer[key] = nil
+        }
+        for (key,_) in vertexColorBuffer{
+            if let buff = vertexColorBuffer[key]{
+                buff.setPurgeableState(MTLPurgeableState.empty)
+            }
+            vertexColorBuffer[key] = nil
+        }
+        for (key,_) in boxBuffer{
+            if let buff = boxBuffer[key]{
+                buff.setPurgeableState(MTLPurgeableState.empty)
+            }
+            boxBuffer[key] = nil
+        }
+        for (key,_) in boxColorBuffer{
+            if let buff = boxColorBuffer[key]{
+                buff.setPurgeableState(MTLPurgeableState.empty)
+            }
+            boxColorBuffer[key] = nil
+        }
+
+        for (key,_) in vertexPointer{
+            vertexPointer[key] = nil
+        }
+
+        timer = nil
+        metalLayer = nil
+        commandQueue = nil
+        device = nil
+        pipelineState = nil
+
+    }
+
+    private var gridLength:Int = 0
+    private func createGraphGrid(name:String,min:Float,max:Float){
+        let mid = (max-min)/2.0+min
+        let box:[Float32] = [-0.99, min, 0.0, 0.0, // primitive draw protect
+                             -0.99, min, 0.0, 1.0,
+                             -0.99, max, 0.0, 1.0,
+                              0.99, max, 0.0, 1.0,
+                              0.99, min, 0.0, 1.0,
+                             -0.99, min, 0.0, 1.0, // outer box
+                             -0.75, min, 0.0, 1.0,
+                             -0.75, max, 0.0, 1.0,
+                              0.75, max, 0.0, 1.0,
+                              0.75, min, 0.0, 1.0,
+                             -0.75, min, 0.0, 1.0, // outer quartile box
+                             -0.25, min, 0.0, 1.0,
+                             -0.25, max, 0.0, 1.0,
+                              0.25, max, 0.0, 1.0,
+                              0.25, min, 0.0, 1.0,
+                             -0.25, min, 0.0, 1.0, // inner quartile box
+                             -0.5, min, 0.0, 1.0,
+                             -0.5, max, 0.0, 1.0,
+                              0.5, max, 0.0, 1.0,
+                              0.5, min, 0.0, 1.0,
+                             -0.5, min, 0.0, 1.0, // mid quartile box
+                              0.0, min, 0.0, 1.0,
+                              0.0, max, 0.0, 1.0, // mid line
+                             -0.99, max, 0.0, 1.0, // center line
+                             -0.99, mid, 0.0, 1.0,
+                              0.99, mid, 0.0, 1.0,
+                              0.99, mid, 0.0, 0.0 // primitive draw protect
+        ]
+
+        let boxColor:[Float32] = [Float32].init(repeating: 0.5, count:box.count)
+        gridLength = box.count
+
+        var dataSize = box.count * MemoryLayout.size(ofValue: box[0])
+        boxBuffer[name] = device.makeBuffer(bytes: box,
+                                            length: dataSize,
+                                            options: []) //cpuCacheModeWriteCombined
+
+        dataSize = boxColor.count * MemoryLayout.size(ofValue: boxColor[0])
+        boxColorBuffer[name] = device.makeBuffer(bytes: boxColor,
+                                                 length: dataSize,
+                                                 options: [])
+
+    }
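createGraphGrid above lays its strokes out directly in Metal's normalized device coordinates, where x and y run from -1 to 1; the first and last vertices carry w = 0 (the "primitive draw protect" entries), which pushes those endpoints out of the clip volume so the line strip's lead-in and lead-out segments are discarded rather than drawn. Plot data maps into the same horizontal extent; a small helper showing that index-to-NDC mapping (illustrative, not part of the diff):

// Map sample index i of n points onto the grid's horizontal extent [-0.99, 0.99].
func ndcX(forIndex i: Int, ofCount n: Int) -> Float {
    precondition(n > 1, "need at least two points to span the axis")
    return -0.99 + 1.98 * Float(i) / Float(n - 1)
}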
+    private func render() {
+        if needsRender == false { return } // prevent over-rendering manually
+        needsRender = false
+
+        guard let drawable = metalLayer?.nextDrawable() else { return }
+        let renderPassDescriptor = MTLRenderPassDescriptor()
+        renderPassDescriptor.colorAttachments[0].texture = drawable.texture
+        renderPassDescriptor.colorAttachments[0].loadAction = .clear
+        // this sets the background color
+        renderPassDescriptor.colorAttachments[0].clearColor = self.backgroundColor
+
+        if let commandBuffer = commandQueue.makeCommandBuffer(){
+
+            let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor)!
+            renderEncoder.setRenderPipelineState(pipelineState)
+
+            // show graph boxes
+            for (key,_) in boxBuffer{
+                renderEncoder.setVertexBuffer(boxBuffer[key], offset: 0, index: 0)
+                renderEncoder.setVertexBuffer(boxColorBuffer[key], offset: 0, index: 1)
+                renderEncoder.drawPrimitives(type: .lineStrip,
+                                             vertexStart: 0,
+                                             vertexCount: gridLength-2)
+            }
+
+            // for each graph, update the values for the line
+            for (key,_) in vertexBuffer{
+                renderEncoder.setVertexBuffer(vertexBuffer[key], offset: 0, index: 0)
+                renderEncoder.setVertexBuffer(vertexColorBuffer[key], offset: 0, index: 1)
+                renderEncoder.drawPrimitives(type: .lineStrip,
+                                             vertexStart: 0,
+                                             vertexCount: vertexData[key]!.count)
+            }
+            renderEncoder.endEncoding()
+            commandBuffer.present(drawable)
+            commandBuffer.commit()
+        }
+    }
+
+    @objc private func gameloop() {
+        autoreleasepool {
+            self.render()
+        }
+    }
+
+    //MARK: Public Access Functions
+    func setBackgroundColor(r:Double,g:Double,b:Double,a:Double){
+        self.backgroundColor = MTLClearColor(red: r, green: g, blue: b, alpha: a)
+    }
+
+    func addGraph(withName:String,
+                  numPointsInGraph:Int){
+        // no normalization needed
+        self.addGraph(withName: withName,
+                      withGain: 1.0,
+                      withBias: 0.0,
+                      numPointsInGraph: numPointsInGraph)
+    }
+
+    func addGraph(withName:String,
+                  shouldNormalizeForFFT:Bool,
+                  numPointsInGraph:Int){
+        // custom FFT normalization option
+        if shouldNormalizeForFFT{
+            // use built in normalization for the fft
+            self.addGraph(withName: withName,
+                          withGain: GraphConstants.fftNormalizer,
+                          withBias: GraphConstants.fftAddition,
+                          numPointsInGraph: numPointsInGraph)
+        }else{
+            self.addGraph(withName: withName,
+                          withGain: 1.0,
+                          withBias: 0.0,
+                          numPointsInGraph: numPointsInGraph)
+        }
+    }
+
+    func addGraph(withName:String,
+                  withGain:Float,
+                  withBias:Float,
+                  numPointsInGraph:Int){
+
+        self.addGraph(withName: withName,
+                      withGain: withGain,
+                      withBias: withBias,
+                      numPointsInGraph: numPointsInGraph,
+                      showGrid: true
+        )
+    }
+
+    func addGraph(withName:String,
+                  withGain:Float,
+                  withBias:Float,
+                  numPointsInGraph:Int,
+                  showGrid:Bool){
+        //setup graph
+        let key = withName
+        let numGraphs = Int(vertexData.count)
+
+        dsFactor[key] = Int(numPointsInGraph/GraphConstants.maxPointsPerGraph) // downsample factor for each graph
+        if dsFactor[key]!<1 { dsFactor[key] = 1 }
+
+        //init the graph and normalization
+        vertexData[key] = Array.init(repeating: 0.0, count: (numPointsInGraph/dsFactor[key]!)*GraphConstants.numShaderFloats)
+        vertexNum[key] = numGraphs
+        // custom setup from user
+        vertexGain[key] = withGain
+        vertexBias[key] = withBias
+
+        // we use a 4D location, so copy over the right things
+        let maxIdx = Int(vertexData[key]!.count/GraphConstants.numShaderFloats)
+        for j in 0..
[extraction gap: the loop body that fills in the 4D vertex positions, the creation of the vertex buffer and its pointer, and the start of the per-vertex color setup are missing from this extraction; the text resumes mid color loop]
+            if j >= maxIdx-2{
+                vertexColorData[j*GraphConstants.numShaderFloats] = 0.0
+                vertexColorData[j*GraphConstants.numShaderFloats+1] = 0.0
+                vertexColorData[j*GraphConstants.numShaderFloats+2] = 0.0
+                vertexColorData[j*GraphConstants.numShaderFloats+3] = 0.0
+            }
+        }
+        vertexColorBuffer[key] = device.makeBuffer(bytes: vertexColorData, length: dataSize, options: [])
+
+        // now save if we should have a grid for this graph
+        vertexShowGrid[key] = showGrid
+
+    }
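The gain and bias arguments above define how raw values land inside a graph's row of the screen: updateGraph (below) draws each value at an offset of (value + bias)/(gain × numGraphs) from the row's midline. For the FFT preset (gain 64, bias 40) that puts -40 dB on the midline and fits roughly -97 dB to +17 dB inside the row's ±0.89/numGraphs extent. A worked check of that arithmetic (the function name is illustrative):

// Offset from a row's midline, in NDC units, for the FFT preset (gain 64, bias 40).
func ndcOffsetFromMidline(dB d: Float, numGraphs n: Float) -> Float {
    return (d + 40.0) / (64.0 * n)
}
// ndcOffsetFromMidline(dB: -40, numGraphs: 1) ==  0.0    (midline)
// ndcOffsetFromMidline(dB:  17, numGraphs: 1) ≈   0.89   (top of the row)
// ndcOffsetFromMidline(dB: -97, numGraphs: 1) ≈  -0.89   (bottom of the row)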
+
+    func makeGrids(){
+        for (forKey,_) in vertexBuffer{
+            if vertexShowGrid[forKey]!{
+                let numGraphs = Float(vertexData.count)
+                let addToPlot = -1.0 + 2*(Float(vertexNum[forKey]!) / numGraphs) + 1.0/numGraphs
+                // get to midpoint of plot on screen
+                let minVal:Float = addToPlot - (0.9 / numGraphs)
+                let maxVal:Float = addToPlot + (0.9 / numGraphs)
+                createGraphGrid(name:forKey, min: minVal, max: maxVal)
+            }
+        }
+    }
+
+    func updateGraph(data:[Float], forKey:String){
+
+        if vertexData.keys.contains(forKey) {
+
+            let numGraphs = Float(vertexData.count)
+            var addToPlot = -1.0 + 2*(Float(vertexNum[forKey]!) / numGraphs) + 1.0/numGraphs
+
+            var multiplier:Float = 1.0
+
+            // get to midpoint of plot on screen
+            var minVal:Float = addToPlot - (0.89 / numGraphs)
+            var maxVal:Float = addToPlot + (0.89 / numGraphs)
+
+            // now add custom normalizations
+            addToPlot += vertexBias[forKey]!/(vertexGain[forKey]! * numGraphs)
+            multiplier = 1.0/(vertexGain[forKey]! * numGraphs)
+
+            // multiply by the multiplier and add in addToPlot, strided by dsFactor and starting at element one of the array
+            // there is a lot to unpack here, trust me it works and is awesomely fast
+
+            // vector:scalar-multiply:scalar-addition
+            vDSP_vsmsa(data, // go through this data
+                       vDSP_Stride(dsFactor[forKey]!), // down sample input
+                       &multiplier, &addToPlot, // scalars to mult and add
+                       &(vertexPointer[forKey]![1]), // save to this data (keep zeroth element the same so lines do not connect)
+                       vDSP_Stride(GraphConstants.numShaderFloats), // skip through 4D location
+                       vDSP_Length(data.count/dsFactor[forKey]!)) // do this many adds
+
+            // here is what the above code does, but using SIMD
+            //for i in 0..
[extraction gap: the rest of the commented SIMD loop, the remainder of updateGraph, the class's closing brace, and the diff header for the new Metal shader source (AudioLabSwift/Shaders.metal, per the fatalError message above) are missing from this extraction; the standard #include <metal_stdlib> line is restored below]
+#include <metal_stdlib>
+using namespace metal;
+
+struct VertexInOut
+{
+    float4  position [[position]];
+    float4  color;
+};
+
+vertex VertexInOut passThroughVertex(uint vid [[ vertex_id ]],
+                                     constant packed_float4* position [[ buffer(0) ]],
+                                     constant packed_float4* color    [[ buffer(1) ]])
+{
+    VertexInOut outVertex;
+
+    outVertex.position = position[vid];
+    outVertex.color    = color[vid];
+
+    return outVertex;
+};
+
+fragment half4 passThroughFragment(VertexInOut inFrag [[stage_in]])
+{
+    return half4(inFrag.color);
+};
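Back in updateGraph above, the vDSP_vsmsa call is the whole data path from audio samples to vertices: it reads every dsFactor-th input sample, computes sample × multiplier + addToPlot, and scatters the results into the y slot of each 4-float (x, y, z, w) vertex. A plain-Swift equivalent for reference (a sketch; the diff keeps the Accelerate call for speed):

// Plain-Swift equivalent of the strided vDSP_vsmsa call:
// y-position of vertex i = data[i * dsFactor] * multiplier + offset
func scaleAndOffset(_ data: [Float], into vertices: inout [Float],
                    dsFactor: Int, multiplier: Float, offset: Float) {
    let n = data.count / dsFactor
    for i in 0..<n {
        // slot 1 of every 4-float vertex holds y; starting the writes at
        // index 1 leaves the zeroth float untouched, as the diff's comment notes
        vertices[1 + i * 4] = data[i * dsFactor] * multiplier + offset
    }
}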
diff --git a/AudioLabSwift/ViewController.swift b/AudioLabSwift/ViewController.swift
index 01e10aa..b121f5b 100644
--- a/AudioLabSwift/ViewController.swift
+++ b/AudioLabSwift/ViewController.swift
@@ -1,70 +1,123 @@
-//
-//  ViewController.swift
-//  AudioLabSwift
-//
-//  Created by Eric Larson
-//  Copyright © 2020 Eric Larson. All rights reserved.
-//
-
 import UIKit
 import Metal
 
-let AUDIO_BUFFER_SIZE = 1024*4
-
-
 class ViewController: UIViewController {
-
-    let audio = AudioModel(buffer_size: AUDIO_BUFFER_SIZE)
-    lazy var graph:MetalGraph? = {
-        return MetalGraph(mainView: self.view)
+    @IBOutlet weak var userView: UIView!
+
+    struct AudioConstants {
+        static let AUDIO_BUFFER_SIZE = 1024 * 4
+        static let MIN_FREQUENCY_DIFFERENCE: Float = 50.0
+        static let MAGNITUDE_THRESHOLD: Float = 0.5
+    }
+
+    // Setup audio model
+    let audio = AudioModel(buffer_size: AudioConstants.AUDIO_BUFFER_SIZE)
+    lazy var graph: MetalGraph? = {
+        return MetalGraph(userView: self.userView)
     }()
+    var timer: Timer? = nil
+    @IBOutlet weak var labelF1: UILabel!
+    @IBOutlet weak var labelF2: UILabel!
+    @IBOutlet weak var vowelLabel: UILabel! // New label to display "oooo" or "ahhhh"
+
+    var lastLockedFrequencies: (Float, Float)? = nil // Stores the last significant frequency pair
 
     override func viewDidLoad() {
         super.viewDidLoad()
+        if let graph = self.graph {
+            graph.setBackgroundColor(r: 0, g: 0, b: 0, a: 1)
+
+            // Add in graph for display
+            graph.addGraph(withName: "fft", shouldNormalizeForFFT: true, numPointsInGraph: AudioConstants.AUDIO_BUFFER_SIZE / 2)
+            graph.makeGrids() // Add grids to graph
+        }
 
-        // add in graphs for display
-        graph?.addGraph(withName: "fft",
-                        shouldNormalize: true,
-                        numPointsInGraph: AUDIO_BUFFER_SIZE/2)
-
-        graph?.addGraph(withName: "time",
-                        shouldNormalize: false,
-                        numPointsInGraph: AUDIO_BUFFER_SIZE)
-
-        // just start up the audio model here
-        audio.startMicrophoneProcessing(withFps: 10)
-        //audio.startProcesingAudioFileForPlayback()
-        audio.startProcessingSinewaveForPlayback(withFreq: 630.0)
+        // Start microphone processing, no audio output
+        audio.startMicrophoneProcessing(withFps: 20)
+        audio.play()
 
-        // run the loop for updating the graph peridocially
-        Timer.scheduledTimer(timeInterval: 0.05, target: self,
-                             selector: #selector(self.updateGraph),
-                             userInfo: nil,
-                             repeats: true)
-
+        // Start the timer for graph updates
+        timer = Timer.scheduledTimer(withTimeInterval: 0.05, repeats: true) { _ in
+            self.updateGraph()
+            self.updateLabelsWithPeakFrequencies()
+        }
     }
 
+    override func viewDidDisappear(_ animated: Bool) {
+        timer?.invalidate()
+        graph?.teardown()
+        graph = nil
+        audio.stop()
+        super.viewDidDisappear(animated)
+    }
 
-    @objc
-    func updateGraph(){
-        self.graph?.updateGraph(
-            data: self.audio.fftData,
-            forKey: "fft"
-        )
+    // Periodically update the graph with refreshed FFT data
+    func updateGraph() {
+        if let graph = self.graph {
+            graph.updateGraph(data: self.audio.fftData, forKey: "fft")
+        }
+    }
 
-        self.graph?.updateGraph(
-            data: self.audio.timeData,
-            forKey: "time"
-        )
+    // Find and display the two loudest frequencies
+    func updateLabelsWithPeakFrequencies() {
+        let fftData = audio.fftData
+        let frequencyResolution = Float(audio.samplingRate) / Float(AudioConstants.AUDIO_BUFFER_SIZE)
 
+        // Collect every bin whose magnitude exceeds the threshold
+        var peaks: [(frequency: Float, magnitude: Float)] = []
+
+        for (index, magnitude) in fftData.enumerated() {
+            let frequency = Float(index) * frequencyResolution
+            if magnitude > AudioConstants.MAGNITUDE_THRESHOLD {
+                peaks.append((frequency, magnitude))
+            }
+        }
+
+        // Sort peaks by magnitude in descending order
+        peaks.sort { $0.magnitude > $1.magnitude }
+
+        if peaks.count >= 2 {
+            let topTwoFrequencies = (peaks[0].frequency, peaks[1].frequency)
+
+            // Check that they are at least 50 Hz apart and have large magnitudes
+            if abs(topTwoFrequencies.0 - topTwoFrequencies.1) >= AudioConstants.MIN_FREQUENCY_DIFFERENCE {
+                labelF1.text = String(format: "F1: %.2f Hz", topTwoFrequencies.0)
+                labelF2.text = String(format: "F2: %.2f Hz", topTwoFrequencies.1)
+
+                // Lock the frequencies in place
+                lastLockedFrequencies = topTwoFrequencies
+
+                // Check if the formant frequencies correspond to "oooo" or "ahhhh"
+                if isOooo(f1: topTwoFrequencies.0, f2: topTwoFrequencies.1) {
+                    vowelLabel.text = "Vowel: Ooooo"
+                } else if isAhhhh(f1: topTwoFrequencies.0, f2: topTwoFrequencies.1) {
+                    vowelLabel.text = "Vowel: Ahhhh"
+                } else {
+                    vowelLabel.text = "Vowel: Unknown"
+                }
+            }
+        } else if let lastLocked = lastLockedFrequencies {
+            // Display locked frequencies if no new large-magnitude frequencies are found
+            labelF1.text = String(format: "F1: %.2f Hz", lastLocked.0)
+            labelF2.text = String(format: "F2: %.2f Hz", lastLocked.1)
+        } else {
+            // No significant frequencies found and no locked frequencies, show "N/A"
+            labelF1.text = "F1: N/A"
+            labelF2.text = "F2: N/A"
+            vowelLabel.text = "Vowel: N/A"
+        }
+    }
 
+    // Helper function to determine if the sound is "oooo"
+    func isOooo(f1: Float, f2: Float) -> Bool {
+        return (f1 >= 400 && f1 <= 600) && (f2 >= 700 && f2 <= 900)
+    }
+
+    // Helper function to determine if the sound is "ahhhh"
+    func isAhhhh(f1: Float, f2: Float) -> Bool {
+        return (f1 >= 600 && f1 <= 900) && (f2 >= 1200 && f2 <= 1400)
+    }
 }
-
diff --git a/AudioLabSwift/satisfaction.mp3 b/AudioLabSwift/satisfaction.mp3
old mode 100755
new mode 100644
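A closing note on updateLabelsWithPeakFrequencies: collecting every bin above the threshold and sorting by magnitude can return two adjacent bins of the same wide peak, which the 50 Hz separation check then rejects outright. An alternative is to consider only local maxima and enforce the separation while picking the second peak (a sketch; the default separation mirrors AudioConstants.MIN_FREQUENCY_DIFFERENCE, but the function name is illustrative):

// Pick the two largest local maxima at least minSeparation Hz apart,
// rather than the two largest raw bins (which can straddle one wide peak).
func topTwoPeaks(fft: [Float], resolution: Float,
                 minSeparation: Float = 50.0) -> (Float, Float)? {
    guard fft.count > 2 else { return nil }
    var peaks: [(freq: Float, mag: Float)] = []
    for i in 1..<(fft.count - 1) where fft[i] > fft[i - 1] && fft[i] >= fft[i + 1] {
        peaks.append((Float(i) * resolution, fft[i]))
    }
    peaks.sort { $0.mag > $1.mag }
    guard let first = peaks.first else { return nil }
    guard let second = peaks.dropFirst().first(where: { abs($0.freq - first.freq) >= minSeparation }) else {
        return nil
    }
    return (first.freq, second.freq)
}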