Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions AudioDSPUtils/Novocaine.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,15 +71,15 @@ typedef void (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);

@interface Novocaine : NSObject

@property AudioUnit inputUnit;
@property AudioUnit audioUnit;
@property AudioBufferList *inputBuffer;
@property (nonatomic, copy) OutputBlock outputBlock;
@property (nonatomic, copy) InputBlock inputBlock;
@property BOOL inputAvailable;
@property (nonatomic, retain) NSString *inputRoute;
@property UInt32 numInputChannels;
@property UInt32 numOutputChannels;
@property Float64 samplingRate;
@property (readonly) Float64 samplingRate;
@property BOOL isInterleaved;
@property BOOL isSetUp;
@property UInt32 numBytesPerSample;
Expand Down
220 changes: 115 additions & 105 deletions AudioDSPUtils/Novocaine.m

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions AudioLabSwift.xcodeproj/project.pbxproj
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CODE_SIGN_STYLE = Automatic;
DEVELOPMENT_TEAM = APD62CDC25;
DEVELOPMENT_TEAM = F69WJ27LN4;
INFOPLIST_FILE = AudioLabSwift/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 13.6;
LD_RUNPATH_SEARCH_PATHS = (
Expand All @@ -351,7 +351,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CODE_SIGN_STYLE = Automatic;
DEVELOPMENT_TEAM = APD62CDC25;
DEVELOPMENT_TEAM = F69WJ27LN4;
INFOPLIST_FILE = AudioLabSwift/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 13.6;
LD_RUNPATH_SEARCH_PATHS = (
Expand Down
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<Bucket
uuid = "3369ECCC-8B02-4E4A-834F-087617B38EF8"
type = "1"
version = "2.0">
<Breakpoints>
<BreakpointProxy
BreakpointExtensionID = "Xcode.Breakpoint.FileBreakpoint">
<BreakpointContent
uuid = "C922EE96-4345-4270-ACDA-BC174E1FA1B6"
shouldBeEnabled = "No"
ignoreCount = "0"
continueAfterRunningActions = "No"
filePath = "AudioLabSwift/AudioModel.swift"
startingColumnNumber = "9223372036854775807"
endingColumnNumber = "9223372036854775807"
startingLineNumber = "106"
endingLineNumber = "106"
landmarkName = "runEveryInterval()"
landmarkType = "7">
</BreakpointContent>
</BreakpointProxy>
</Breakpoints>
</Bucket>
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>AudioLabSwift.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>0</integer>
</dict>
</dict>
</dict>
</plist>
164 changes: 45 additions & 119 deletions AudioLabSwift/AudioModel.swift
Original file line number Diff line number Diff line change
Expand Up @@ -13,67 +13,53 @@ class AudioModel {

// MARK: Properties
private var BUFFER_SIZE:Int
    // these properties are for interfacing with the API
// the user can access these arrays at any time and plot them if they like
var timeData:[Float]
var fftData:[Float]
var ptsData:[Float]
lazy var samplingRate:Int = {
return Int(self.audioManager!.samplingRate)
}()

// MARK: Public Methods
init(buffer_size:Int) {
BUFFER_SIZE = buffer_size
        // anything not lazily instantiated should be allocated here
timeData = Array.init(repeating: 0.0, count: BUFFER_SIZE)
fftData = Array.init(repeating: 0.0, count: BUFFER_SIZE/2)
ptsData = Array.init(repeating: 0.0, count: 20)
}

// public function for starting processing of microphone data
func startMicrophoneProcessing(withFps:Double){
self.audioManager?.inputBlock = self.handleMicrophone

// repeat this fps times per second using the timer class
Timer.scheduledTimer(timeInterval: 1.0/withFps, target: self,
selector: #selector(self.runEveryInterval),
userInfo: nil,
repeats: true)
}

// public function for playing from a file reader file
func startProcesingAudioFileForPlayback(){
self.audioManager?.outputBlock = self.handleSpeakerQueryWithAudioFile
self.fileReader?.play()
        // set up the microphone to copy into the circular buffer
if let manager = self.audioManager{
manager.inputBlock = self.handleMicrophone

// repeat this fps times per second using the timer class
// every time this is called, we update the arrays "timeData" and "fftData"
Timer.scheduledTimer(withTimeInterval: 1.0/withFps, repeats: true) { _ in
self.runEveryInterval()
}

}
}

func startProcessingSinewaveForPlayback(withFreq:Float=330.0){
sineFrequency = withFreq
// Two examples are given that use either objective c or that use swift
// the swift code for loop is slightly slower thatn doing this in c,
// but the implementations are very similar
//self.audioManager?.outputBlock = self.handleSpeakerQueryWithSinusoid // swift for loop
self.audioManager?.setOutputBlockToPlaySineWave(sineFrequency) // c for loop
}

// You must call this when you want the audio to start being handled by our model
func play(){
self.audioManager?.play()
if let manager = self.audioManager{
manager.play()
}
}

// Here is an example function for getting the maximum frequency
func getMaxFrequencyMagnitude() -> (Float,Float){
// this is the slow way of getting the maximum...
// you might look into the Accelerate framework to make things more efficient
var max:Float = -1000.0
var maxi:Int = 0

if inputBuffer != nil {
for i in 0..<Int(fftData.count){
if(fftData[i]>max){
max = fftData[i]
maxi = i
}
}
func pause(){
if let manager = self.audioManager{
manager.pause()
}
let frequency = Float(maxi) / Float(BUFFER_SIZE) * Float(self.audioManager!.samplingRate)
return (max,frequency)
}
// for sliding max windows, you might be interested in the following: vDSP_vswmax


//==========================================
// MARK: Private Properties
Expand All @@ -85,10 +71,6 @@ class AudioModel {
return FFTHelper.init(fftSize: Int32(BUFFER_SIZE))
}()

private lazy var outputBuffer:CircularBuffer? = {
return CircularBuffer.init(numChannels: Int64(self.audioManager!.numOutputChannels),
andBufferSize: Int64(BUFFER_SIZE))
}()

private lazy var inputBuffer:CircularBuffer? = {
return CircularBuffer.init(numChannels: Int64(self.audioManager!.numInputChannels),
Expand All @@ -98,102 +80,46 @@ class AudioModel {

//==========================================
// MARK: Private Methods
private lazy var fileReader:AudioFileReader? = {

if let url = Bundle.main.url(forResource: "satisfaction", withExtension: "mp3"){
var tmpFileReader:AudioFileReader? = AudioFileReader.init(audioFileURL: url,
samplingRate: Float(audioManager!.samplingRate),
numChannels: audioManager!.numOutputChannels)

tmpFileReader!.currentTime = 0.0
print("Audio file succesfully loaded for \(url)")
return tmpFileReader
}else{
print("Could not initialize audio input file")
return nil
}
}()
// NONE for this model

//==========================================
// MARK: Model Callback Methods
@objc
private func runEveryInterval(){
if inputBuffer != nil {
// copy data to swift array
self.inputBuffer!.fetchFreshData(&timeData, withNumSamples: Int64(BUFFER_SIZE))
// copy time data to swift array
self.inputBuffer!.fetchFreshData(&timeData, // copied into this array
withNumSamples: Int64(BUFFER_SIZE))

// now take FFT and display it
// now take FFT
fftHelper!.performForwardFFT(withData: &timeData,
andCopydBMagnitudeToBuffer: &fftData)
andCopydBMagnitudeToBuffer: &fftData) // fft result is copied into fftData array

// at this point, we have saved the data to the arrays:
// timeData: the raw audio samples
// fftData: the FFT of those same samples
// the user can now use these variables however they like
let a = BUFFER_SIZE/40

for i in 0...19 {
let b = i * a
var max:Float = -1000.0
for j in b...(b + a) {
if (fftData[j] > max) { max = fftData[j]}
}
ptsData[i] = max
}

}
}



//==========================================
// MARK: Audiocard Callbacks
// in obj-C it was (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels)
// and in swift this translates to:
private func handleMicrophone (data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32) {
// var max:Float = 0.0
// if let arrayData = data{
// for i in 0..<Int(numFrames){
// if(abs(arrayData[i])>max){
// max = abs(arrayData[i])
// }
// }
// }
// // can this max operation be made faster??
// print(max)

// copy samples from the microphone into circular buffer
self.inputBuffer?.addNewFloatData(data, withNumSamples: Int64(numFrames))
}

private func handleSpeakerQueryWithAudioFile(data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32){
if let file = self.fileReader{

            // read from file, loading into data (a float pointer)
file.retrieveFreshAudio(data,
numFrames: numFrames,
numChannels: numChannels)

// set samples to output speaker buffer
self.outputBuffer?.addNewFloatData(data,
withNumSamples: Int64(numFrames))
}
}

// _ _ _ _ _ _ _ _ _ _
// / \ / \ / \ / \ / \ / \ / \ / \ / \ /
// / \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
var sineFrequency:Float = 0.0 { // frequency in Hz (changeable by user)
didSet{
// if using swift for generating the sine wave: when changed, we need to update our increment
//phaseIncrement = Float(2*Double.pi*sineFrequency/audioManager!.samplingRate)

// if using objective c: this changes the frequency in the novocain block
self.audioManager?.sineFrequency = sineFrequency
}
}
private var phase:Float = 0.0
private var phaseIncrement:Float = 0.0
private var sineWaveRepeatMax:Float = Float(2*Double.pi)

private func handleSpeakerQueryWithSinusoid(data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32){
// while pretty fast, this loop is still not quite as fast as
// writing the code in c, so I placed a function in Novocaine to do it for you
// use setOutputBlockToPlaySineWave() in Novocaine
if let arrayData = data{
var i = 0
while i<numFrames{
arrayData[i] = sin(phase)
phase += phaseIncrement
if (phase >= sineWaveRepeatMax) { phase -= sineWaveRepeatMax }
i+=1
}
}
}
}
Loading