-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreprocessing.py
More file actions
180 lines (144 loc) · 6.52 KB
/
preprocessing.py
File metadata and controls
180 lines (144 loc) · 6.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
import keras
import librosa
import math
import numpy as np
import os
import pickle
import random
import tensorflow as tf
# import tensorflow_io as tfio
import constants
class LoadAudio(keras.Model):
    """Keras model that loads one audio file from a path tensor.

    Loads via librosa (resampled to ``constants.model_sr``, mono), trims
    silence and peak-normalizes via ``clean_audio``, and optionally removes
    a random prefix (``chop_audio``) as augmentation.
    """

    def __init__(self, augment=False):
        super().__init__()
        # When True, a random-length prefix of each clip is chopped on load.
        self.augment = augment

    def call(self, inputs, fixed_length):
        """Load, clean, optionally augment, and length-normalize one clip.

        Args:
            inputs: scalar string tensor holding the audio file path.
            fixed_length: target sample count, or None to keep the natural
                (cleaned/chopped) length.

        Returns:
            1-D float32 waveform; truncated then zero-padded to
            ``fixed_length`` samples when ``fixed_length`` is given.
        """
        # librosa.load is plain Python, so it must run via py_function.
        x = tf.py_function(func=LoadAudio.load_func, inp=[inputs], Tout=tf.float32)
        x = clean_audio(x)
        if self.augment:
            # Label argument (-1) is a placeholder; chop_audio passes it through.
            x, _ = chop_audio(x, -1)
        if fixed_length is not None:
            x = x[:fixed_length]
            # https://stackoverflow.com/questions/58847401/how-can-i-fill-a-ragged-tensor-with-zeros-to-make-a-square-tensor
            x = tf.pad(x, [[0, fixed_length-tf.shape(x)[0]]])
        return x

    @staticmethod
    def load_func(inputs):
        """Eager helper: decode the path tensor and load resampled mono audio."""
        x, sr = librosa.load(inputs.numpy(), sr=constants.model_sr, mono=True)
        return x
# Shared loader instances: one with chop augmentation (training), one without.
load_audio_augment = LoadAudio(augment=True)
load_audio = LoadAudio(augment=False)
def get_audio_length(inputs):
    """
    Estimate number of samples in the audio file (when resampled)
    Arg:
    - inputs (tensor string): filename
    Returns:
        Number of samples (int)
    """
    inputs = inputs.numpy()
    # The resampled length is just duration (seconds) * target rate. The
    # previous version computed duration*sr*(model_sr/sr), which reduces to
    # the same value, but needed an extra librosa.get_samplerate() pass over
    # the file header (and passed sr= to a path-based get_duration call,
    # where it is ignored).
    duration = librosa.get_duration(path=inputs)
    return round(duration * constants.model_sr)
def get_audio_length_wrap(inputs):
    """Graph-safe wrapper: run get_audio_length eagerly via py_function."""
    length = tf.py_function(get_audio_length, [inputs], tf.int32)
    return length
def load_audio_vectorize(x, fixed_length, augment=False):
    """Load a batch of audio files, zero-padding every clip to one length.

    When fixed_length is None, the longest clip in the batch determines
    the padded length.
    """
    loader = LoadAudio(augment=augment)
    if fixed_length is None:
        lengths = tf.map_fn(get_audio_length_wrap, x, fn_output_signature=tf.int32)
        fixed_length = tf.cast(tf.reduce_max(lengths), tf.int32)
    padded = tf.map_fn(lambda path: loader(path, fixed_length=fixed_length), x,
                       fn_output_signature=tf.float32)
    return padded
def duration(x):
    """Return the duration in seconds of the audio file at path x."""
    seconds = librosa.get_duration(path=x)
    return seconds
def clean_audio(audio):
    """Trim leading/trailing near-silence and peak-normalize a waveform.

    A sample counts as silence when its magnitude is at most 1e-5 times
    the clip's peak. All-zero (or empty) input is returned unchanged: the
    previous version indexed non_silence[0] unconditionally, which fails
    (and then divides by zero) when no sample exceeds the threshold.

    Args:
        audio: 1-D float waveform tensor.

    Returns:
        The trimmed waveform scaled so its peak magnitude is 1.
    """
    largest_val = tf.reduce_max(tf.abs(audio))

    def _trim_and_normalize():
        silence_thresh = largest_val * 1e-5
        non_silence = tf.where(tf.abs(audio) > silence_thresh)
        first = non_silence[0][0]
        last = non_silence[-1][0]
        trimmed = audio[first:last+1]
        # x = x - tf.reduce_mean(x) # TO DO: uncomment so that DC component is removed
        return trimmed / largest_val

    # tf.cond keeps the guard graph-compatible; reduce_max of an empty
    # tensor is -inf, so empty input also takes the identity branch.
    return tf.cond(largest_val > 0, _trim_and_normalize, lambda: audio)
def shift_audio(audio):
    """Circularly rotate the clip by a random offset (time-shift augment).

    Uses tf.shape rather than len() so the op also works in graph mode,
    where the static first dimension may be unknown.

    Args:
        audio: 1-D waveform tensor.

    Returns:
        The waveform rotated left by a random number of samples in
        [0, len(audio)).
    """
    num_samples = tf.shape(audio)[0]
    shift = tf.random.uniform((), 0, num_samples, dtype=tf.int32)
    audio_shifted = tf.concat([audio[shift:], audio[:shift]], axis=0)
    return audio_shifted
def chop_audio(audio, label):
    """Drop a random-length prefix of the clip (random start-point augment).

    Mirrors Sprengel et al.'s approach of letting a training clip begin at
    different offsets.

    Args:
        audio: input waveform
        label: class id (passed through unchanged)

    Returns:
        (chopped_audio, label) tuple.
    """
    # NOTE: there appears to be a seed that is reset for each iteration
    # through a dataset, making the same amount (sometimes) chopped from
    # an audio clip
    ratio = tf.random.uniform(())
    chopped = chop_audio_helper(audio, ratio)
    return chopped, label
def chop_audio_helper(audio, chop_ratio):
    """Remove the first chop_ratio fraction of the removable samples.

    At most len(audio) - constants.frame_length samples are removable, so
    at least one full frame always survives the chop.
    """
    removable = tf.shape(audio)[0] - constants.frame_length
    max_chop = tf.cast(tf.math.maximum(0, removable), tf.float32)
    num_chopped = tf.cast(chop_ratio * (max_chop + 1), tf.int32)
    return audio[num_chopped:]
def match_lengths(orig_audio, other_audio):
    """Tile and/or truncate other_audio so its length equals len(orig_audio).

    NOTE(review): uses len() and a Python `if` on tensors, so this assumes
    eager execution (or a statically known first dimension) — confirm
    against callers.
    """
    len_orig = len(orig_audio)
    if len(other_audio) < len_orig:
        # Repeat the shorter clip enough whole times to cover len_orig.
        num_copies = tf.math.ceil(len_orig / len(other_audio))
        num_copies = tf.cast(num_copies, tf.int32)
        other_audio = tf.tile(other_audio, (num_copies,))
    # Truncate to the exact length (handles both the tiled case and the
    # case where other_audio was already longer than orig_audio).
    other_audio = other_audio[:len_orig]
    return other_audio
def combine_recordings(orig_audio, label, index_filename_list, weight=None):
    """Add recordings with the same label
    (See Sprengel et al., "Audio based bird species identification using deep learning techniques",
    CLEF2016 Working Notes,
    https://ceur-ws.org/Vol-1609/16090547.pdf)
    Args:
        orig_audio: Audio to augment
        label: one-hot (or score) vector; argmax selects the class id
        index_filename_list: list of filename lists (a list for each possible class id)
        weight: weight to apply to orig_audio [Default: random value in [.5, 1.]
    """
    label = tf.argmax(label)
    # random.choice, not random.choices: choices returns a one-element
    # list, which py_function turns into a shape-(1,) string tensor
    # instead of the scalar path that librosa.load expects downstream.
    other_file = tf.py_function(lambda x: random.choice(index_filename_list[x]),
                                [label],
                                tf.string)
    # Load the partner clip at its natural length; match_lengths (inside
    # the helper) reconciles the two lengths.
    other_audio = load_audio_augment(other_file, fixed_length=None)
    augment_audio = combine_recordings_helper(orig_audio, other_audio, weight)
    return augment_audio
def combine_recordings_helper(orig_audio, other_audio, weight):
    """Mix two clips as a weighted sum; other_audio is length-matched first.

    weight defaults to a random draw from [.5, 1.), so the original clip
    always dominates the mix.
    """
    other_audio = match_lengths(orig_audio=orig_audio, other_audio=other_audio)
    if weight is None:
        weight = tf.random.uniform((), .5, 1.)
    mixed = weight * orig_audio + (1 - weight) * other_audio
    return mixed
def combine_labels_helper(orig_label, other_label):
    """Merge labels after mixing two clips.

    Every positive entry of other_label contributes at least
    constants.secondary_label_val; entries already stronger in orig_label
    keep their original value.
    """
    is_positive = tf.cast(tf.not_equal(other_label, 0), other_label.dtype)
    secondary = is_positive * constants.secondary_label_val
    return tf.maximum(orig_label, secondary)
def combine_recordings_within_batch(audio, label):
    """Mix every clip in a batch with a randomly chosen partner clip from
    the same batch, merging labels (partner positives become secondary).
    """
    perm = tf.random.shuffle(tf.range(0, tf.shape(audio)[0]))
    partner_audio = tf.gather(audio, perm)
    partner_label = tf.gather(label, perm)
    mixed_audio = tf.map_fn(
        lambda pair: combine_recordings_helper(pair[0], pair[1], None),
        [audio, partner_audio],
        fn_output_signature=tf.float32)
    mixed_label = combine_labels_helper(label, partner_label)
    return mixed_audio, mixed_label
def combine_recordings_vectorize(orig_audio, label, index_filename_list, weight=None):
    """Apply combine_recordings to each (audio, label) pair in a batch.

    Fix: the weight argument was accepted but silently dropped; it is now
    forwarded to combine_recordings (weight=None preserves the previous
    random-weight behavior).
    """
    augment_audio = tf.map_fn(
        lambda xx: combine_recordings(xx[0], xx[1], index_filename_list, weight),
        [orig_audio, label],
        fn_output_signature=tf.float32)
    return augment_audio
def frame_audio(audio):
    """Split one clip into frames, keeping at most
    constants.fixed_num_frames of them."""
    frames = frame_audio_helper(audio)
    return frames[:constants.fixed_num_frames]
def frame_audio_vectorize(audio):
    """Frame a batch of clips, capping each clip at
    constants.fixed_num_frames frames."""
    frames = frame_audio_helper(audio)
    return frames[:, :constants.fixed_num_frames]
def frame_audio_helper(audio):
    """Window the waveform into frames of constants.frame_length samples,
    hopping constants.frame_step, zero-padding the final frame."""
    return tf.signal.frame(
        audio,
        frame_length=constants.frame_length,
        frame_step=constants.frame_step,
        pad_end=True,
    )