Datasets · Tasks: Audio Classification · Sub-tasks: keyword-spotting · Languages: English · Size: 10K - 100K

soeren committed · Commit 72d08a5 · Parent(s): 13bba81
added annotations
.gitignore CHANGED

@@ -4,4 +4,7 @@ Pipfile.lock
 data/dataset_audio_annotated_and_embedding_with_probs.parquet.gzip
 data/dataset_audio_test.parquet.gzip
 data/dataset_audio_train.parquet.gzip
-data/dataset_audio_validation.parquet.gzip
+data/dataset_audio_validation.parquet.gzip
+
+*.ipynb*
+test.py
data/clip_metadata.py CHANGED

@@ -4,7 +4,7 @@ _SPLIT = "train"
 
 df = pd.read_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip")
 
-clipped_df = df.filter(["…
-…
+clipped_df = df.filter(["label_string", "probability", "probability_vector", "prediction",
+                        "prediction_string", "embedding_reduced"], axis=1)
 
 clipped_df.to_parquet("data/dataset_audio_" + _SPLIT + "_clipped.parquet.gzip")
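For reference, the clipped files written above should round-trip to exactly the six enriched columns. A minimal sketch, assuming the train split has already been clipped locally:

import pandas as pd

# load the clipped split written by clip_metadata.py
df = pd.read_parquet("data/dataset_audio_train_clipped.parquet.gzip")
print(df.columns.tolist())
# expected: ['label_string', 'probability', 'probability_vector',
#            'prediction', 'prediction_string', 'embedding_reduced']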
data/create_enriched_annotated_speechcommands.py CHANGED

@@ -5,8 +5,6 @@
 #
 # We use the Huggingface transformers library to create an embedding for an audio dataset
 #
-#
-#
 
 # ## tldr; Play as callable functions
 
@@ -15,12 +13,12 @@
 import datasets
 from transformers import AutoFeatureExtractor, AutoModel, ASTForAudioClassification
 import torch
-from renumics import spotlight
 import pandas as pd
 import umap
 import numpy as np
 
 _SPLIT = "train"
+_MODELNAME = "MIT/ast-finetuned-speech-commands-v2"
 
 def __set_device():
     device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -28,56 +26,47 @@ def __set_device():
     torch.cuda.empty_cache()
     return device
 
-…
-        inputs = feature_extractor(raw_speech=audios, return_tensors="pt", padding=True).to(device)
-        embeddings = model(**inputs).last_hidden_state[:, 0].cpu()
-
-        return {"embedding": embeddings}
-
-    return pp
-
-
-def huggingface_embedding(dataset, modelname, batched=True, batch_size=8):
-    # initialize huggingface model
-    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
-    model = AutoModel.from_pretrained(modelname, output_hidden_states=True)
-
-    # compute embedding
-    device = __set_device()
-    extract_fn = extract_embeddings(model.to(device), feature_extractor)
-    updated_dataset = dataset.map(extract_fn, batched=batched, batch_size=batch_size)
+def __permute_model2dataset_probabilities(model_probabilities):
+    """
+    The model's and the dataset's int mappings differ, so we permute the model's
+    logits to match the dataset's order.
+    Due to the small size of the vector, do this on the CPU.
+    """
+    cpu_copy = model_probabilities.to("cpu").detach().numpy()
+
+    # only select model2dataset_truncated values, as they map model ints to existing dataset ints
+    permuted_vector = np.empty(shape=(len(labels),))
+    # Problem: the model has more output classes than the dataset.
+    # Hence, only add those outputs that are mapped.
+    for key in model2dataset_truncated:
+        dataset_int = model2dataset_truncated[key]
+        permuted_vector[dataset_int] = cpu_copy[key]
+
+    return permuted_vector
 
 
-def …
+def batch_probabilities_and_embeddings(model, feature_extractor, classifier):
     device = model.device
 
     def processing(batch):
         audios = [element["array"] for element in batch["audio"]]
         inputs = feature_extractor(raw_speech=audios, return_tensors="pt", padding=True, sampling_rate=16000).to(device)
         outputs = model(**inputs)
-…
+
+        embeddings = classifier(**inputs).last_hidden_state[:, 0].cpu()
+        return {"logits": torch.tensor([__permute_model2dataset_probabilities(logit) for logit in outputs.logits]),
+                "embedding": embeddings}
     return processing
 
 
-def …
+def annotate_probabilities_and_embeddings(dataset, modelname, batched=True, batch_size=8):
     model = ASTForAudioClassification.from_pretrained(modelname)
     feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
+    classifier = AutoModel.from_pretrained(modelname, output_hidden_states=True)
 
     device = __set_device()
-    calc_outputs = …
+    calc_outputs = batch_probabilities_and_embeddings(model.to(device), feature_extractor, classifier.to(device))
     output_dataset = dataset.map(calc_outputs, batched=batched, batch_size=batch_size)
 
     return output_dataset
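The functions above follow the usual closure pattern for datasets.map: the outer function captures the model and feature extractor, and the inner processing function receives a batch as a dict of columns. A minimal dependency-free sketch of the same pattern (toy data and hypothetical names, not the repo's code):

def make_batch_fn(scale):  # stands in for the captured model/feature_extractor
    def processing(batch):  # batch maps column name -> list of values
        return {"scaled": [x * scale for x in batch["value"]]}
    return processing

print(make_batch_fn(2.0)({"value": [1.0, 2.0, 3.0]}))  # {'scaled': [2.0, 4.0, 6.0]}
# with Huggingface datasets this becomes:
# dataset.map(make_batch_fn(...), batched=True, batch_size=8)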
@@ -94,15 +83,18 @@ def annotate_batch(model, dataset):
         logits = [torch.tensor(element) for element in batch["logits"]]
         probabilities_per_class = [torch.nn.functional.softmax(logit, dim=-1) for logit in logits]
         predicted_class_ids = [torch.argmax(logit).item() for logit in logits]
-
-        # …
-…
+
+        # logits are already permuted to match the dataset ordering -> no additional work needed
+
+        predicted_labels = [labels[predicted_class_id] for predicted_class_id in predicted_class_ids]
         annotated_labels = [labels[element] for element in batch["label"]]
         probabilities = []
         for index, prob_per_class in enumerate(probabilities_per_class):
             probabilities.append(prob_per_class[predicted_class_ids[index]].item())
-        return {"…
-                "…
+        return {"label_string": annotated_labels, "probability": probabilities,
+                "probability_vector": probabilities_per_class,
+                "prediction": predicted_class_ids,
+                "prediction_string": predicted_labels}
 
     return batch_annotation
 
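annotate_batch recovers each prediction's confidence by taking the softmax probability at the argmax index. A worked toy example of that step (values rounded):

import torch

logit = torch.tensor([2.0, 0.5, 1.0])               # one sample, dataset ordering
probs = torch.nn.functional.softmax(logit, dim=-1)  # ~[0.63, 0.14, 0.23]
pred = torch.argmax(logit).item()                   # 0
print(pred, round(probs[pred].item(), 2))           # 0 0.63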
@@ -131,6 +123,16 @@ dataset = datasets.load_dataset('speech_commands', 'v0.01', split=_SPLIT)
 
 
 labels = dataset.features["label"].names
+# use a dict comprehension to build a dict that maps the dataset's string labels to the dataset's int labels
+label_dict = {label: i for label, i in zip(labels, range(len(labels)))}
+model = ASTForAudioClassification.from_pretrained(_MODELNAME)
+# look up the label for each model int in the label dict and translate it into the dataset int
+model2dataset_int_conversion = {i: label_dict[model.config.id2label[i]] if model.config.id2label[i] in label_dict.keys()
+                                else -1 for i in range(model.config.num_labels)}
+
+model2dataset_truncated = {key: value for key, value in model2dataset_int_conversion.items() if value != -1}
+
 
 
 # Let's have a look at all of the labels that we want to predict
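Since the AST checkpoint predicts more classes than the dataset defines, and in a different order, the mapping built above is what __permute_model2dataset_probabilities consumes. A self-contained toy version of the whole remapping (three dataset labels, four model classes; all names made up for illustration):

import numpy as np

dataset_labels = ["yes", "no", "stop"]                      # dataset int order
model_id2label = {0: "no", 1: "stop", 2: "yes", 3: "_unknown_"}

label_dict = {label: i for i, label in enumerate(dataset_labels)}
# map each model int to the dataset int, -1 where the dataset has no such class
model2dataset = {i: label_dict.get(model_id2label[i], -1) for i in model_id2label}
model2dataset_truncated = {k: v for k, v in model2dataset.items() if v != -1}

model_logits = np.array([0.1, 0.7, 0.2, 0.0])               # ordered by model ints
permuted = np.empty(len(dataset_labels))
for model_int, dataset_int in model2dataset_truncated.items():
    permuted[dataset_int] = model_logits[model_int]
print(permuted)                                             # [0.2 0.1 0.7], dataset order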
@@ -138,23 +140,15 @@ labels = dataset.features["label"].names
 print(labels)
 
 
-# ### Compute probabilities and annotate dataset
-
-# First, calculate logits per sample
+# ### Compute probabilities and embeddings and annotate dataset
 
 
-# calculate logits for each sample and annotate
-dataset_annotated = …
+# calculate logits and embedding for each sample and annotate
+dataset_annotated = annotate_probabilities_and_embeddings(dataset, _MODELNAME)
 
 
 # Now annotate labels and probabilities
-
-dataset_annotated_complete = annotate_dataset(dataset_annotated, "MIT/ast-finetuned-speech-commands-v2")
-
-
-# ### Compute embedding with vision transformer from Huggingface
-
-dataset_enriched = huggingface_embedding(dataset_annotated_complete, "MIT/ast-finetuned-speech-commands-v2")
+dataset_enriched = annotate_dataset(dataset_annotated, _MODELNAME)
 
 
 # ### Reduce embeddings for faster visualization
@@ -165,9 +159,6 @@ reduced_embedding = reducer.fit_transform(embeddings)
 dataset_enriched = dataset_enriched.add_column("embedding_reduced", list(reduced_embedding))
 
 
-print(dataset_enriched.features)
-
-
 df = dataset_enriched.to_pandas()
 
 
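The reduction step at the end of the script is a plain UMAP fit. A minimal sketch with random stand-in embeddings (768 assumed as the AST hidden size; the 2 components match the embedding_reduced length declared in speech_commands_enriched.py):

import numpy as np
import umap

embeddings = np.random.rand(100, 768)         # stand-in for the AST CLS embeddings
reducer = umap.UMAP(n_components=2)           # 2-d, matching "embedding_reduced"
reduced_embedding = reducer.fit_transform(embeddings)
print(reduced_embedding.shape)                # (100, 2)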
data/dataset_audio_test_clipped.parquet.gzip CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:334324791b5c45096971f7bbf6a05afa60160d9ff5a9fc966762b9c2923cff02
+size 642187
data/dataset_audio_train_clipped.parquet.gzip CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:b3fed567b3d361fcfff79ad59a2d45233ef663d965ef17a4d9f42d3bc9662693
+size 7864308
data/dataset_audio_validation_clipped.parquet.gzip CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:f0dfba31f1d0587ebb4b9cbeb96fd10ae93cac9036dc482f4802b1495ab6806c
+size 1443196
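These three files are Git LFS pointers rather than the parquet data itself: oid is the SHA-256 of the real file contents, and size is its byte count. A sketch to verify a checked-out file against its pointer:

import hashlib

with open("data/dataset_audio_test_clipped.parquet.gzip", "rb") as f:
    data = f.read()
print("oid sha256:" + hashlib.sha256(data).hexdigest())  # should match the pointer
print("size", len(data))                                 # should print 642187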
speech_commands_enriched.py CHANGED

@@ -168,10 +168,11 @@ class SpeechCommands(datasets.GeneratorBasedBuilder):
                     "speaker_id": datasets.Value("string"),
                     "utterance_id": datasets.Value("int8"),
                     #enriched features:
-                    "…
-                    "…
-                    "…
-                    "…
+                    "label_string": datasets.Value("string"),
+                    "probability": datasets.Value("float64"),
+                    "probability_vector": datasets.Sequence(feature=datasets.Value("float64"), length=31),
+                    "prediction": datasets.ClassLabel(names=self.config.labels),
+                    "prediction_string": datasets.Value("string"),
                     "embedding_reduced": datasets.Sequence(feature=datasets.Value("float32"), length=2),
                 }
             ),
@@ -258,14 +259,15 @@ class SpeechCommands(datasets.GeneratorBasedBuilder):
                 "speaker_id": speaker_id,
                 "utterance_id": utterance_id,
                 #enriched features:
-                "…
-                "…
-                "…
-                "…
+                "label_string": row[1]["label_string"],
+                "probability": row[1]["probability"],
+                "probability_vector": row[1]["probability_vector"],
+                "prediction": row[1]["prediction"],
+                "prediction_string": row[1]["prediction_string"],
                 "embedding_reduced": row[1]["embedding_reduced"]
             }
 
 #for debugging, comment out after
-…
+if __name__ == "__main__":
+    ds = datasets.load_dataset("speech_commands_enriched.py", 'v0.01', split="test",
+                               streaming=False)
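With the loader script updated, the enriched columns come back through datasets.load_dataset like any other feature. A minimal usage sketch (run from the repo root with the clipped parquet files in place):

import datasets

ds = datasets.load_dataset("speech_commands_enriched.py", "v0.01", split="test",
                           streaming=False)
print(ds.features["prediction"])   # ClassLabel over the 31 labels in the schema above
print(ds[0]["label_string"], ds[0]["probability"], ds[0]["prediction_string"])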