Update README.md
README.md CHANGED

```diff
@@ -32,7 +32,7 @@ from transformers import pipeline
 dataset = load_dataset("ashraq/esc50")
 audio = dataset["train"]["audio"][-1]["array"]
 
-audio_classifier = pipeline(task="zero-shot-audio-classification", model="
+audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/larger_clap_general")
 output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
 print(output)
 >>> [{"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}]
```
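
For reference, the updated snippet from this hunk assembled into a runnable form. This is a sketch: the `datasets` import is not shown in the diff and is assumed here.

```python
# Zero-shot audio classification with the updated checkpoint.
from datasets import load_dataset  # assumed; not shown in the hunk header
from transformers import pipeline

dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]  # waveform of the last training sample

audio_classifier = pipeline(
    task="zero-shot-audio-classification", model="laion/larger_clap_general"
)
output = audio_classifier(
    audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]
)
print(output)  # the dog label should receive the highest score
```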

```diff
@@ -51,8 +51,8 @@ from transformers import ClapModel, ClapProcessor
 librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
 audio_sample = librispeech_dummy[0]
 
-model = ClapModel.from_pretrained("
-processor = ClapProcessor.from_pretrained("
+model = ClapModel.from_pretrained("laion/larger_clap_general")
+processor = ClapProcessor.from_pretrained("laion/larger_clap_general")
 
 inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt")
 audio_embed = model.get_audio_features(**inputs)
```
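
Likewise, the second hunk's feature-extraction snippet as a self-contained sketch (same assumption about the `datasets` import):

```python
# Extracting CLAP audio embeddings with the updated checkpoint (CPU).
from datasets import load_dataset  # assumed; not shown in the hunk header
from transformers import ClapModel, ClapProcessor

librispeech_dummy = load_dataset(
    "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
)
audio_sample = librispeech_dummy[0]

model = ClapModel.from_pretrained("laion/larger_clap_general")
processor = ClapProcessor.from_pretrained("laion/larger_clap_general")

inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt")
audio_embed = model.get_audio_features(**inputs)  # pooled audio embedding tensor
```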

```diff
@@ -67,8 +67,8 @@ from transformers import ClapModel, ClapProcessor
 librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
 audio_sample = librispeech_dummy[0]
 
-model = ClapModel.from_pretrained("
-processor = ClapProcessor.from_pretrained("
+model = ClapModel.from_pretrained("laion/larger_clap_general").to(0)
+processor = ClapProcessor.from_pretrained("laion/larger_clap_general")
 
 inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt").to(0)
 audio_embed = model.get_audio_features(**inputs)
```
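
The third hunk is the same flow on GPU; a sketch assuming a CUDA device at index 0:

```python
# GPU variant from the third hunk: model and processed inputs are both
# moved to CUDA device 0 via `.to(0)`; assumes a CUDA-capable GPU.
from datasets import load_dataset  # assumed; not shown in the hunk header
from transformers import ClapModel, ClapProcessor

librispeech_dummy = load_dataset(
    "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
)
audio_sample = librispeech_dummy[0]

model = ClapModel.from_pretrained("laion/larger_clap_general").to(0)
processor = ClapProcessor.from_pretrained("laion/larger_clap_general")

# BatchFeature.to(...) moves every tensor in the processed inputs to the device
inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt").to(0)
audio_embed = model.get_audio_features(**inputs)
```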