import os
import gradio as gr
import torch
import pydub
import torchaudio
from torchaudio.sox_effects import apply_effects_tensor
import numpy as np
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_audio(file_name):
    """Load an audio file with pydub and return (float32 samples, sample rate)."""
    audio = pydub.AudioSegment.from_file(file_name)
    arr = np.array(audio.get_array_of_samples(), dtype=np.float32)
    # Scale integer PCM samples to [-1.0, 1.0] based on the sample width.
    arr = arr / (1 << (8 * audio.sample_width - 1))
    return arr, audio.frame_rate
STYLE = """
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha256-YvdLHPgkqJ8DVUxjjnGVlMMJtNimJ6dYkowFFvp4kKs=" crossorigin="anonymous">
"""
OUTPUT_OK = (
    STYLE
    + """
    <div class="container">
        <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
        <div class="row"><h1 class="display-1 text-success" style="text-align: center">{:.1f}%</h1></div>
        <div class="row"><h1 style="text-align: center">similar</h1></div>
        <div class="row"><h1 class="text-success" style="text-align: center">Welcome, human!</h1></div>
        <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small></div>
    </div>
"""
)
OUTPUT_FAIL = (
    STYLE
    + """
    <div class="container">
        <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
        <div class="row"><h1 class="display-1 text-danger" style="text-align: center">{:.1f}%</h1></div>
        <div class="row"><h1 style="text-align: center">similar</h1></div>
        <div class="row"><h1 class="text-danger" style="text-align: center">You shall not pass!</h1></div>
        <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small></div>
    </div>
"""
)
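# sox effect chain applied to every recording: mix down to a single channel,
# resample to the 16 kHz rate WavLM expects, apply a -1 dB gain, strip silent
# stretches, and keep at most the first 10 seconds.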
EFFECTS = [
    ["remix", "-"],
    ["channels", "1"],
    ["rate", "16000"],
    ["gain", "-1.0"],
    ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],
    ["trim", "0", "10"],
]
THRESHOLD = 0.85  # similarity above this is reported as the same speaker
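# WavLM Base+ fine-tuned for speaker verification; it produces x-vector
# speaker embeddings that are compared with cosine similarity.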
model_name = "microsoft/wavlm-base-plus-sv"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)
cosine_sim = torch.nn.CosineSimilarity(dim=-1)
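# Load and preprocess both recordings, embed each with the model, and render
# the cosine similarity of the two x-vectors as an HTML verdict.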
def similarity_fn(path1, path2):
    if not (path1 and path2):
        return '<b style="color:red">ERROR: Please record audio for *both* speakers!</b>'

    # Load each recording and apply the sox preprocessing chain.
    wav1, sr1 = load_audio(path1)
    wav1, _ = apply_effects_tensor(torch.tensor(wav1).unsqueeze(0), sr1, EFFECTS)
    wav2, sr2 = load_audio(path2)
    wav2, _ = apply_effects_tensor(torch.tensor(wav2).unsqueeze(0), sr2, EFFECTS)

    input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
    input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)

    # Extract x-vector embeddings for both recordings.
    with torch.no_grad():
        emb1 = model(input1).embeddings
        emb2 = model(input2).embeddings

    emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()
    emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()
    similarity = cosine_sim(emb1, emb2).numpy()[0]

    if similarity >= THRESHOLD:
        return OUTPUT_OK.format(similarity * 100)
    return OUTPUT_FAIL.format(similarity * 100)
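# Gradio UI: two microphone recordings in, one HTML verdict out.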
inputs = [
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"),
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"),
]
output = gr.outputs.HTML(label="")
description = (
    "This demo will compare two speech samples and determine if they are from the same speaker. "
    "Try it with your own voice!"
)
article = (
    "<p style='text-align: center'>"
    "<a href='https://huggingface.co/microsoft/wavlm-base-plus-sv' target='_blank'>🎙️ Learn more about WavLM</a> | "
    "<a href='https://arxiv.org/abs/2110.13900' target='_blank'>📚 WavLM paper</a> | "
    "<a href='https://www.danielpovey.com/files/2018_icassp_xvectors.pdf' target='_blank'>📚 X-Vector paper</a>"
    "</p>"
)
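# Example input pairs (same-speaker and different-speaker, judging by the
# file names) from the samples/ directory.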
examples = [
    ["samples/cate_blanch.mp3", "samples/cate_blanch_2.mp3"],
    ["samples/cate_blanch.mp3", "samples/cate_blanch_3.mp3"],
    ["samples/cate_blanch_2.mp3", "samples/cate_blanch_3.mp3"],
    ["samples/heath_ledger.mp3", "samples/heath_ledger_3.mp3"],
    ["samples/russel_crowe.mp3", "samples/russel_crowe_2.mp3"],
    ["samples/cate_blanch.mp3", "samples/kirsten_dunst.wav"],
    ["samples/russel_crowe.mp3", "samples/kirsten_dunst.wav"],
    ["samples/russel_crowe_2.mp3", "samples/kirsten_dunst.wav"],
    ["samples/heath_ledger.mp3", "samples/denzel_washington.mp3"],
    ["samples/leonardo_dicaprio.mp3", "samples/russel_crowe.mp3"],
    ["samples/leonardo_dicaprio.mp3", "samples/russel_crowe_2.mp3"],
    ["samples/naomi_watts.mp3", "samples/denzel_washington.mp3"],
    ["samples/naomi_watts.mp3", "samples/leonardo_dicaprio.mp3"],
    ["samples/naomi_watts.mp3", "samples/cate_blanch_2.mp3"],
    ["samples/naomi_watts.mp3", "samples/kirsten_dunst.wav"],
]
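# Assemble the Gradio interface and launch it with request queuing enabled.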
interface = gr.Interface(
    fn=similarity_fn,
    inputs=inputs,
    outputs=output,
    title="Voice Authentication with WavLM + X-Vectors",
    description=description,
    article=article,
    layout="horizontal",
    theme="huggingface",
    allow_flagging=False,
    live=False,
    examples=examples,
)
interface.launch(enable_queue=True)