import gradio as gr
from huggingface_hub import snapshot_download
import numpy as np
from scipy.io import wavfile

model_ids = [
    'suno/bark',
]
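
# Download each checkpoint from the Hugging Face Hub into checkpoints/<model_name>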
for model_id in model_ids:
    model_name = model_id.split('/')[-1]
    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')

from TTS.tts.configs.bark_config import BarkConfig
from TTS.tts.models.bark import Bark

config = BarkConfig()
model = Bark.init_from_config(config)
model.load_checkpoint(config, checkpoint_dir="checkpoints/bark", eval=True)
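
# Voice cloning: `infer` looks for a reference clip at bark_voices/speaker/speaker.wav
# (or a precomputed bark_voices/speaker/speaker.npz), matching speaker_id="speaker" below.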
def infer(prompt):
    # With a random speaker:
    # output_dict = model.synthesize(prompt, config, speaker_id="random", voice_dirs=None)

    # Cloning a speaker: assumes a speaker file exists at
    # `bark_voices/speaker_n/speaker.wav` or `bark_voices/speaker_n/speaker.npz`
    output_dict = model.synthesize(prompt, config, speaker_id="speaker", voice_dirs="bark_voices/")
    print(output_dict)

    sample_rate = 24000  # Bark generates audio at 24 kHz
    wavfile.write('output.wav', sample_rate, output_dict['wav'])

    return "output.wav"

gr.Interface(fn=infer, inputs=[gr.Textbox(value="Hello, my name is Manmay, how are you?")], outputs=[gr.Audio()]).launch()