# Hugging Face Space: Stable Audio text-to-audio demo (runs on ZeroGPU hardware)
import spaces
import torch
import torchaudio
from einops import rearrange
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
import gradio as gr
import os
from huggingface_hub import login

# Create a folder for temporary files
os.makedirs('static', exist_ok=True)

# Authenticate with the Hugging Face Hub if a token is provided in the environment
if os.environ.get("HUGGING_FACE_HUB_TOKEN"):
    token = os.environ["HUGGING_FACE_HUB_TOKEN"].strip()
    try:
        login(token=token, add_to_git_credential=True)
    except Exception as e:
        print(f"Warning: login failed - {str(e)}")

@spaces.GPU  # request a ZeroGPU slot so CUDA is available inside this function
def generate_audio(prompt, duration=10, steps=50, cfg_scale=7):
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"

        # Load the model and move it to the target device
        model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
        model = model.to(device)

        sample_rate = model_config["sample_rate"]
        sample_size = model_config["sample_size"]

        # Set up the text/timing conditioning
        conditioning = [{
            "prompt": prompt,
            "seconds_start": 0,
            "seconds_total": duration  # no longer capped
        }]

        # Generate audio with the user-adjustable parameters
        output = generate_diffusion_cond(
            model,
            steps=steps,  # no longer capped
            cfg_scale=cfg_scale,
            conditioning=conditioning,
            sample_size=sample_size,
            sigma_min=0.3,
            sigma_max=500,
            sampler_type="dpmpp-3m-sde",
            device=device
        )

        # Post-process: collapse the batch dimension, peak-normalize, convert to 16-bit PCM
        output = rearrange(output, "b d n -> d (b n)")
        output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()

        # Save the audio to disk and return the path for the Gradio Audio component
        output_path = "static/generated_audio.wav"
        torchaudio.save(output_path, output, sample_rate)
        return output_path
    except Exception as e:
        print(f"Error during audio generation: {str(e)}")
        raise
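
# Quick local sanity check (hypothetical example prompt, not part of the Space UI):
# calling the generator directly returns the path of the saved WAV file.
#
#   wav_path = generate_audio("warm analog synth pad, 120 BPM", duration=5, steps=20)
#   print(wav_path)  # -> static/generated_audio.wav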

# Custom CSS for a cleaner look
custom_css = """
body { background-color: #f6f6f6; }
.gradio-container { max-width: 800px; margin: auto; }
"""

# Gradio interface built with Blocks
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# Stable Audio Generator")
    gr.Markdown("Generate audio from text descriptions with Stable Audio 1.0")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe the sound you want..."
            )
            duration = gr.Slider(
                minimum=1, maximum=30, value=10,
                step=1, label="Duration (seconds)"
            )
            steps = gr.Slider(
                minimum=20, maximum=100, value=50,
                step=5, label="Number of steps"
            )
            cfg_scale = gr.Slider(
                minimum=1, maximum=15, value=7,
                step=0.5, label="CFG scale"
            )
            generate_btn = gr.Button("Generate")

        with gr.Column():
            output = gr.Audio(label="Generated audio", type="filepath")

    generate_btn.click(
        fn=generate_audio,
        inputs=[prompt, duration, steps, cfg_scale],
        outputs=output
    )

if __name__ == "__main__":
    demo.launch()
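
# Assumed dependencies (package names inferred from the imports above; pin versions as needed).
# A matching requirements.txt would look roughly like:
#
#   spaces
#   torch
#   torchaudio
#   einops
#   stable-audio-tools
#   gradio
#   huggingface_hub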