Spaces:
Sleeping
Sleeping
File size: 5,509 Bytes
1ddb890 ac0c0ce 0ff04fc ac0c0ce 99f4e90 1ddb890 0ff04fc b072ff1 87ca550 1ddb890 b072ff1 87ca550 b072ff1 ff4e567 2a231ba ac0c0ce 2a231ba ac0c0ce ff4e567 ac0c0ce 87ca550 2a231ba ac0c0ce 5e53171 ac0c0ce 5e53171 ac0c0ce 87ca550 ac0c0ce 2a231ba ac0c0ce 1ddb890 99f4e90 ac0c0ce 1ddb890 85fcc07 0479c04 85fcc07 b072ff1 0479c04 99f4e90 b072ff1 ac0c0ce 0479c04 99f4e90 0479c04 eb814f3 b072ff1 99f4e90 b072ff1 eb814f3 b072ff1 1ddb890 7c43d6c ff064e2 7c43d6c ff064e2 b072ff1 ff064e2 7c43d6c 1ddb890 7c43d6c 1ddb890 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 |
import gradio as gr
import os
import subprocess
import spaces
from typing import Tuple, List, Dict
from pydub import AudioSegment
@spaces.GPU
def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Tuple[str, gr.HTML]:
    """Separate *audio_file* into stems with Demucs and mix the selected stems.

    Generator: yields ``(output_path_or_None, gr.HTML log)`` tuples so Gradio can
    stream progress into the UI. The final yield carries the mixed file path.

    Args:
        audio_file: Path to the uploaded audio file (``None`` if nothing uploaded).
        model_name: Demucs model to run (e.g. ``"htdemucs_ft"``).
        vocals / drums / bass / other: Which separated stems to include in the mix.
        mp3: If True, also transcode the mix to MP3 and return that path instead.
        mp3_bitrate: MP3 bitrate in kbit/s (used only when ``mp3`` is True).

    Raises:
        gr.Error: On missing input, Demucs failure, or no stem selected.
    """
    log_messages = []

    def stream_log(message):
        # Accumulate all messages so each yield re-renders the full log.
        log_messages.append(f"[{model_name}] {message}")
        return gr.HTML("<pre style='margin-bottom: 0;'>" + "<br>".join(log_messages) + "</pre>")

    yield None, stream_log("Starting separation process...")
    yield None, stream_log(f"Loading audio file: {audio_file}")

    if audio_file is None:
        yield None, stream_log("Error: No audio file provided")
        raise gr.Error("Please upload an audio file")

    # One output directory per (model, track) pair.
    output_dir = os.path.join("separated", model_name, os.path.splitext(os.path.basename(audio_file))[0])
    os.makedirs(output_dir, exist_ok=True)

    # Construct the Demucs command
    cmd = [
        "python", "-m", "demucs",
        "--out", output_dir,
        "-n", model_name,
        audio_file
    ]
    yield None, stream_log(f"Running Demucs command: {' '.join(cmd)}")

    try:
        # Run Demucs, streaming its stdout into the log as it arrives.
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        for line in process.stdout:
            yield None, stream_log(line.strip())
        process.wait()
        if process.returncode != 0:
            error_output = process.stderr.read()
            yield None, stream_log(f"Error: Demucs command failed with return code {process.returncode}")
            yield None, stream_log(f"Error output: {error_output}")
            raise gr.Error("Demucs separation failed. Check the logs for details.")
    except gr.Error:
        # BUG FIX: let the specific error above propagate instead of being
        # swallowed by the broad handler below and re-reported as "unexpected".
        raise
    except Exception as e:
        yield None, stream_log(f"Unexpected error: {str(e)}")
        raise gr.Error(f"An unexpected error occurred: {str(e)}")

    yield None, stream_log("Separation completed. Processing stems...")

    # Collect whichever of the four canonical stems Demucs produced.
    # NOTE(review): path layout assumes <output_dir>/<model_name>/<stem>.wav —
    # confirm against the Demucs version in use.
    stems: Dict[str, str] = {}
    for stem in ["vocals", "drums", "bass", "other"]:
        stem_path = os.path.join(output_dir, model_name, f"{stem}.wav")
        if os.path.exists(stem_path):
            stems[stem] = stem_path
            yield None, stream_log(f"Found {stem} stem")

    selected_stems: List[str] = []
    for stem, selected in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]):
        if selected and stem in stems:
            selected_stems.append(stems[stem])
            yield None, stream_log(f"Selected {stem} stem for mixing")

    if not selected_stems:
        yield None, stream_log("Error: No stems selected for mixing")
        raise gr.Error("Please select at least one stem to mix.")

    output_file: str = os.path.join(output_dir, "mixed.wav")
    yield None, stream_log("Mixing selected stems...")
    if len(selected_stems) == 1:
        os.rename(selected_stems[0], output_file)
        # BUG FIX: bind mixed_audio here too — the MP3 branch below used it
        # unconditionally, raising NameError for a single selected stem.
        mixed_audio: AudioSegment = AudioSegment.from_wav(output_file)
    else:
        # BUG FIX: overlay the stems so they play simultaneously. The old code
        # used `+=`, which in pydub *concatenates* segments end-to-end (and
        # started from AudioSegment.empty(), whose default frame rate does not
        # match the stems).
        mixed_audio = AudioSegment.from_wav(selected_stems[0])
        for stem_path in selected_stems[1:]:
            mixed_audio = mixed_audio.overlay(AudioSegment.from_wav(stem_path))
        mixed_audio.export(output_file, format="wav")

    if mp3:
        yield None, stream_log(f"Converting to MP3 (bitrate: {mp3_bitrate}k)...")
        mp3_output_file: str = os.path.splitext(output_file)[0] + ".mp3"
        mixed_audio.export(mp3_output_file, format="mp3", bitrate=str(mp3_bitrate) + "k")
        output_file = mp3_output_file

    yield None, stream_log("Process completed successfully!")
    yield output_file, gr.HTML("<pre style='color: green;'>Separation and mixing completed successfully!</pre>")
# Define the Gradio interface
# Gradio UI. NOTE: in gr.Blocks, the order components are constructed inside
# the context managers *is* the rendered layout — do not reorder.
with gr.Blocks() as iface:
    gr.Markdown("# Demucs Music Source Separation and Mixing")
    gr.Markdown("Separate vocals, drums, bass, and other instruments from your music using Demucs and mix the selected stems.")

    with gr.Row():
        # Left column: all inputs and the submit button.
        with gr.Column(scale=1):
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            # Demucs model variants; htdemucs_ft (fine-tuned) is the default.
            model_dropdown = gr.Dropdown(
                ["htdemucs", "htdemucs_ft", "htdemucs_6s", "hdemucs_mmi", "mdx", "mdx_extra", "mdx_q", "mdx_extra_q"],
                label="Model Name",
                value="htdemucs_ft"
            )
            # Stem selection, laid out as a 2x2 grid of checkboxes.
            with gr.Row():
                vocals_checkbox = gr.Checkbox(label="Vocals", value=True)
                drums_checkbox = gr.Checkbox(label="Drums", value=True)
            with gr.Row():
                bass_checkbox = gr.Checkbox(label="Bass", value=True)
                other_checkbox = gr.Checkbox(label="Other", value=True)
            mp3_checkbox = gr.Checkbox(label="Save as MP3", value=False)
            # Hidden until "Save as MP3" is checked (see .change handler below).
            mp3_bitrate = gr.Slider(128, 320, step=32, label="MP3 Bitrate", visible=False)
            submit_btn = gr.Button("Process", variant="primary")

        # Right column: outputs — the mixed audio and the streamed log.
        with gr.Column(scale=1):
            output_audio = gr.Audio(type="filepath", label="Processed Audio")
            separation_log = gr.HTML()

    # inference is a generator, so Gradio streams each yielded (audio, log)
    # pair into the two outputs as separation progresses.
    submit_btn.click(
        fn=inference,
        inputs=[audio_input, model_dropdown, vocals_checkbox, drums_checkbox, bass_checkbox, other_checkbox, mp3_checkbox, mp3_bitrate],
        outputs=[output_audio, separation_log]
    )

    # Show the bitrate slider only while MP3 output is enabled.
    mp3_checkbox.change(
        fn=lambda mp3: gr.update(visible=mp3),
        inputs=mp3_checkbox,
        outputs=mp3_bitrate
    )

# Launch the Gradio interface
iface.launch()
|