Update app.py
app.py CHANGED
@@ -1,109 +1,115 @@
 import gradio as gr
 import torch
-import demucs.
-import shlex
+import demucs.api
 import os
 import spaces
 import subprocess
+from pydub import AudioSegment
+from typing import Tuple, Dict, List
 
-#
-device = "cuda" if torch.cuda.is_available() else "cpu"
+# check if cuda is available
+device: str = "cuda" if torch.cuda.is_available() else "cpu"
 
-#
+# check if sox is installed and install it if necessary
 try:
     subprocess.run(["sox", "--version"], check=True, capture_output=True)
 except FileNotFoundError:
-    print("sox is not installed.
+    print("sox is not installed. trying to install it now...")
     try:
         subprocess.run(["apt-get", "update"], check=True)
         subprocess.run(["apt-get", "install", "-y", "sox"], check=True)
         print("sox has been installed.")
     except subprocess.CalledProcessError as e:
-        print(f"
-        print("
+        print(f"error installing sox: {e}")
+        print("please install sox manually or try adding the following repository to your sources list:")
         print("deb http://deb.debian.org/debian stretch main contrib non-free")
         exit(1)
 
-#
+# define the inference function
 @spaces.GPU
-def inference(audio_file, model_name, vocals, drums, bass, other, mp3, mp3_bitrate):
+def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Tuple[str, str]:
     """
-
+    performs inference using demucs and mixes the selected stems.
 
-
-    audio_file:
-    model_name:
-    vocals:
-    drums:
-    bass:
-    other:
-    mp3:
-    mp3_bitrate:
+    args:
+        audio_file: the audio file to separate.
+        model_name: the name of the demucs model to use.
+        vocals: whether to include vocals in the mix.
+        drums: whether to include drums in the mix.
+        bass: whether to include bass in the mix.
+        other: whether to include other instruments in the mix.
+        mp3: whether to save the output as mp3.
+        mp3_bitrate: the bitrate of the output mp3 file.
 
-
-
+    returns:
+        a tuple containing the path to the mixed audio file and the separation log.
     """
 
-    #
-
-    if mp3:
-        cmd += f" --mp3 --mp3-bitrate={mp3_bitrate}"
-    cmd += f" --filename \"{audio_file}\""
+    # initialize demucs separator
+    separator: demucs.api.Separator = demucs.api.Separator(model=model_name)
 
-    #
-
-
-
+    # separate audio file and capture log
+    import io
+    log_stream = io.StringIO()
+    origin, separated = separator.separate_audio_file(audio_file, progress=True, log_stream=log_stream)
+    separation_log = log_stream.getvalue()
 
-    #
-    output_dir = os.path.join("separated", model_name, os.path.splitext(os.path.basename(audio_file))[0])
-
-
-
-
-
-
+    # get the output file paths
+    output_dir: str = os.path.join("separated", model_name, os.path.splitext(os.path.basename(audio_file))[0])
+    os.makedirs(output_dir, exist_ok=True) # create output directory if it doesn't exist
+    stems: Dict[str, str] = {}
+    for stem, source in separated.items():
+        stem_path: str = os.path.join(output_dir, f"{stem}.wav")
+        demucs.api.save_audio(source, stem_path, samplerate=separator.samplerate)
+        stems[stem] = stem_path
 
-    #
-    selected_stems = [stems[stem] for stem, include in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]) if include]
+    # mix the selected stems
+    selected_stems: List[str] = [stems[stem] for stem, include in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]) if include]
     if not selected_stems:
-        raise gr.Error("
+        raise gr.Error("please select at least one stem to mix.")
 
-    output_file = os.path.join(output_dir, "mixed.wav")
+    output_file: str = os.path.join(output_dir, "mixed.wav")
     if len(selected_stems) == 1:
-        #
+        # if only one stem is selected, just copy it
        os.rename(selected_stems[0], output_file)
     else:
-        #
-
-
+        # otherwise, use pydub to mix the stems
+        mixed_audio: AudioSegment = AudioSegment.empty()
+        for stem_path in selected_stems:
+            mixed_audio += AudioSegment.from_wav(stem_path)
+        mixed_audio.export(output_file, format="wav")
 
-    #
+    # automatically convert to mp3 if requested
     if mp3:
-        mp3_output_file = os.path.splitext(output_file)[0] + ".mp3"
-
-
-        output_file = mp3_output_file # Update output_file to the MP3 file
+        mp3_output_file: str = os.path.splitext(output_file)[0] + ".mp3"
+        mixed_audio.export(mp3_output_file, format="mp3", bitrate=str(mp3_bitrate) + "k")
+        output_file = mp3_output_file # update output_file to the mp3 file
 
-    return output_file
+    return output_file, separation_log
 
-#
-iface = gr.Interface(
+# define the gradio interface
+iface: gr.Interface = gr.Interface(
     fn=inference,
     inputs=[
         gr.Audio(type="filepath"),
-        gr.Dropdown(["htdemucs", "htdemucs_ft", "htdemucs_6s", "hdemucs_mmi", "mdx", "mdx_extra", "mdx_q", "mdx_extra_q"], label="
-        gr.Checkbox(label="
-        gr.Checkbox(label="
-        gr.Checkbox(label="
-        gr.Checkbox(label="
-        gr.Checkbox(label="
-        gr.Slider(128, 320, step=32, label="
+        gr.Dropdown(["htdemucs", "htdemucs_ft", "htdemucs_6s", "hdemucs_mmi", "mdx", "mdx_extra", "mdx_q", "mdx_extra_q"], label="model name", value="htdemucs_ft"), # set default value
+        gr.Checkbox(label="vocals", value=True),
+        gr.Checkbox(label="drums", value=True),
+        gr.Checkbox(label="bass", value=True),
+        gr.Checkbox(label="other", value=True),
+        gr.Checkbox(label="save as mp3", value=False), # set default value to false
+        gr.Slider(128, 320, step=32, label="mp3 bitrate", visible=False), # set visible to false initially
+    ],
+    outputs=[
+        gr.Audio(type="filepath"),
+        gr.Textbox(label="separation log", lines=10),
     ],
-
-
-    description="Separate vocals, drums, bass, and other instruments from your music using Demucs and mix the selected stems.",
+    title="demucs music source separation and mixing",
+    description="separate vocals, drums, bass, and other instruments from your music using demucs and mix the selected stems.",
 )
 
-#
+# make mp3 bitrate slider visible only when "save as mp3" is checked
+iface.inputs[-2].change(fn=lambda mp3: gr.update(visible=mp3), inputs=iface.inputs[-2], outputs=iface.inputs[-1])
+
+# launch the gradio interface
 iface.launch()
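
A note on the mixing step in the new version: pydub's `+` operator concatenates AudioSegments end to end rather than layering them, so summing stems that way plays them one after another. A minimal sketch of overlay-based mixing (the function name and paths are illustrative, assuming equal-length WAV stems as Demucs produces):

from pydub import AudioSegment

def mix_stems(stem_paths):
    # start from the first stem and layer the remaining stems on top of it
    mixed = AudioSegment.from_wav(stem_paths[0])
    for path in stem_paths[1:]:
        mixed = mixed.overlay(AudioSegment.from_wav(path))
    return mixed

# example usage (hypothetical paths):
# mix_stems(["separated/htdemucs_ft/song/vocals.wav",
#            "separated/htdemucs_ft/song/drums.wav"]).export("mixed.wav", format="wav")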
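
On the bitrate-slider visibility toggle at the end of the new file: Gradio event listeners are normally wired up inside a Blocks (or Interface) context, so attaching `.change` to `iface.inputs[-2]` after construction may not take effect. A rough sketch of the same behaviour with gr.Blocks (component names here are illustrative, not from the commit):

import gradio as gr

with gr.Blocks() as demo:
    mp3 = gr.Checkbox(label="save as mp3", value=False)
    mp3_bitrate = gr.Slider(128, 320, step=32, label="mp3 bitrate", visible=False)
    # show the slider only while the checkbox is ticked
    mp3.change(fn=lambda checked: gr.update(visible=checked), inputs=mp3, outputs=mp3_bitrate)

demo.launch()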