Update app.py
app.py CHANGED

@@ -13,7 +13,7 @@ import time
 console = Console()
 
 @spaces.GPU
-def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Tuple[str, gr.HTML]:
+def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass: bool, other: bool, mp3: bool, mp3_bitrate: int) -> Tuple[str, List[str], gr.HTML]:
     log_messages = []
 
     def stream_log(message, style=""):
@@ -21,14 +21,14 @@ def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass:
         log_messages.append(formatted_message)
         return gr.HTML(f"<pre style='margin-bottom: 0;{style}'>{formatted_message}</pre>")
 
-    yield None, stream_log("Initializing Demucs...", "color: #4CAF50; font-weight: bold;")
+    yield None, None, stream_log("Initializing Demucs...", "color: #4CAF50; font-weight: bold;")
     time.sleep(1) # Simulate initialization time
 
-    yield None, stream_log("Loading audio file...", "color: #2196F3;")
+    yield None, None, stream_log("Loading audio file...", "color: #2196F3;")
     time.sleep(0.5) # Simulate loading time
 
     if audio_file is None:
-        yield None, stream_log("Error: No audio file provided", "color: #F44336;")
+        yield None, None, stream_log("Error: No audio file provided", "color: #F44336;")
         raise gr.Error("Please upload an audio file")
 
     # Use absolute paths
@@ -44,7 +44,7 @@ def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass:
         audio_file
     ]
 
-    yield None, stream_log("Preparing separation process...", "color: #FF9800;")
+    yield None, None, stream_log("Preparing separation process...", "color: #FF9800;")
     time.sleep(0.5) # Simulate preparation time
 
     try:
@@ -68,85 +68,81 @@ def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass:
 
         if process.returncode != 0:
             error_output = process.stderr.read()
-            yield None, stream_log(f"Error: Separation failed", "color: #F44336;")
+            yield None, None, stream_log(f"Error: Separation failed", "color: #F44336;")
             raise gr.Error(f"Demucs separation failed. Check the logs for details.")
 
     except Exception as e:
-        yield None, stream_log(f"Unexpected error: {str(e)}", "color: #F44336;")
+        yield None, None, stream_log(f"Unexpected error: {str(e)}", "color: #F44336;")
         raise gr.Error(f"An unexpected error occurred: {str(e)}")
 
-    yield None, stream_log("Separation completed successfully!", "color: #4CAF50; font-weight: bold;")
+    yield None, None, stream_log("Separation completed successfully!", "color: #4CAF50; font-weight: bold;")
     time.sleep(0.5) # Pause for effect
 
-    yield None, stream_log("Processing stems...", "color: #9C27B0;")
+    yield None, None, stream_log("Processing stems...", "color: #9C27B0;")
     time.sleep(0.5) # Simulate processing time
 
     # Change the stem search directory using full path
     stem_search_dir = os.path.join(base_output_dir, model_name, os.path.splitext(os.path.basename(audio_file))[0])
-    yield None, stream_log(f"Searching for stems in: {stem_search_dir}")
+    yield None, None, stream_log(f"Searching for stems in: {stem_search_dir}")
 
     stems: Dict[str, str] = {}
     for stem in ["vocals", "drums", "bass", "other"]:
         stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
-        yield None, stream_log(f"Checking for {stem} stem at: {stem_path}")
+        yield None, None, stream_log(f"Checking for {stem} stem at: {stem_path}")
         if os.path.exists(stem_path):
             stems[stem] = stem_path
-            yield None, stream_log(f"Found {stem} stem")
+            yield None, None, stream_log(f"Found {stem} stem")
         else:
-            yield None, stream_log(f"Warning: {stem} stem not found")
+            yield None, None, stream_log(f"Warning: {stem} stem not found")
 
     if not stems:
-        yield None, stream_log("Error: No stems found. Checking alternative directory...")
+        yield None, None, stream_log("Error: No stems found. Checking alternative directory...")
         stem_search_dir = os.path.join(base_output_dir, model_name)
         for stem in ["vocals", "drums", "bass", "other"]:
             stem_path = os.path.join(stem_search_dir, f"{stem}.wav")
-            yield None, stream_log(f"Checking for {stem} stem at: {stem_path}")
+            yield None, None, stream_log(f"Checking for {stem} stem at: {stem_path}")
             if os.path.exists(stem_path):
                 stems[stem] = stem_path
-                yield None, stream_log(f"Found {stem} stem")
+                yield None, None, stream_log(f"Found {stem} stem")
             else:
-                yield None, stream_log(f"Warning: {stem} stem not found")
+                yield None, None, stream_log(f"Warning: {stem} stem not found")
 
-    yield None, stream_log(f"All found stems: {list(stems.keys())}")
+    yield None, None, stream_log(f"All found stems: {list(stems.keys())}")
 
     selected_stems: List[str] = []
     for stem, selected in zip(["vocals", "drums", "bass", "other"], [vocals, drums, bass, other]):
         if selected:
-            yield None, stream_log(f"{stem} is selected by user")
+            yield None, None, stream_log(f"{stem} is selected by user")
             if stem in stems:
                 selected_stems.append(stems[stem])
-                yield None, stream_log(f"Selected {stem} stem for mixing")
+                yield None, None, stream_log(f"Selected {stem} stem for mixing")
             else:
-                yield None, stream_log(f"Warning: {stem} was selected but not found")
+                yield None, None, stream_log(f"Warning: {stem} was selected but not found")
 
-    yield None, stream_log(f"Final selected stems: {selected_stems}")
+    yield None, None, stream_log(f"Final selected stems: {selected_stems}")
 
     if not selected_stems:
-        yield None, stream_log("Error: No stems selected for mixing", "color: #F44336;")
+        yield None, None, stream_log("Error: No stems selected for mixing", "color: #F44336;")
         raise gr.Error("Please select at least one stem to mix and ensure it was successfully separated.")
 
     output_file: str = os.path.join(output_dir, "mixed.wav")
-    yield None, stream_log("Mixing selected stems...", "color: #FF5722;")
+    yield None, None, stream_log("Mixing selected stems...", "color: #FF5722;")
     time.sleep(0.5) # Simulate mixing time
 
-
-
-    mixed_audio.
-
-    mixed_audio: AudioSegment = AudioSegment.empty()
-    for stem_path in selected_stems:
-        mixed_audio += AudioSegment.from_wav(stem_path)
-    mixed_audio.export(output_file, format="wav")
+    mixed_audio: AudioSegment = AudioSegment.empty()
+    for stem_path in selected_stems:
+        mixed_audio += AudioSegment.from_wav(stem_path)
+    mixed_audio.export(output_file, format="wav")
 
     if mp3:
-        yield None, stream_log(f"Converting to MP3...", "color: #795548;")
+        yield None, None, stream_log(f"Converting to MP3...", "color: #795548;")
         time.sleep(0.5) # Simulate conversion time
         mp3_output_file: str = os.path.splitext(output_file)[0] + ".mp3"
         mixed_audio.export(mp3_output_file, format="mp3", bitrate=str(mp3_bitrate) + "k")
         output_file = mp3_output_file
 
-    yield None, stream_log("Process completed successfully!", "color: #4CAF50; font-weight: bold;")
-    yield output_file, gr.HTML(
+    yield None, None, stream_log("Process completed successfully!", "color: #4CAF50; font-weight: bold;")
+    yield output_file, list(stems.values()), gr.HTML(
         Panel.fit(
             Text("Separation and mixing completed successfully!", style="bold green"),
             title="Demucs Result",
@@ -178,13 +174,14 @@ with gr.Blocks() as iface:
             submit_btn = gr.Button("Process", variant="primary")
 
         with gr.Column(scale=1):
-            output_audio = gr.Audio(type="filepath", label="Processed Audio")
+            output_audio = gr.Audio(type="filepath", label="Processed Audio (Mixed)")
+            stems_output = gr.File(label="Individual Stems", file_count="multiple")
             separation_log = gr.HTML()
 
     submit_btn.click(
         fn=inference,
         inputs=[audio_input, model_dropdown, vocals_checkbox, drums_checkbox, bass_checkbox, other_checkbox, mp3_checkbox, mp3_bitrate],
-        outputs=[output_audio, separation_log]
+        outputs=[output_audio, stems_output, separation_log]
     )
 
     mp3_checkbox.change(
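The core of this change is that inference now yields three values per update (mixed-audio path, list of stem files, log HTML) and the click handler routes them to output_audio, stems_output, and separation_log. Below is a minimal, self-contained sketch of that streaming pattern with the Demucs step stubbed out; component names mirror the diff, everything else is illustrative and not the Space's actual code.

import gradio as gr

def inference_sketch(audio_file):
    # Mirrors stream_log: wrap a message in a <pre> block for the HTML log component.
    def log(message, style=""):
        return gr.HTML(f"<pre style='margin-bottom: 0;{style}'>{message}</pre>")

    # Intermediate yields: None for the audio and file outputs, so only the log updates.
    yield None, None, log("Initializing...", "color: #4CAF50;")

    if audio_file is None:
        yield None, None, log("Error: No audio file provided", "color: #F44336;")
        raise gr.Error("Please upload an audio file")

    # Stand-in for Demucs separation: reuse the uploaded file as every "stem".
    stems = {"vocals": audio_file, "drums": audio_file}
    yield None, None, log(f"Found stems: {list(stems.keys())}")

    # Final yield: one value per component in outputs=[...], in the same order.
    yield audio_file, list(stems.values()), log("Done!", "color: #4CAF50; font-weight: bold;")

with gr.Blocks() as demo:
    audio_input = gr.Audio(type="filepath", label="Input Audio")
    submit_btn = gr.Button("Process", variant="primary")
    output_audio = gr.Audio(type="filepath", label="Processed Audio (Mixed)")
    stems_output = gr.File(label="Individual Stems", file_count="multiple")
    separation_log = gr.HTML()

    submit_btn.click(
        fn=inference_sketch,
        inputs=[audio_input],
        outputs=[output_audio, stems_output, separation_log],
    )

if __name__ == "__main__":
    demo.launch()

Because the handler is a generator, Gradio pushes each yielded tuple to the three outputs as it arrives, which is what lets the log stream while the audio and stem outputs stay empty until the final yield.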
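One detail of the mixing step worth noting: in pydub, adding two AudioSegments with + concatenates them end to end rather than summing them, so mixed_audio += AudioSegment.from_wav(stem_path) appends each stem after the previous one. If summing the stems is the intent, overlay() is the usual approach; the helper below is a hypothetical sketch, not part of this commit.

from pydub import AudioSegment

def mix_stems(stem_paths):
    # Load each stem and sum them into a single segment.
    segments = [AudioSegment.from_wav(p) for p in stem_paths]
    # overlay() keeps the base segment's duration, so start from the longest stem.
    mixed = max(segments, key=len)
    for seg in segments:
        if seg is not mixed:
            mixed = mixed.overlay(seg)
    return mixed

# Usage: mix_stems(["vocals.wav", "drums.wav"]).export("mixed.wav", format="wav")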