r0seyyyd33p committed on
Commit
00c0f7a
1 Parent(s): 9efc117

Upload infer-web.py

Files changed (1)
  1. infer-web.py +1519 -0
infer-web.py ADDED
@@ -0,0 +1,1519 @@
1
+ from multiprocessing import cpu_count
2
+ import threading, pdb, librosa
+ from time import sleep
+ from subprocess import Popen
+ import torch, os, traceback, sys, warnings, shutil, numpy as np
7
+ import faiss
8
+ from random import shuffle
9
+ now_dir = os.getcwd()
10
+ sys.path.append(now_dir)
11
+ tmp = os.path.join(now_dir, "TEMP")
12
+ shutil.rmtree(tmp, ignore_errors=True)
13
+ os.makedirs(tmp, exist_ok=True)
14
+ os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
15
+ os.makedirs(os.path.join(now_dir, "weights"), exist_ok=True)
16
+ os.environ["TEMP"] = tmp
17
+ warnings.filterwarnings("ignore")
18
+ torch.manual_seed(114514)
19
+ from i18n import I18nAuto
20
+ import ffmpeg
21
+
22
+ i18n = I18nAuto()
23
+ # Check whether an NVIDIA GPU is available for training and accelerated inference
24
+ ncpu = cpu_count()
25
+ ngpu = torch.cuda.device_count()
26
+ gpu_infos = []
27
+ mem=[]
28
+ if (not torch.cuda.is_available()) or ngpu == 0:
29
+ if_gpu_ok = False
30
+ else:
31
+ if_gpu_ok = False
32
+ for i in range(ngpu):
33
+ gpu_name = torch.cuda.get_device_name(i)
34
+ if (
35
+ "10" in gpu_name
36
+ or "20" in gpu_name
37
+ or "30" in gpu_name
38
+ or "40" in gpu_name
39
+ or "A2" in gpu_name.upper()
40
+ or "A3" in gpu_name.upper()
41
+ or "A4" in gpu_name.upper()
42
+ or "P4" in gpu_name.upper()
43
+ or "A50" in gpu_name.upper()
44
+ or "70" in gpu_name
45
+ or "80" in gpu_name
46
+ or "90" in gpu_name
47
+ or "M4" in gpu_name.upper()
48
+ or "T4" in gpu_name.upper()
49
+ or "TITAN" in gpu_name.upper()
50
+ ): # A10#A100#V100#A40#P40#M40#K80#A4500
51
+ if_gpu_ok = True # at least one usable NVIDIA GPU
52
+ gpu_infos.append("%s\t%s" % (i, gpu_name))
53
+ mem.append(int(torch.cuda.get_device_properties(i).total_memory/1024/1024/1024+0.4))
54
+ if if_gpu_ok == True and len(gpu_infos) > 0:
55
+ gpu_info ="\n".join(gpu_infos)
56
+ default_batch_size=min(mem)//2
57
+ else:
58
+ gpu_info = "很遗憾您这没有能用的显卡来支持您训练"
59
+ default_batch_size=1
60
+ gpus = "-".join([i[0] for i in gpu_infos])
61
+ from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
62
+ from scipy.io import wavfile
63
+ from fairseq import checkpoint_utils
64
+ import gradio as gr
65
+ import logging
66
+ from vc_infer_pipeline import VC
67
+ from config import (
68
+ is_half,
69
+ device,
70
+ python_cmd,
71
+ listen_port,
72
+ iscolab,
73
+ noparallel,
74
+ noautoopen,
75
+ )
76
+ from infer_uvr5 import _audio_pre_
77
+ from my_utils import load_audio
78
+ from train.process_ckpt import show_info, change_info, merge, extract_small_model
79
+
80
+ # from trainset_preprocess_pipeline import PreProcess
81
+ logging.getLogger("numba").setLevel(logging.WARNING)
82
+
83
+
84
+ class ToolButton(gr.Button, gr.components.IOComponent):
85
+ """Small button with single emoji as text, fits inside gradio forms"""
86
+
87
+ def __init__(self, **kwargs):
88
+ super().__init__(variant="tool", **kwargs)
89
+
90
+ def get_block_name(self):
91
+ return "button"
92
+
93
+
94
+ hubert_model = None
95
+
96
+
97
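+ # Load the HuBERT content encoder from hubert_base.pt onto the configured device, in half or full precision.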
+ def load_hubert():
98
+ global hubert_model
99
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
100
+ ["hubert_base.pt"],
101
+ suffix="",
102
+ )
103
+ hubert_model = models[0]
104
+ hubert_model = hubert_model.to(device)
105
+ if is_half:
106
+ hubert_model = hubert_model.half()
107
+ else:
108
+ hubert_model = hubert_model.float()
109
+ hubert_model.eval()
110
+
111
+
112
+ weight_root = "weights"
113
+ weight_uvr5_root = "uvr5_weights"
114
+ names = []
115
+ for name in os.listdir(weight_root):
116
+ if name.endswith(".pth"):
117
+ names.append(name)
118
+ uvr5_names = []
119
+ for name in os.listdir(weight_uvr5_root):
120
+ if name.endswith(".pth"):
121
+ uvr5_names.append(name.replace(".pth", ""))
122
+
123
+
124
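+ # Convert a single audio file: load it at 16 kHz, run the VC pipeline with the selected pitch method
+ # and optional faiss feature index, and return (status message, (target sample rate, converted audio)).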
+ def vc_single(
125
+ sid,
126
+ input_audio,
127
+ f0_up_key,
128
+ f0_file,
129
+ f0_method,
130
+ file_index,
131
+ # file_big_npy,
132
+ index_rate,
133
+ ): # spk_item, input_audio0, vc_transform0,f0_file,f0method0
134
+ global tgt_sr, net_g, vc, hubert_model
135
+ if input_audio is None:
136
+ return "You need to upload an audio", None
137
+ f0_up_key = int(f0_up_key)
138
+ try:
139
+ audio = load_audio(input_audio, 16000)
140
+ times = [0, 0, 0]
141
+ if hubert_model == None:
142
+ load_hubert()
143
+ if_f0 = cpt.get("f0", 1)
144
+ file_index = (
145
+ file_index.strip(" ")
146
+ .strip('"')
147
+ .strip("\n")
148
+ .strip('"')
149
+ .strip(" ")
150
+ .replace("trained", "added")
151
+ ) # in case the user mistyped the index name, automatically swap "trained" for "added"
152
+ # file_big_npy = (
153
+ # file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
154
+ # )
155
+ audio_opt = vc.pipeline(
156
+ hubert_model,
157
+ net_g,
158
+ sid,
159
+ audio,
160
+ times,
161
+ f0_up_key,
162
+ f0_method,
163
+ file_index,
164
+ # file_big_npy,
165
+ index_rate,
166
+ if_f0,
167
+ f0_file=f0_file,
168
+ )
169
+ print(
170
+ "npy: ", times[0], "s, f0: ", times[1], "s, infer: ", times[2], "s", sep=""
171
+ )
172
+ return "Success", (tgt_sr, audio_opt)
173
+ except:
174
+ info = traceback.format_exc()
175
+ print(info)
176
+ return info, (None, None)
177
+
178
+
179
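+ # Batch conversion: run every file in dir_path (or the uploaded files) through vc_single
+ # and write the results as WAVs into opt_root, yielding progress text as it goes.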
+ def vc_multi(
180
+ sid,
181
+ dir_path,
182
+ opt_root,
183
+ paths,
184
+ f0_up_key,
185
+ f0_method,
186
+ file_index,
187
+ # file_big_npy,
188
+ index_rate,
189
+ ):
190
+ try:
191
+ dir_path = (
192
+ dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
193
+ ) # strip spaces, quotes and newlines that users may copy along with the path
194
+ opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
195
+ os.makedirs(opt_root, exist_ok=True)
196
+ try:
197
+ if dir_path != "":
198
+ paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
199
+ else:
200
+ paths = [path.name for path in paths]
201
+ except:
202
+ traceback.print_exc()
203
+ paths = [path.name for path in paths]
204
+ infos = []
205
+ file_index = (
206
+ file_index.strip(" ")
207
+ .strip('"')
208
+ .strip("\n")
209
+ .strip('"')
210
+ .strip(" ")
211
+ .replace("trained", "added")
212
+ ) # in case the user mistyped the index name, automatically swap "trained" for "added"
213
+ for path in paths:
214
+ info, opt = vc_single(
215
+ sid,
216
+ path,
217
+ f0_up_key,
218
+ None,
219
+ f0_method,
220
+ file_index,
221
+ # file_big_npy,
222
+ index_rate,
223
+ )
224
+ if info == "Success":
225
+ try:
226
+ tgt_sr, audio_opt = opt
227
+ wavfile.write(
228
+ "%s/%s" % (opt_root, os.path.basename(path)), tgt_sr, audio_opt
229
+ )
230
+ except:
231
+ info = traceback.format_exc()
232
+ infos.append("%s->%s" % (os.path.basename(path), info))
233
+ yield "\n".join(infos)
234
+ yield "\n".join(infos)
235
+ except:
236
+ yield traceback.format_exc()
237
+
238
+
239
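+ # Vocal/instrumental separation with a UVR5 model; inputs that are not 44.1 kHz stereo are
+ # first re-encoded with ffmpeg, and progress is yielded per file.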
+ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins,agg):
240
+ infos = []
241
+ try:
242
+ inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
243
+ save_root_vocal = (
244
+ save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
245
+ )
246
+ save_root_ins = (
247
+ save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
248
+ )
249
+ pre_fun = _audio_pre_(
250
+ agg=int(agg),
251
+ model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
252
+ device=device,
253
+ is_half=is_half,
254
+ )
255
+ if inp_root != "":
256
+ paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
257
+ else:
258
+ paths = [path.name for path in paths]
259
+ for path in paths:
260
+ inp_path = os.path.join(inp_root, path)
261
+ need_reformat=1
262
+ done=0
263
+ try:
264
+ info = ffmpeg.probe(inp_path, cmd="ffprobe")
265
+ if(info["streams"][0]["channels"]==2 and info["streams"][0]["sample_rate"]=="44100"):
266
+ need_reformat=0
267
+ pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal)
268
+ done=1
269
+ except:
270
+ need_reformat = 1
271
+ traceback.print_exc()
272
+ if(need_reformat==1):
273
+ tmp_path="%s/%s.reformatted.wav"%(tmp,os.path.basename(inp_path))
274
+ os.system("ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"%(inp_path,tmp_path))
275
+ inp_path=tmp_path
276
+ try:
277
+ if(done==0):pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal)
278
+ infos.append("%s->Success" % (os.path.basename(inp_path)))
279
+ yield "\n".join(infos)
280
+ except:
281
+ infos.append(
282
+ "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
283
+ )
284
+ yield "\n".join(infos)
285
+ except:
286
+ infos.append(traceback.format_exc())
287
+ yield "\n".join(infos)
288
+ finally:
289
+ try:
290
+ del pre_fun.model
291
+ del pre_fun
292
+ except:
293
+ traceback.print_exc()
294
+ print("clean_empty_cache")
295
+ if torch.cuda.is_available():
296
+ torch.cuda.empty_cache()
297
+ yield "\n".join(infos)
298
+
299
+
300
+ # Only one voice model can be loaded globally per tab
301
+ def get_vc(sid):
302
+ global n_spk, tgt_sr, net_g, vc, cpt
303
+ if sid == []:
304
+ global hubert_model
305
+ if hubert_model != None: # polling means we must check whether sid switched from a loaded model to no model
306
+ print("clean_empty_cache")
307
+ del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt
308
+ hubert_model = net_g = n_spk = vc = tgt_sr = None
309
+ if torch.cuda.is_available():
310
+ torch.cuda.empty_cache()
311
+ ### without the juggling below, the memory is not freed cleanly
312
+ if_f0 = cpt.get("f0", 1)
313
+ if if_f0 == 1:
314
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
315
+ else:
316
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
317
+ del net_g, cpt
318
+ if torch.cuda.is_available():
319
+ torch.cuda.empty_cache()
320
+ cpt = None
321
+ return {"visible": False, "__type__": "update"}
322
+ person = "%s/%s" % (weight_root, sid)
323
+ print("loading %s" % person)
324
+ cpt = torch.load(person, map_location="cpu")
325
+ tgt_sr = cpt["config"][-1]
326
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
327
+ if_f0 = cpt.get("f0", 1)
328
+ if if_f0 == 1:
329
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
330
+ else:
331
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
332
+ del net_g.enc_q
333
+ print(net_g.load_state_dict(cpt["weight"], strict=False)) # memory is not freed cleanly without this line, oddly enough
334
+ net_g.eval().to(device)
335
+ if is_half:
336
+ net_g = net_g.half()
337
+ else:
338
+ net_g = net_g.float()
339
+ vc = VC(tgt_sr, device, is_half)
340
+ n_spk = cpt["config"][-3]
341
+ return {"visible": True, "maximum": n_spk, "__type__": "update"}
342
+
343
+
344
+ def change_choices():
345
+ names = []
346
+ for name in os.listdir(weight_root):
347
+ if name.endswith(".pth"):
348
+ names.append(name)
349
+ return {"choices": sorted(names), "__type__": "update"}
350
+
351
+
352
+ def clean():
353
+ return {"value": "", "__type__": "update"}
354
+
355
+
356
+ def change_f0(if_f0_3, sr2): # np7, f0method8,pretrained_G14,pretrained_D15
357
+ if if_f0_3 == "是":
358
+ return (
359
+ {"visible": True, "__type__": "update"},
360
+ {"visible": True, "__type__": "update"},
361
+ "pretrained/f0G%s.pth" % sr2,
362
+ "pretrained/f0D%s.pth" % sr2,
363
+ )
364
+ return (
365
+ {"visible": False, "__type__": "update"},
366
+ {"visible": False, "__type__": "update"},
367
+ "pretrained/G%s.pth" % sr2,
368
+ "pretrained/D%s.pth" % sr2,
369
+ )
370
+
371
+
372
+ sr_dict = {
373
+ "32k": 32000,
374
+ "40k": 40000,
375
+ "48k": 48000,
376
+ }
377
+
378
+
379
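+ # Poll a subprocess until it exits, then set the shared `done` flag (used by the log-streaming loops below).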
+ def if_done(done, p):
380
+ while 1:
381
+ if p.poll() == None:
382
+ sleep(0.5)
383
+ else:
384
+ break
385
+ done[0] = True
386
+
387
+
388
+ def if_done_multi(done, ps):
389
+ while 1:
390
+ # poll() == None means the process has not finished yet
391
+ # keep waiting as long as any process is still running
392
+ flag = 1
393
+ for p in ps:
394
+ if p.poll() == None:
395
+ flag = 0
396
+ sleep(0.5)
397
+ break
398
+ if flag == 1:
399
+ break
400
+ done[0] = True
401
+
402
+
403
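+ # Run trainset_preprocess_pipeline_print.py as a subprocess and stream its preprocess.log back to the UI.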
+ def preprocess_dataset(trainset_dir, exp_dir, sr, n_p=ncpu):
404
+ sr = sr_dict[sr]
405
+ os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
406
+ f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
407
+ f.close()
408
+ cmd = (
409
+ python_cmd
410
+ + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "
411
+ % (trainset_dir, sr, n_p, now_dir, exp_dir)
412
+ + str(noparallel)
413
+ )
414
+ print(cmd)
415
+ p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
416
+ ### gradio only returns Popen output in one chunk after the process exits (outside gradio it streams line by line), so a log file is polled on a timer instead
417
+ done = [False]
418
+ threading.Thread(
419
+ target=if_done,
420
+ args=(
421
+ done,
422
+ p,
423
+ ),
424
+ ).start()
425
+ while 1:
426
+ with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
427
+ yield (f.read())
428
+ sleep(1)
429
+ if done[0] == True:
430
+ break
431
+ with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
432
+ log = f.read()
433
+ print(log)
434
+ yield log
435
+
436
+
437
+ # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
438
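+ # Extract pitch (f0) with the chosen method if the model uses it, then run feature extraction
+ # once per GPU listed in `gpus`, streaming extract_f0_feature.log back to the UI.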
+ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir):
439
+ gpus = gpus.split("-")
440
+ os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
441
+ f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
442
+ f.close()
443
+ if if_f0 == "是":
444
+ cmd = python_cmd + " extract_f0_print.py %s/logs/%s %s %s" % (
445
+ now_dir,
446
+ exp_dir,
447
+ n_p,
448
+ f0method,
449
+ )
450
+ print(cmd)
451
+ p = Popen(cmd, shell=True, cwd=now_dir) # , stdin=PIPE, stdout=PIPE,stderr=PIPE
452
+ ### gradio only returns Popen output in one chunk after the process exits (outside gradio it streams line by line), so a log file is polled on a timer instead
453
+ done = [False]
454
+ threading.Thread(
455
+ target=if_done,
456
+ args=(
457
+ done,
458
+ p,
459
+ ),
460
+ ).start()
461
+ while 1:
462
+ with open(
463
+ "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
464
+ ) as f:
465
+ yield (f.read())
466
+ sleep(1)
467
+ if done[0] == True:
468
+ break
469
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
470
+ log = f.read()
471
+ print(log)
472
+ yield log
473
+ #### launch a separate process for each part
474
+ """
475
+ n_part=int(sys.argv[1])
476
+ i_part=int(sys.argv[2])
477
+ i_gpu=sys.argv[3]
478
+ exp_dir=sys.argv[4]
479
+ os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
480
+ """
481
+ leng = len(gpus)
482
+ ps = []
483
+ for idx, n_g in enumerate(gpus):
484
+ cmd = python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s" % (
485
+ device,
486
+ leng,
487
+ idx,
488
+ n_g,
489
+ now_dir,
490
+ exp_dir,
491
+ )
492
+ print(cmd)
493
+ p = Popen(
494
+ cmd, shell=True, cwd=now_dir
495
+ ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
496
+ ps.append(p)
497
+ ### gradio only returns Popen output in one chunk after the process exits (outside gradio it streams line by line), so a log file is polled on a timer instead
498
+ done = [False]
499
+ threading.Thread(
500
+ target=if_done_multi,
501
+ args=(
502
+ done,
503
+ ps,
504
+ ),
505
+ ).start()
506
+ while 1:
507
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
508
+ yield (f.read())
509
+ sleep(1)
510
+ if done[0] == True:
511
+ break
512
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
513
+ log = f.read()
514
+ print(log)
515
+ yield log
516
+
517
+
518
+ def change_sr2(sr2, if_f0_3):
519
+ if if_f0_3 == "是":
520
+ return "pretrained/f0G%s.pth" % sr2, "pretrained/f0D%s.pth" % sr2
521
+ else:
522
+ return "pretrained/G%s.pth" % sr2, "pretrained/D%s.pth" % sr2
523
+
524
+
525
+ # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
526
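+ # Build filelist.txt from the preprocessed data (plus two mute samples) and launch
+ # train_nsf_sim_cache_sid_load_pretrain.py with the chosen training settings.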
+ def click_train(
527
+ exp_dir1,
528
+ sr2,
529
+ if_f0_3,
530
+ spk_id5,
531
+ save_epoch10,
532
+ total_epoch11,
533
+ batch_size12,
534
+ if_save_latest13,
535
+ pretrained_G14,
536
+ pretrained_D15,
537
+ gpus16,
538
+ if_cache_gpu17,
539
+ ):
540
+ # generate the filelist
541
+ exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
542
+ os.makedirs(exp_dir, exist_ok=True)
543
+ gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
544
+ co256_dir = "%s/3_feature256" % (exp_dir)
545
+ if if_f0_3 == "是":
546
+ f0_dir = "%s/2a_f0" % (exp_dir)
547
+ f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
548
+ names = (
549
+ set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
550
+ & set([name.split(".")[0] for name in os.listdir(co256_dir)])
551
+ & set([name.split(".")[0] for name in os.listdir(f0_dir)])
552
+ & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
553
+ )
554
+ else:
555
+ names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
556
+ [name.split(".")[0] for name in os.listdir(co256_dir)]
557
+ )
558
+ opt = []
559
+ for name in names:
560
+ if if_f0_3 == "是":
561
+ opt.append(
562
+ "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
563
+ % (
564
+ gt_wavs_dir.replace("\\", "\\\\"),
565
+ name,
566
+ co256_dir.replace("\\", "\\\\"),
567
+ name,
568
+ f0_dir.replace("\\", "\\\\"),
569
+ name,
570
+ f0nsf_dir.replace("\\", "\\\\"),
571
+ name,
572
+ spk_id5,
573
+ )
574
+ )
575
+ else:
576
+ opt.append(
577
+ "%s/%s.wav|%s/%s.npy|%s"
578
+ % (
579
+ gt_wavs_dir.replace("\\", "\\\\"),
580
+ name,
581
+ co256_dir.replace("\\", "\\\\"),
582
+ name,
583
+ spk_id5,
584
+ )
585
+ )
586
+ if if_f0_3 == "是":
587
+ for _ in range(2):
588
+ opt.append(
589
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
590
+ % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
591
+ )
592
+ else:
593
+ for _ in range(2):
594
+ opt.append(
595
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
596
+ % (now_dir, sr2, now_dir, spk_id5)
597
+ )
598
+ shuffle(opt)
599
+ with open("%s/filelist.txt" % exp_dir, "w") as f:
600
+ f.write("\n".join(opt))
601
+ print("write filelist done")
602
+ # generate config # no config generation needed
603
+ # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
604
+ print("use gpus:", gpus16)
605
+ if gpus16:
606
+ cmd = (
607
+ python_cmd
608
+ + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s"
609
+ % (
610
+ exp_dir1,
611
+ sr2,
612
+ 1 if if_f0_3 == "是" else 0,
613
+ batch_size12,
614
+ gpus16,
615
+ total_epoch11,
616
+ save_epoch10,
617
+ pretrained_G14,
618
+ pretrained_D15,
619
+ 1 if if_save_latest13 == "是" else 0,
620
+ 1 if if_cache_gpu17 == "是" else 0,
621
+ )
622
+ )
623
+ else:
624
+ cmd = (
625
+ python_cmd
626
+ + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s"
627
+ % (
628
+ exp_dir1,
629
+ sr2,
630
+ 1 if if_f0_3 == "是" else 0,
631
+ batch_size12,
632
+ total_epoch11,
633
+ save_epoch10,
634
+ pretrained_G14,
635
+ pretrained_D15,
636
+ 1 if if_save_latest13 == "是" else 0,
637
+ 1 if if_cache_gpu17 == "是" else 0,
638
+ )
639
+ )
640
+ print(cmd)
641
+ p = Popen(cmd, shell=True, cwd=now_dir)
642
+ p.wait()
643
+ return "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"
644
+
645
+
646
+ # but4.click(train_index, [exp_dir1], info3)
647
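+ # Concatenate the extracted 256-dim features and build a faiss IVF-Flat retrieval index,
+ # writing both the trained and the populated ("added") index files into the experiment folder.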
+ def train_index(exp_dir1):
648
+ exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
649
+ os.makedirs(exp_dir, exist_ok=True)
650
+ feature_dir = "%s/3_feature256" % (exp_dir)
651
+ if os.path.exists(feature_dir) == False:
652
+ return "请先进行特征提取!"
653
+ listdir_res = list(os.listdir(feature_dir))
654
+ if len(listdir_res) == 0:
655
+ return "请先进行特征提取!"
656
+ npys = []
657
+ for name in sorted(listdir_res):
658
+ phone = np.load("%s/%s" % (feature_dir, name))
659
+ npys.append(phone)
660
+ big_npy = np.concatenate(npys, 0)
661
+ # np.save("%s/total_fea.npy" % exp_dir, big_npy)
662
+ # n_ivf = big_npy.shape[0] // 39
663
+ n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])),big_npy.shape[0]// 39)
664
+ infos=[]
665
+ infos.append("%s,%s"%(big_npy.shape,n_ivf))
666
+ yield "\n".join(infos)
667
+ index = faiss.index_factory(256, "IVF%s,Flat"%n_ivf)
668
+ # index = faiss.index_factory(256, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
669
+ infos.append("training")
670
+ yield "\n".join(infos)
671
+ index_ivf = faiss.extract_index_ivf(index) #
672
+ # index_ivf.nprobe = int(np.power(n_ivf,0.3))
673
+ index_ivf.nprobe = 1
674
+ index.train(big_npy)
675
+ faiss.write_index(index, '%s/trained_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
676
+ # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan.index'%(exp_dir,n_ivf))
677
+ infos.append("adding")
678
+ yield "\n".join(infos)
679
+ index.add(big_npy)
680
+ faiss.write_index(index, '%s/added_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))
681
+ infos.append("成功构建索引,added_IVF%s_Flat_nprobe_%s.index"%(n_ivf,index_ivf.nprobe))
682
+ # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan.index'%(exp_dir,n_ivf))
683
+ # infos.append("成功构建索引,added_IVF%s_Flat_FastScan.index"%(n_ivf))
684
+ yield "\n".join(infos)
685
+
686
+
687
+ # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
688
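+ # One-click pipeline: preprocess the dataset, extract pitch and features, train the model,
+ # then build the faiss index, yielding progress messages for each step.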
+ def train1key(
689
+ exp_dir1,
690
+ sr2,
691
+ if_f0_3,
692
+ trainset_dir4,
693
+ spk_id5,
694
+ gpus6,
695
+ np7,
696
+ f0method8,
697
+ save_epoch10,
698
+ total_epoch11,
699
+ batch_size12,
700
+ if_save_latest13,
701
+ pretrained_G14,
702
+ pretrained_D15,
703
+ gpus16,
704
+ if_cache_gpu17,
705
+ ):
706
+ infos = []
707
+
708
+ def get_info_str(strr):
709
+ infos.append(strr)
710
+ return "\n".join(infos)
711
+
712
+ os.makedirs("%s/logs/%s" % (now_dir, exp_dir1), exist_ok=True)
713
+ #########step 1: preprocess data
714
+ open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir1), "w").close()
715
+ cmd = (
716
+ python_cmd
717
+ + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "
718
+ % (trainset_dir4, sr_dict[sr2], ncpu, now_dir, exp_dir1)
719
+ + str(noparallel)
720
+ )
721
+ yield get_info_str("step1:正在处理数据")
722
+ yield get_info_str(cmd)
723
+ p = Popen(cmd, shell=True)
724
+ p.wait()
725
+ with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir1), "r") as f:
726
+ print(f.read())
727
+ #########step 2a: extract pitch
728
+ open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir1), "w")
729
+ if if_f0_3 == "是":
730
+ yield get_info_str("step2a:正在提取音高")
731
+ cmd = python_cmd + " extract_f0_print.py %s/logs/%s %s %s" % (
732
+ now_dir,
733
+ exp_dir1,
734
+ np7,
735
+ f0method8,
736
+ )
737
+ yield get_info_str(cmd)
738
+ p = Popen(cmd, shell=True, cwd=now_dir)
739
+ p.wait()
740
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir1), "r") as f:
741
+ print(f.read())
742
+ else:
743
+ yield get_info_str("step2a:无需提取音高")
744
+ #######step 2b: extract features
745
+ yield get_info_str("step2b:正在提取特征")
746
+ gpus = gpus16.split("-")
747
+ leng = len(gpus)
748
+ ps = []
749
+ for idx, n_g in enumerate(gpus):
750
+ cmd = python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s" % (
751
+ device,
752
+ leng,
753
+ idx,
754
+ n_g,
755
+ now_dir,
756
+ exp_dir1,
757
+ )
758
+ yield get_info_str(cmd)
759
+ p = Popen(
760
+ cmd, shell=True, cwd=now_dir
761
+ ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
762
+ ps.append(p)
763
+ for p in ps:
764
+ p.wait()
765
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir1), "r") as f:
766
+ print(f.read())
767
+ #######step 3a: train the model
768
+ yield get_info_str("step3a:正在训练模型")
769
+ # generate the filelist
770
+ exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
771
+ gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
772
+ co256_dir = "%s/3_feature256" % (exp_dir)
773
+ if if_f0_3 == "是":
774
+ f0_dir = "%s/2a_f0" % (exp_dir)
775
+ f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
776
+ names = (
777
+ set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
778
+ & set([name.split(".")[0] for name in os.listdir(co256_dir)])
779
+ & set([name.split(".")[0] for name in os.listdir(f0_dir)])
780
+ & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
781
+ )
782
+ else:
783
+ names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
784
+ [name.split(".")[0] for name in os.listdir(co256_dir)]
785
+ )
786
+ opt = []
787
+ for name in names:
788
+ if if_f0_3 == "是":
789
+ opt.append(
790
+ "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
791
+ % (
792
+ gt_wavs_dir.replace("\\", "\\\\"),
793
+ name,
794
+ co256_dir.replace("\\", "\\\\"),
795
+ name,
796
+ f0_dir.replace("\\", "\\\\"),
797
+ name,
798
+ f0nsf_dir.replace("\\", "\\\\"),
799
+ name,
800
+ spk_id5,
801
+ )
802
+ )
803
+ else:
804
+ opt.append(
805
+ "%s/%s.wav|%s/%s.npy|%s"
806
+ % (
807
+ gt_wavs_dir.replace("\\", "\\\\"),
808
+ name,
809
+ co256_dir.replace("\\", "\\\\"),
810
+ name,
811
+ spk_id5,
812
+ )
813
+ )
814
+ if if_f0_3 == "是":
815
+ for _ in range(2):
816
+ opt.append(
817
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
818
+ % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
819
+ )
820
+ else:
821
+ for _ in range(2):
822
+ opt.append(
823
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
824
+ % (now_dir, sr2, now_dir, spk_id5)
825
+ )
826
+ shuffle(opt)
827
+ with open("%s/filelist.txt" % exp_dir, "w") as f:
828
+ f.write("\n".join(opt))
829
+ yield get_info_str("write filelist done")
830
+ if gpus16:
831
+ cmd = (
832
+ python_cmd
833
+ + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s"
834
+ % (
835
+ exp_dir1,
836
+ sr2,
837
+ 1 if if_f0_3 == "是" else 0,
838
+ batch_size12,
839
+ gpus16,
840
+ total_epoch11,
841
+ save_epoch10,
842
+ pretrained_G14,
843
+ pretrained_D15,
844
+ 1 if if_save_latest13 == "是" else 0,
845
+ 1 if if_cache_gpu17 == "是" else 0,
846
+ )
847
+ )
848
+ else:
849
+ cmd = (
850
+ python_cmd
851
+ + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s -pg %s -pd %s -l %s -c %s"
852
+ % (
853
+ exp_dir1,
854
+ sr2,
855
+ 1 if if_f0_3 == "是" else 0,
856
+ batch_size12,
857
+ total_epoch11,
858
+ save_epoch10,
859
+ pretrained_G14,
860
+ pretrained_D15,
861
+ 1 if if_save_latest13 == "是" else 0,
862
+ 1 if if_cache_gpu17 == "是" else 0,
863
+ )
864
+ )
865
+ yield get_info_str(cmd)
866
+ p = Popen(cmd, shell=True, cwd=now_dir)
867
+ p.wait()
868
+ yield get_info_str("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")
869
+ #######step 3b: train the index
870
+ feature_dir = "%s/3_feature256" % (exp_dir)
871
+ npys = []
872
+ listdir_res = list(os.listdir(feature_dir))
873
+ for name in sorted(listdir_res):
874
+ phone = np.load("%s/%s" % (feature_dir, name))
875
+ npys.append(phone)
876
+ big_npy = np.concatenate(npys, 0)
877
+ # np.save("%s/total_fea.npy" % exp_dir, big_npy)
878
+ # n_ivf = big_npy.shape[0] // 39
879
+ n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])),big_npy.shape[0]// 39)
880
+ yield get_info_str("%s,%s" % (big_npy.shape, n_ivf))
881
+ index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf)
882
+ yield get_info_str("training index")
883
+ index_ivf = faiss.extract_index_ivf(index) #
884
+ # index_ivf.nprobe = int(np.power(n_ivf,0.3))
885
+ index_ivf.nprobe = 1
886
+ index.train(big_npy)
887
+ faiss.write_index(
888
+ index,
889
+ "%s/trained_IVF%s_Flat_nprobe_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe),
890
+ )
891
+ yield get_info_str("adding index")
892
+ index.add(big_npy)
893
+ faiss.write_index(
894
+ index,
895
+ "%s/added_IVF%s_Flat_nprobe_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe),
896
+ )
897
+ yield get_info_str(
898
+ "成功构建索引, added_IVF%s_Flat_nprobe_%s.index" % (n_ivf, index_ivf.nprobe)
899
+ )
900
+ yield get_info_str("全流程结束!")
901
+
902
+
903
+ # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
904
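+ # Read the sample rate and f0 flag from the train.log stored next to the checkpoint, to pre-fill the extraction form.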
+ def change_info_(ckpt_path):
905
+ if (
906
+ os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log"))
907
+ == False
908
+ ):
909
+ return {"__type__": "update"}, {"__type__": "update"}
910
+ try:
911
+ with open(
912
+ ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r"
913
+ ) as f:
914
+ info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
915
+ sr, f0 = info["sample_rate"], info["if_f0"]
916
+ return sr, str(f0)
917
+ except:
918
+ traceback.print_exc()
919
+ return {"__type__": "update"}, {"__type__": "update"}
920
+
921
+
922
+ from infer_pack.models_onnx_moess import SynthesizerTrnMs256NSFsidM
923
+ from infer_pack.models_onnx import SynthesizerTrnMs256NSFsidO
924
+
925
+
926
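+ # Export an RVC checkpoint to ONNX using dummy inputs; MoeVS=True uses the MoeVS-compatible graph,
+ # which additionally takes the `rnd` noise tensor as an input.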
+ def export_onnx(ModelPath, ExportedPath, MoeVS=True):
927
+ hidden_channels = 256 # hidden_channels; in preparation for 768-dim features
928
+ cpt = torch.load(ModelPath, map_location="cpu")
929
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
930
+ print(*cpt["config"])
931
+
932
+ test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
933
+ test_phone_lengths = torch.tensor([200]).long() # hidden unit length (apparently unused)
+ test_pitch = torch.randint(size=(1, 200), low=5, high=255) # fundamental frequency (Hz)
+ test_pitchf = torch.rand(1, 200) # NSF fundamental frequency
+ test_ds = torch.LongTensor([0]) # speaker ID
+ test_rnd = torch.rand(1, 192, 200) # noise (adds a random factor)
+
+ device = "cpu" # device used for export (does not affect how the model is used)
940
+
941
+ if MoeVS:
942
+ net_g = SynthesizerTrnMs256NSFsidM(
943
+ *cpt["config"], is_half=False
944
+ ) # export in fp32 (supporting fp16 in C++ would require manually re-laying out memory, so fp16 is not used for now)
945
+ net_g.load_state_dict(cpt["weight"], strict=False)
946
+ input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
947
+ output_names = [
948
+ "audio",
949
+ ]
950
+ torch.onnx.export(
951
+ net_g,
952
+ (
953
+ test_phone.to(device),
954
+ test_phone_lengths.to(device),
955
+ test_pitch.to(device),
956
+ test_pitchf.to(device),
957
+ test_ds.to(device),
958
+ test_rnd.to(device),
959
+ ),
960
+ ExportedPath,
961
+ dynamic_axes={
962
+ "phone": [1],
963
+ "pitch": [1],
964
+ "pitchf": [1],
965
+ "rnd": [2],
966
+ },
967
+ do_constant_folding=False,
968
+ opset_version=16,
969
+ verbose=False,
970
+ input_names=input_names,
971
+ output_names=output_names,
972
+ )
973
+ else:
974
+ net_g = SynthesizerTrnMs256NSFsidO(
975
+ *cpt["config"], is_half=False
976
+ ) # export in fp32 (supporting fp16 in C++ would require manually re-laying out memory, so fp16 is not used for now)
977
+ net_g.load_state_dict(cpt["weight"], strict=False)
978
+ input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds"]
979
+ output_names = [
980
+ "audio",
981
+ ]
982
+ torch.onnx.export(
983
+ net_g,
984
+ (
985
+ test_phone.to(device),
986
+ test_phone_lengths.to(device),
987
+ test_pitch.to(device),
988
+ test_pitchf.to(device),
989
+ test_ds.to(device),
990
+ ),
991
+ ExportedPath,
992
+ dynamic_axes={
993
+ "phone": [1],
994
+ "pitch": [1],
995
+ "pitchf": [1],
996
+ },
997
+ do_constant_folding=False,
998
+ opset_version=16,
999
+ verbose=False,
1000
+ input_names=input_names,
1001
+ output_names=output_names,
1002
+ )
1003
+ return "Finished"
1004
+
1005
+
1006
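+ # Gradio UI: tabs for model inference, vocal/accompaniment separation (UVR5), training, ckpt processing, and ONNX export.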
+ with gr.Blocks() as app:
1007
+ gr.Markdown(
1008
+ value=i18n(
1009
+ "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>使用需遵守的协议-LICENSE.txt</b>."
1010
+ )
1011
+ )
1012
+ with gr.Tabs():
1013
+ with gr.TabItem(i18n("模型推理")):
1014
+ with gr.Row():
1015
+ sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names))
1016
+ refresh_button = gr.Button(i18n("刷新音色列表"), variant="primary")
1017
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[sid0])
1018
+ clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
1019
+ spk_item = gr.Slider(
1020
+ minimum=0,
1021
+ maximum=2333,
1022
+ step=1,
1023
+ label=i18n("请选择说话人id"),
1024
+ value=0,
1025
+ visible=False,
1026
+ interactive=True,
1027
+ )
1028
+ clean_button.click(fn=clean, inputs=[], outputs=[sid0])
1029
+ sid0.change(
1030
+ fn=get_vc,
1031
+ inputs=[sid0],
1032
+ outputs=[spk_item],
1033
+ )
1034
+ with gr.Group():
1035
+ gr.Markdown(
1036
+ value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ")
1037
+ )
1038
+ with gr.Row():
1039
+ with gr.Column():
1040
+ vc_transform0 = gr.Number(
1041
+ label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
1042
+ )
1043
+ input_audio0 = gr.Textbox(
1044
+ label=i18n("输入待处理音频文件路径(默认是正确格式示例)"),
1045
+ value="E:\\codes\\py39\\vits_vc_gpu_train\\todo-songs\\冬之花clip1.wav",
1046
+ )
1047
+ f0method0 = gr.Radio(
1048
+ label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比"),
1049
+ choices=["pm", "harvest"],
1050
+ value="pm",
1051
+ interactive=True,
1052
+ )
1053
+ with gr.Column():
1054
+ file_index1 = gr.Textbox(
1055
+ label=i18n("特征检索库文件路径"),
1056
+ value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\added_IVF677_Flat_nprobe_7.index",
1057
+ interactive=True,
1058
+ )
1059
+ # file_big_npy1 = gr.Textbox(
1060
+ # label=i18n("特征文件路径"),
1061
+ # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1062
+ # interactive=True,
1063
+ # )
1064
+ index_rate1 = gr.Slider(
1065
+ minimum=0,
1066
+ maximum=1,
1067
+ label="检索特征占比",
1068
+ value=0.76,
1069
+ interactive=True,
1070
+ )
1071
+ f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"))
1072
+ but0 = gr.Button(i18n("转换"), variant="primary")
1073
+ with gr.Column():
1074
+ vc_output1 = gr.Textbox(label=i18n("输出信息"))
1075
+ vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)"))
1076
+ but0.click(
1077
+ vc_single,
1078
+ [
1079
+ spk_item,
1080
+ input_audio0,
1081
+ vc_transform0,
1082
+ f0_file,
1083
+ f0method0,
1084
+ file_index1,
1085
+ # file_big_npy1,
1086
+ index_rate1,
1087
+ ],
1088
+ [vc_output1, vc_output2],
1089
+ )
1090
+ with gr.Group():
1091
+ gr.Markdown(
1092
+ value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ")
1093
+ )
1094
+ with gr.Row():
1095
+ with gr.Column():
1096
+ vc_transform1 = gr.Number(
1097
+ label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
1098
+ )
1099
+ opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
1100
+ f0method1 = gr.Radio(
1101
+ label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比"),
1102
+ choices=["pm", "harvest"],
1103
+ value="pm",
1104
+ interactive=True,
1105
+ )
1106
+ with gr.Column():
1107
+ file_index2 = gr.Textbox(
1108
+ label=i18n("特征检索库文件路径"),
1109
+ value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\added_IVF677_Flat_nprobe_7.index",
1110
+ interactive=True,
1111
+ )
1112
+ # file_big_npy2 = gr.Textbox(
1113
+ # label=i18n("特征文件路径"),
1114
+ # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1115
+ # interactive=True,
1116
+ # )
1117
+ index_rate2 = gr.Slider(
1118
+ minimum=0,
1119
+ maximum=1,
1120
+ label=i18n("检索特征占比"),
1121
+ value=1,
1122
+ interactive=True,
1123
+ )
1124
+ with gr.Column():
1125
+ dir_input = gr.Textbox(
1126
+ label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
1127
+ value="E:\codes\py39\\vits_vc_gpu_train\\todo-songs",
1128
+ )
1129
+ inputs = gr.File(
1130
+ file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
1131
+ )
1132
+ but1 = gr.Button(i18n("转换"), variant="primary")
1133
+ vc_output3 = gr.Textbox(label=i18n("输出信息"))
1134
+ but1.click(
1135
+ vc_multi,
1136
+ [
1137
+ spk_item,
1138
+ dir_input,
1139
+ opt_input,
1140
+ inputs,
1141
+ vc_transform1,
1142
+ f0method1,
1143
+ file_index2,
1144
+ # file_big_npy2,
1145
+ index_rate2,
1146
+ ],
1147
+ [vc_output3],
1148
+ )
1149
+ with gr.TabItem(i18n("伴奏人声分离")):
1150
+ with gr.Group():
1151
+ gr.Markdown(
1152
+ value=i18n(
1153
+ "人声伴奏分离批量处理, 使用UVR5模型. <br>不带和声用HP2, 带和声且提取的人声不需要和声用HP5<br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)"
1154
+ )
1155
+ )
1156
+ with gr.Row():
1157
+ with gr.Column():
1158
+ dir_wav_input = gr.Textbox(
1159
+ label=i18n("输入待处理音频文件夹路径"),
1160
+ value="E:\\codes\\py39\\vits_vc_gpu_train\\todo-songs",
1161
+ )
1162
+ wav_inputs = gr.File(
1163
+ file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
1164
+ )
1165
+ with gr.Column():
1166
+ model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names)
1167
+ agg = gr.Slider(
1168
+ minimum=0,
1169
+ maximum=20,
1170
+ step=1,
1171
+ label="人声提取激进程度",
1172
+ value=10,
1173
+ interactive=True,
1174
+ visible=False # not exposed for adjustment yet
1175
+ )
1176
+ opt_vocal_root = gr.Textbox(
1177
+ label=i18n("指定输出人声文件夹"), value="opt"
1178
+ )
1179
+ opt_ins_root = gr.Textbox(label=i18n("指定输出乐器文件夹"), value="opt")
1180
+ but2 = gr.Button(i18n("转换"), variant="primary")
1181
+ vc_output4 = gr.Textbox(label=i18n("输出信息"))
1182
+ but2.click(
1183
+ uvr,
1184
+ [
1185
+ model_choose,
1186
+ dir_wav_input,
1187
+ opt_vocal_root,
1188
+ wav_inputs,
1189
+ opt_ins_root,
1190
+ agg
1191
+ ],
1192
+ [vc_output4],
1193
+ )
1194
+ with gr.TabItem(i18n("训练")):
1195
+ gr.Markdown(
1196
+ value=i18n(
1197
+ "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. "
1198
+ )
1199
+ )
1200
+ with gr.Row():
1201
+ exp_dir1 = gr.Textbox(label=i18n("输入实验名"), value="mi-test")
1202
+ sr2 = gr.Radio(
1203
+ label=i18n("目标采样率"),
1204
+ choices=["32k", "40k", "48k"],
1205
+ value="40k",
1206
+ interactive=True,
1207
+ )
1208
+ if_f0_3 = gr.Radio(
1209
+ label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1210
+ choices=["是", "否"],
1211
+ value="是",
1212
+ interactive=True,
1213
+ )
1214
+ with gr.Group(): # single speaker for now, up to 4 speakers planned later # data processing
1215
+ gr.Markdown(
1216
+ value=i18n(
1217
+ "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. "
1218
+ )
1219
+ )
1220
+ with gr.Row():
1221
+ trainset_dir4 = gr.Textbox(
1222
+ label=i18n("输入训练文件夹路径"), value="E:\\语音音频+标注\\米津玄师\\src"
1223
+ )
1224
+ spk_id5 = gr.Slider(
1225
+ minimum=0,
1226
+ maximum=4,
1227
+ step=1,
1228
+ label=i18n("请指定说话人id"),
1229
+ value=0,
1230
+ interactive=True,
1231
+ )
1232
+ but1 = gr.Button(i18n("处理数据"), variant="primary")
1233
+ info1 = gr.Textbox(label=i18n("输出信息"), value="")
1234
+ but1.click(
1235
+ preprocess_dataset, [trainset_dir4, exp_dir1, sr2], [info1]
1236
+ )
1237
+ with gr.Group():
1238
+ gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)"))
1239
+ with gr.Row():
1240
+ with gr.Column():
1241
+ gpus6 = gr.Textbox(
1242
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1243
+ value=gpus,
1244
+ interactive=True,
1245
+ )
1246
+ gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1247
+ with gr.Column():
1248
+ np7 = gr.Slider(
1249
+ minimum=0,
1250
+ maximum=ncpu,
1251
+ step=1,
1252
+ label=i18n("提取音高使用的CPU进程数"),
1253
+ value=ncpu,
1254
+ interactive=True,
1255
+ )
1256
+ f0method8 = gr.Radio(
1257
+ label=i18n(
1258
+ "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1259
+ ),
1260
+ choices=["pm", "harvest", "dio"],
1261
+ value="harvest",
1262
+ interactive=True,
1263
+ )
1264
+ but2 = gr.Button(i18n("特征提取"), variant="primary")
1265
+ info2 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
1266
+ but2.click(
1267
+ extract_f0_feature,
1268
+ [gpus6, np7, f0method8, if_f0_3, exp_dir1],
1269
+ [info2],
1270
+ )
1271
+ with gr.Group():
1272
+ gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
1273
+ with gr.Row():
1274
+ save_epoch10 = gr.Slider(
1275
+ minimum=0,
1276
+ maximum=50,
1277
+ step=1,
1278
+ label=i18n("保存频率save_every_epoch"),
1279
+ value=5,
1280
+ interactive=True,
1281
+ )
1282
+ total_epoch11 = gr.Slider(
1283
+ minimum=0,
1284
+ maximum=1000,
1285
+ step=1,
1286
+ label=i18n("总训练轮数total_epoch"),
1287
+ value=20,
1288
+ interactive=True,
1289
+ )
1290
+ batch_size12 = gr.Slider(
1291
+ minimum=0,
1292
+ maximum=40,
1293
+ step=1,
1294
+ label="每张显卡的batch_size",
1295
+ value=default_batch_size,
1296
+ interactive=True,
1297
+ )
1298
+ if_save_latest13 = gr.Radio(
1299
+ label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),
1300
+ choices=["是", "否"],
1301
+ value="否",
1302
+ interactive=True,
1303
+ )
1304
+ if_cache_gpu17 = gr.Radio(
1305
+ label=i18n(
1306
+ "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速"
1307
+ ),
1308
+ choices=["是", "否"],
1309
+ value="否",
1310
+ interactive=True,
1311
+ )
1312
+ with gr.Row():
1313
+ pretrained_G14 = gr.Textbox(
1314
+ label=i18n("加载预训练底模G路径"),
1315
+ value="pretrained/f0G40k.pth",
1316
+ interactive=True,
1317
+ )
1318
+ pretrained_D15 = gr.Textbox(
1319
+ label=i18n("加载预训练底模D路径"),
1320
+ value="pretrained/f0D40k.pth",
1321
+ interactive=True,
1322
+ )
1323
+ sr2.change(
1324
+ change_sr2, [sr2, if_f0_3], [pretrained_G14, pretrained_D15]
1325
+ )
1326
+ if_f0_3.change(
1327
+ change_f0,
1328
+ [if_f0_3, sr2],
1329
+ [np7, f0method8, pretrained_G14, pretrained_D15],
1330
+ )
1331
+ gpus16 = gr.Textbox(
1332
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1333
+ value=gpus,
1334
+ interactive=True,
1335
+ )
1336
+ but3 = gr.Button(i18n("训练模型"), variant="primary")
1337
+ but4 = gr.Button(i18n("训练特征索引"), variant="primary")
1338
+ but5 = gr.Button(i18n("一键训练"), variant="primary")
1339
+ info3 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=10)
1340
+ but3.click(
1341
+ click_train,
1342
+ [
1343
+ exp_dir1,
1344
+ sr2,
1345
+ if_f0_3,
1346
+ spk_id5,
1347
+ save_epoch10,
1348
+ total_epoch11,
1349
+ batch_size12,
1350
+ if_save_latest13,
1351
+ pretrained_G14,
1352
+ pretrained_D15,
1353
+ gpus16,
1354
+ if_cache_gpu17,
1355
+ ],
1356
+ info3,
1357
+ )
1358
+ but4.click(train_index, [exp_dir1], info3)
1359
+ but5.click(
1360
+ train1key,
1361
+ [
1362
+ exp_dir1,
1363
+ sr2,
1364
+ if_f0_3,
1365
+ trainset_dir4,
1366
+ spk_id5,
1367
+ gpus6,
1368
+ np7,
1369
+ f0method8,
1370
+ save_epoch10,
1371
+ total_epoch11,
1372
+ batch_size12,
1373
+ if_save_latest13,
1374
+ pretrained_G14,
1375
+ pretrained_D15,
1376
+ gpus16,
1377
+ if_cache_gpu17,
1378
+ ],
1379
+ info3,
1380
+ )
1381
+
1382
+ with gr.TabItem(i18n("ckpt处理")):
1383
+ with gr.Group():
1384
+ gr.Markdown(value=i18n("模型融合, 可用于测试音色融合"))
1385
+ with gr.Row():
1386
+ ckpt_a = gr.Textbox(label=i18n("A模型路径"), value="", interactive=True)
1387
+ ckpt_b = gr.Textbox(label=i18n("B模型路径"), value="", interactive=True)
1388
+ alpha_a = gr.Slider(
1389
+ minimum=0,
1390
+ maximum=1,
1391
+ label=i18n("A模型权重"),
1392
+ value=0.5,
1393
+ interactive=True,
1394
+ )
1395
+ with gr.Row():
1396
+ sr_ = gr.Radio(
1397
+ label=i18n("目标采样率"),
1398
+ choices=["32k", "40k", "48k"],
1399
+ value="40k",
1400
+ interactive=True,
1401
+ )
1402
+ if_f0_ = gr.Radio(
1403
+ label=i18n("模型是否带音高指导"),
1404
+ choices=["是", "否"],
1405
+ value="是",
1406
+ interactive=True,
1407
+ )
1408
+ info__ = gr.Textbox(
1409
+ label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True
1410
+ )
1411
+ name_to_save0 = gr.Textbox(
1412
+ label=i18n("保存的模型名不带后缀"),
1413
+ value="",
1414
+ max_lines=1,
1415
+ interactive=True,
1416
+ )
1417
+ with gr.Row():
1418
+ but6 = gr.Button(i18n("融合"), variant="primary")
1419
+ info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
1420
+ but6.click(
1421
+ merge,
1422
+ [ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0],
1423
+ info4,
1424
+ ) # def merge(path1,path2,alpha1,sr,f0,info):
1425
+ with gr.Group():
1426
+ gr.Markdown(value=i18n("修改模型信息(仅支持weights文件夹下提取的小模型文件)"))
1427
+ with gr.Row():
1428
+ ckpt_path0 = gr.Textbox(
1429
+ label=i18n("模型路径"), value="", interactive=True
1430
+ )
1431
+ info_ = gr.Textbox(
1432
+ label=i18n("要改的模型信息"), value="", max_lines=8, interactive=True
1433
+ )
1434
+ name_to_save1 = gr.Textbox(
1435
+ label=i18n("保存的文件名, 默认空为和源文件同名"),
1436
+ value="",
1437
+ max_lines=8,
1438
+ interactive=True,
1439
+ )
1440
+ with gr.Row():
1441
+ but7 = gr.Button(i18n("修改"), variant="primary")
1442
+ info5 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
1443
+ but7.click(change_info, [ckpt_path0, info_, name_to_save1], info5)
1444
+ with gr.Group():
1445
+ gr.Markdown(value=i18n("查看模型信息(仅支持weights文件夹下提取的小模型文件)"))
1446
+ with gr.Row():
1447
+ ckpt_path1 = gr.Textbox(
1448
+ label=i18n("模型路径"), value="", interactive=True
1449
+ )
1450
+ but8 = gr.Button(i18n("查看"), variant="primary")
1451
+ info6 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
1452
+ but8.click(show_info, [ckpt_path1], info6)
1453
+ with gr.Group():
1454
+ gr.Markdown(
1455
+ value=i18n(
1456
+ "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况"
1457
+ )
1458
+ )
1459
+ with gr.Row():
1460
+ ckpt_path2 = gr.Textbox(
1461
+ label=i18n("模型路径"),
1462
+ value="E:\\codes\\py39\\logs\\mi-test_f0_48k\\G_23333.pth",
1463
+ interactive=True,
1464
+ )
1465
+ save_name = gr.Textbox(
1466
+ label=i18n("保存名"), value="", interactive=True
1467
+ )
1468
+ sr__ = gr.Radio(
1469
+ label=i18n("目标采样率"),
1470
+ choices=["32k", "40k", "48k"],
1471
+ value="40k",
1472
+ interactive=True,
1473
+ )
1474
+ if_f0__ = gr.Radio(
1475
+ label=i18n("模型是否带音高指导,1是0否"),
1476
+ choices=["1", "0"],
1477
+ value="1",
1478
+ interactive=True,
1479
+ )
1480
+ info___ = gr.Textbox(
1481
+ label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True
1482
+ )
1483
+ but9 = gr.Button(i18n("提取"), variant="primary")
1484
+ info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
1485
+ ckpt_path2.change(change_info_, [ckpt_path2], [sr__, if_f0__])
1486
+ but9.click(
1487
+ extract_small_model,
1488
+ [ckpt_path2, save_name, sr__, if_f0__, info___],
1489
+ info7,
1490
+ )
1491
+
1492
+ with gr.TabItem(i18n("Onnx导出")):
1493
+ with gr.Row():
1494
+ ckpt_dir = gr.Textbox(label=i18n("RVC模型路径"), value="", interactive=True)
1495
+ with gr.Row():
1496
+ onnx_dir = gr.Textbox(
1497
+ label=i18n("Onnx输出路径"), value="", interactive=True
1498
+ )
1499
+ with gr.Row():
1500
+ moevs = gr.Checkbox(label=i18n("MoeVS模型"), value=True)
1501
+ infoOnnx = gr.Label(label="Null")
1502
+ with gr.Row():
1503
+ butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary")
1504
+ butOnnx.click(export_onnx, [ckpt_dir, onnx_dir, moevs], infoOnnx)
1505
+
1506
+ # with gr.TabItem(i18n("招募音高曲线前端编辑器")):
1507
+ # gr.Markdown(value=i18n("加开发群联系我xxxxx"))
1508
+ # with gr.TabItem(i18n("点击查看交流、问题反馈群号")):
1509
+ # gr.Markdown(value=i18n("xxxxx"))
1510
+
1511
+ if iscolab:
1512
+ app.queue(concurrency_count=511, max_size=1022).launch(share=True)
1513
+ else:
1514
+ app.queue(concurrency_count=511, max_size=1022).launch(
1515
+ server_name="0.0.0.0",
1516
+ inbrowser=not noautoopen,
1517
+ server_port=listen_port,
1518
+ quiet=True,
1519
+ )