aliceoq committed on
Commit
3a2cc70
·
1 Parent(s): e3aae0e

simplify GUI

Browse files
Files changed (1) hide show
  1. app.py +42 -1486
app.py CHANGED
@@ -4,10 +4,7 @@ os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
4
  import threading
5
  from time import sleep
6
  from subprocess import Popen
7
- import faiss
8
- from random import shuffle
9
- import json, datetime, requests
10
- from gtts import gTTS
11
  now_dir = os.getcwd()
12
  sys.path.append(now_dir)
13
  tmp = os.path.join(now_dir, "TEMP")
@@ -21,14 +18,20 @@ warnings.filterwarnings("ignore")
21
  torch.manual_seed(114514)
22
  from i18n import I18nAuto
23
 
24
- import signal
25
-
26
- import math
27
-
28
  from utils import load_audio, CSVutil
29
 
30
- global DoFormant, Quefrency, Timbre
 
 
 
 
 
 
 
 
 
31
 
 
32
  if not os.path.isdir('csvdb/'):
33
  os.makedirs('csvdb')
34
  frmnt, stp = open("csvdb/formanting.csv", 'w'), open("csvdb/stop.csv", 'w')
@@ -55,112 +58,12 @@ def download_models():
55
  print("Downloaded hubert base model file successfully. File saved to ./hubert_base.pt.")
56
  else:
57
  raise Exception("Failed to download hubert base model file. Status code: " + str(response.status_code) + ".")
58
-
59
- # Download rmvpe model if not present
60
- if not os.path.isfile('./rmvpe.pt'):
61
- response = requests.get('https://drive.usercontent.google.com/download?id=1Hkn4kNuVFRCNQwyxQFRtmzmMBGpQxptI&export=download&authuser=0&confirm=t&uuid=0b3a40de-465b-4c65-8c41-135b0b45c3f7&at=APZUnTV3lA3LnyTbeuduura6Dmi2:1693724254058')
62
-
63
- if response.status_code == 200:
64
- with open('./rmvpe.pt', 'wb') as f:
65
- f.write(response.content)
66
- print("Downloaded rmvpe model file successfully. File saved to ./rmvpe.pt.")
67
- else:
68
- raise Exception("Failed to download rmvpe model file. Status code: " + str(response.status_code) + ".")
69
 
70
  download_models()
71
 
72
  print("\n-------------------------------\nRVC v2 Easy GUI (Local Edition)\n-------------------------------\n")
73
 
74
- def formant_apply(qfrency, tmbre):
75
- Quefrency = qfrency
76
- Timbre = tmbre
77
- DoFormant = True
78
- CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre)
79
-
80
- return ({"value": Quefrency, "__type__": "update"}, {"value": Timbre, "__type__": "update"})
81
-
82
- def get_fshift_presets():
83
- fshift_presets_list = []
84
- for dirpath, _, filenames in os.walk("./formantshiftcfg/"):
85
- for filename in filenames:
86
- if filename.endswith(".txt"):
87
- fshift_presets_list.append(os.path.join(dirpath,filename).replace('\\','/'))
88
-
89
- if len(fshift_presets_list) > 0:
90
- return fshift_presets_list
91
- else:
92
- return ''
93
-
94
-
95
-
96
- def formant_enabled(cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button):
97
-
98
- if (cbox):
99
-
100
- DoFormant = True
101
- CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre)
102
- #print(f"is checked? - {cbox}\ngot {DoFormant}")
103
-
104
- return (
105
- {"value": True, "__type__": "update"},
106
- {"visible": True, "__type__": "update"},
107
- {"visible": True, "__type__": "update"},
108
- {"visible": True, "__type__": "update"},
109
- {"visible": True, "__type__": "update"},
110
- {"visible": True, "__type__": "update"},
111
- )
112
-
113
-
114
- else:
115
-
116
- DoFormant = False
117
- CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre)
118
-
119
- #print(f"is checked? - {cbox}\ngot {DoFormant}")
120
- return (
121
- {"value": False, "__type__": "update"},
122
- {"visible": False, "__type__": "update"},
123
- {"visible": False, "__type__": "update"},
124
- {"visible": False, "__type__": "update"},
125
- {"visible": False, "__type__": "update"},
126
- {"visible": False, "__type__": "update"},
127
- {"visible": False, "__type__": "update"},
128
- )
129
-
130
-
131
-
132
- def preset_apply(preset, qfer, tmbr):
133
- if str(preset) != '':
134
- with open(str(preset), 'r') as p:
135
- content = p.readlines()
136
- qfer, tmbr = content[0].split('\n')[0], content[1]
137
-
138
- formant_apply(qfer, tmbr)
139
- else:
140
- pass
141
- return ({"value": qfer, "__type__": "update"}, {"value": tmbr, "__type__": "update"})
142
-
143
- def update_fshift_presets(preset, qfrency, tmbre):
144
-
145
- qfrency, tmbre = preset_apply(preset, qfrency, tmbre)
146
-
147
- if (str(preset) != ''):
148
- with open(str(preset), 'r') as p:
149
- content = p.readlines()
150
- qfrency, tmbre = content[0].split('\n')[0], content[1]
151
-
152
- formant_apply(qfrency, tmbre)
153
- else:
154
- pass
155
- return (
156
- {"choices": get_fshift_presets(), "__type__": "update"},
157
- {"value": qfrency, "__type__": "update"},
158
- {"value": tmbre, "__type__": "update"},
159
- )
160
-
161
  i18n = I18nAuto()
162
- #i18n.print()
163
- # 判断是否有能用来训练和加速推理的N卡
164
  ngpu = torch.cuda.device_count()
165
  gpu_infos = []
166
  mem = []
@@ -253,23 +156,13 @@ for root, dirs, files in os.walk(index_root, topdown=False):
253
  if name.endswith(".index") and "trained" not in name:
254
  index_paths.append("%s/%s" % (root, name))
255
 
256
-
257
-
258
  def vc_single(
259
  sid,
260
  input_audio_path,
261
  f0_up_key,
262
  f0_file,
263
- f0_method,
264
  file_index,
265
- #file_index2,
266
- # file_big_npy,
267
  index_rate,
268
- filter_radius,
269
- resample_sr,
270
- rms_mix_rate,
271
- protect,
272
- crepe_hop_length,
273
  ): # spk_item, input_audio0, vc_transform0,f0_file,f0method0
274
  global tgt_sr, net_g, vc, hubert_model, version
275
  if input_audio_path is None:
@@ -307,7 +200,6 @@ def vc_single(
307
  f0_up_key,
308
  f0_method,
309
  file_index,
310
- # file_big_npy,
311
  index_rate,
312
  if_f0,
313
  filter_radius,
@@ -337,86 +229,6 @@ def vc_single(
337
  print(info)
338
  return info, (None, None)
339
 
340
-
341
- def vc_multi(
342
- sid,
343
- dir_path,
344
- opt_root,
345
- paths,
346
- f0_up_key,
347
- f0_method,
348
- file_index,
349
- file_index2,
350
- # file_big_npy,
351
- index_rate,
352
- filter_radius,
353
- resample_sr,
354
- rms_mix_rate,
355
- protect,
356
- format1,
357
- crepe_hop_length,
358
- ):
359
- try:
360
- dir_path = (
361
- dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
362
- ) # 防止小白拷路径头尾带了空格和"和回车
363
- opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
364
- os.makedirs(opt_root, exist_ok=True)
365
- try:
366
- if dir_path != "":
367
- paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
368
- else:
369
- paths = [path.name for path in paths]
370
- except:
371
- traceback.print_exc()
372
- paths = [path.name for path in paths]
373
- infos = []
374
- for path in paths:
375
- info, opt = vc_single(
376
- sid,
377
- path,
378
- f0_up_key,
379
- None,
380
- f0_method,
381
- file_index,
382
- # file_big_npy,
383
- index_rate,
384
- filter_radius,
385
- resample_sr,
386
- rms_mix_rate,
387
- protect,
388
- crepe_hop_length
389
- )
390
- if "Success" in info:
391
- try:
392
- tgt_sr, audio_opt = opt
393
- if format1 in ["wav", "flac"]:
394
- sf.write(
395
- "%s/%s.%s" % (opt_root, os.path.basename(path), format1),
396
- audio_opt,
397
- tgt_sr,
398
- )
399
- else:
400
- path = "%s/%s.wav" % (opt_root, os.path.basename(path))
401
- sf.write(
402
- path,
403
- audio_opt,
404
- tgt_sr,
405
- )
406
- if os.path.exists(path):
407
- os.system(
408
- "ffmpeg -i %s -vn %s -q:a 2 -y"
409
- % (path, path[:-4] + ".%s" % format1)
410
- )
411
- except:
412
- info += traceback.format_exc()
413
- infos.append("%s->%s" % (os.path.basename(path), info))
414
- yield "\n".join(infos)
415
- yield "\n".join(infos)
416
- except:
417
- yield traceback.format_exc()
418
-
419
- # 一个选项卡全局只能有一个音色
420
  def get_vc(sid):
421
  global n_spk, tgt_sr, net_g, vc, cpt, version
422
  if sid == "" or sid == []:
@@ -477,7 +289,6 @@ def get_vc(sid):
477
  n_spk = cpt["config"][-3]
478
  return {"visible": False, "maximum": n_spk, "__type__": "update"}
479
 
480
-
481
  def change_choices():
482
  names = []
483
  for name in os.listdir(weight_root):
@@ -488,23 +299,17 @@ def change_choices():
488
  for name in files:
489
  if name.endswith(".index") and "trained" not in name:
490
  index_paths.append("%s/%s" % (root, name))
491
- return {"choices": sorted(names), "__type__": "update"}, {
492
- "choices": sorted(index_paths),
493
- "__type__": "update",
494
- }
495
-
496
 
497
  def clean():
498
  return {"value": "", "__type__": "update"}
499
 
500
-
501
  sr_dict = {
502
  "32k": 32000,
503
  "40k": 40000,
504
  "48k": 48000,
505
  }
506
 
507
-
508
  def if_done(done, p):
509
  while 1:
510
  if p.poll() == None:
@@ -513,7 +318,6 @@ def if_done(done, p):
513
  break
514
  done[0] = True
515
 
516
-
517
  def if_done_multi(done, ps):
518
  while 1:
519
  # poll==None代表进程未结束
@@ -528,41 +332,6 @@ def if_done_multi(done, ps):
528
  break
529
  done[0] = True
530
 
531
-
532
- def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
533
- sr = sr_dict[sr]
534
- os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
535
- f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
536
- f.close()
537
- cmd = (
538
- config.python_cmd
539
- + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "
540
- % (trainset_dir, sr, n_p, now_dir, exp_dir)
541
- + str(config.noparallel)
542
- )
543
- print(cmd)
544
- p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
545
- ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
546
- done = [False]
547
- threading.Thread(
548
- target=if_done,
549
- args=(
550
- done,
551
- p,
552
- ),
553
- ).start()
554
- while 1:
555
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
556
- yield (f.read())
557
- sleep(1)
558
- if done[0] == True:
559
- break
560
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
561
- log = f.read()
562
- print(log)
563
- yield log
564
-
565
- # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
566
  def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
567
  gpus = gpus.split("-")
568
  os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
@@ -648,611 +417,17 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
648
  print(log)
649
  yield log
650
 
651
-
652
- def change_sr2(sr2, if_f0_3, version19):
653
- path_str = "" if version19 == "v1" else "_v2"
654
- f0_str = "f0" if if_f0_3 else ""
655
- if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
656
- if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
657
- if (if_pretrained_generator_exist == False):
658
- print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
659
- if (if_pretrained_discriminator_exist == False):
660
- print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
661
- return (
662
- ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
663
- ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
664
- {"visible": True, "__type__": "update"}
665
- )
666
-
667
- def change_version19(sr2, if_f0_3, version19):
668
- path_str = "" if version19 == "v1" else "_v2"
669
- f0_str = "f0" if if_f0_3 else ""
670
- if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
671
- if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
672
- if (if_pretrained_generator_exist == False):
673
- print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
674
- if (if_pretrained_discriminator_exist == False):
675
- print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
676
- return (
677
- ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
678
- ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
679
- )
680
-
681
-
682
- def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
683
- path_str = "" if version19 == "v1" else "_v2"
684
- if_pretrained_generator_exist = os.access("pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK)
685
- if_pretrained_discriminator_exist = os.access("pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK)
686
- if (if_pretrained_generator_exist == False):
687
- print("pretrained%s/f0G%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
688
- if (if_pretrained_discriminator_exist == False):
689
- print("pretrained%s/f0D%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
690
- if if_f0_3:
691
- return (
692
- {"visible": True, "__type__": "update"},
693
- "pretrained%s/f0G%s.pth" % (path_str, sr2) if if_pretrained_generator_exist else "",
694
- "pretrained%s/f0D%s.pth" % (path_str, sr2) if if_pretrained_discriminator_exist else "",
695
- )
696
- return (
697
- {"visible": False, "__type__": "update"},
698
- ("pretrained%s/G%s.pth" % (path_str, sr2)) if if_pretrained_generator_exist else "",
699
- ("pretrained%s/D%s.pth" % (path_str, sr2)) if if_pretrained_discriminator_exist else "",
700
- )
701
-
702
-
703
- global log_interval
704
-
705
-
706
- def set_log_interval(exp_dir, batch_size12):
707
- log_interval = 1
708
-
709
- folder_path = os.path.join(exp_dir, "1_16k_wavs")
710
-
711
- if os.path.exists(folder_path) and os.path.isdir(folder_path):
712
- wav_files = [f for f in os.listdir(folder_path) if f.endswith(".wav")]
713
- if wav_files:
714
- sample_size = len(wav_files)
715
- log_interval = math.ceil(sample_size / batch_size12)
716
- if log_interval > 1:
717
- log_interval += 1
718
- return log_interval
719
-
720
- # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
721
- def click_train(
722
- exp_dir1,
723
- sr2,
724
- if_f0_3,
725
- spk_id5,
726
- save_epoch10,
727
- total_epoch11,
728
- batch_size12,
729
- if_save_latest13,
730
- pretrained_G14,
731
- pretrained_D15,
732
- gpus16,
733
- if_cache_gpu17,
734
- if_save_every_weights18,
735
- version19,
736
- ):
737
- CSVutil('csvdb/stop.csv', 'w+', 'formanting', False)
738
- # 生成filelist
739
- exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
740
- os.makedirs(exp_dir, exist_ok=True)
741
- gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
742
- feature_dir = (
743
- "%s/3_feature256" % (exp_dir)
744
- if version19 == "v1"
745
- else "%s/3_feature768" % (exp_dir)
746
- )
747
-
748
- log_interval = set_log_interval(exp_dir, batch_size12)
749
-
750
- if if_f0_3:
751
- f0_dir = "%s/2a_f0" % (exp_dir)
752
- f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
753
- names = (
754
- set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
755
- & set([name.split(".")[0] for name in os.listdir(feature_dir)])
756
- & set([name.split(".")[0] for name in os.listdir(f0_dir)])
757
- & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
758
- )
759
- else:
760
- names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
761
- [name.split(".")[0] for name in os.listdir(feature_dir)]
762
- )
763
- opt = []
764
- for name in names:
765
- if if_f0_3:
766
- opt.append(
767
- "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
768
- % (
769
- gt_wavs_dir.replace("\\", "\\\\"),
770
- name,
771
- feature_dir.replace("\\", "\\\\"),
772
- name,
773
- f0_dir.replace("\\", "\\\\"),
774
- name,
775
- f0nsf_dir.replace("\\", "\\\\"),
776
- name,
777
- spk_id5,
778
- )
779
- )
780
- else:
781
- opt.append(
782
- "%s/%s.wav|%s/%s.npy|%s"
783
- % (
784
- gt_wavs_dir.replace("\\", "\\\\"),
785
- name,
786
- feature_dir.replace("\\", "\\\\"),
787
- name,
788
- spk_id5,
789
- )
790
- )
791
- fea_dim = 256 if version19 == "v1" else 768
792
- if if_f0_3:
793
- for _ in range(2):
794
- opt.append(
795
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
796
- % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
797
- )
798
- else:
799
- for _ in range(2):
800
- opt.append(
801
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
802
- % (now_dir, sr2, now_dir, fea_dim, spk_id5)
803
- )
804
- shuffle(opt)
805
- with open("%s/filelist.txt" % exp_dir, "w") as f:
806
- f.write("\n".join(opt))
807
- print("write filelist done")
808
- # 生成config#无需生成config
809
- # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
810
- print("use gpus:", gpus16)
811
- if pretrained_G14 == "":
812
- print("no pretrained Generator")
813
- if pretrained_D15 == "":
814
- print("no pretrained Discriminator")
815
- if gpus16:
816
- cmd = (
817
- config.python_cmd
818
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
819
- % (
820
- exp_dir1,
821
- sr2,
822
- 1 if if_f0_3 else 0,
823
- batch_size12,
824
- gpus16,
825
- total_epoch11,
826
- save_epoch10,
827
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
828
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
829
- 1 if if_save_latest13 == True else 0,
830
- 1 if if_cache_gpu17 == True else 0,
831
- 1 if if_save_every_weights18 == True else 0,
832
- version19,
833
- log_interval,
834
- )
835
- )
836
- else:
837
- cmd = (
838
- config.python_cmd
839
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
840
- % (
841
- exp_dir1,
842
- sr2,
843
- 1 if if_f0_3 else 0,
844
- batch_size12,
845
- total_epoch11,
846
- save_epoch10,
847
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "\b",
848
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "\b",
849
- 1 if if_save_latest13 == True else 0,
850
- 1 if if_cache_gpu17 == True else 0,
851
- 1 if if_save_every_weights18 == True else 0,
852
- version19,
853
- log_interval,
854
- )
855
- )
856
- print(cmd)
857
- p = Popen(cmd, shell=True, cwd=now_dir)
858
- global PID
859
- PID = p.pid
860
- p.wait()
861
- return ("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"})
862
-
863
-
864
- # but4.click(train_index, [exp_dir1], info3)
865
- def train_index(exp_dir1, version19):
866
- exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
867
- os.makedirs(exp_dir, exist_ok=True)
868
- feature_dir = (
869
- "%s/3_feature256" % (exp_dir)
870
- if version19 == "v1"
871
- else "%s/3_feature768" % (exp_dir)
872
- )
873
- if os.path.exists(feature_dir) == False:
874
- return "请先进行特征提取!"
875
- listdir_res = list(os.listdir(feature_dir))
876
- if len(listdir_res) == 0:
877
- return "请先进行特征提取!"
878
- npys = []
879
- for name in sorted(listdir_res):
880
- phone = np.load("%s/%s" % (feature_dir, name))
881
- npys.append(phone)
882
- big_npy = np.concatenate(npys, 0)
883
- big_npy_idx = np.arange(big_npy.shape[0])
884
- np.random.shuffle(big_npy_idx)
885
- big_npy = big_npy[big_npy_idx]
886
- np.save("%s/total_fea.npy" % exp_dir, big_npy)
887
- # n_ivf = big_npy.shape[0] // 39
888
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
889
- infos = []
890
- infos.append("%s,%s" % (big_npy.shape, n_ivf))
891
- yield "\n".join(infos)
892
- index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
893
- # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
894
- infos.append("training")
895
- yield "\n".join(infos)
896
- index_ivf = faiss.extract_index_ivf(index) #
897
- index_ivf.nprobe = 1
898
- index.train(big_npy)
899
- faiss.write_index(
900
- index,
901
- "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
902
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
903
- )
904
- # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
905
- infos.append("adding")
906
- yield "\n".join(infos)
907
- batch_size_add = 8192
908
- for i in range(0, big_npy.shape[0], batch_size_add):
909
- index.add(big_npy[i : i + batch_size_add])
910
- faiss.write_index(
911
- index,
912
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
913
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
914
- )
915
- infos.append(
916
- "成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index"
917
- % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
918
- )
919
- # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
920
- # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
921
- yield "\n".join(infos)
922
-
923
-
924
- # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
925
- def train1key(
926
- exp_dir1,
927
- sr2,
928
- if_f0_3,
929
- trainset_dir4,
930
- spk_id5,
931
- np7,
932
- f0method8,
933
- save_epoch10,
934
- total_epoch11,
935
- batch_size12,
936
- if_save_latest13,
937
- pretrained_G14,
938
- pretrained_D15,
939
- gpus16,
940
- if_cache_gpu17,
941
- if_save_every_weights18,
942
- version19,
943
- echl
944
- ):
945
- infos = []
946
-
947
- def get_info_str(strr):
948
- infos.append(strr)
949
- return "\n".join(infos)
950
-
951
- model_log_dir = "%s/logs/%s" % (now_dir, exp_dir1)
952
- preprocess_log_path = "%s/preprocess.log" % model_log_dir
953
- extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir
954
- gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir
955
- feature_dir = (
956
- "%s/3_feature256" % model_log_dir
957
- if version19 == "v1"
958
- else "%s/3_feature768" % model_log_dir
959
- )
960
-
961
- os.makedirs(model_log_dir, exist_ok=True)
962
- #########step1:处理数据
963
- open(preprocess_log_path, "w").close()
964
- cmd = (
965
- config.python_cmd
966
- + " trainset_preprocess_pipeline_print.py %s %s %s %s "
967
- % (trainset_dir4, sr_dict[sr2], np7, model_log_dir)
968
- + str(config.noparallel)
969
- )
970
- yield get_info_str(i18n("step1:正在处理数据"))
971
- yield get_info_str(cmd)
972
- p = Popen(cmd, shell=True)
973
- p.wait()
974
- with open(preprocess_log_path, "r") as f:
975
- print(f.read())
976
- #########step2a:提取音高
977
- open(extract_f0_feature_log_path, "w")
978
- if if_f0_3:
979
- yield get_info_str("step2a:正在提取音高")
980
- cmd = config.python_cmd + " extract_f0_print.py %s %s %s %s" % (
981
- model_log_dir,
982
- np7,
983
- f0method8,
984
- echl
985
- )
986
- yield get_info_str(cmd)
987
- p = Popen(cmd, shell=True, cwd=now_dir)
988
- p.wait()
989
- with open(extract_f0_feature_log_path, "r") as f:
990
- print(f.read())
991
- else:
992
- yield get_info_str(i18n("step2a:无需提取音高"))
993
- #######step2b:提取特征
994
- yield get_info_str(i18n("step2b:正在提取特征"))
995
- gpus = gpus16.split("-")
996
- leng = len(gpus)
997
- ps = []
998
- for idx, n_g in enumerate(gpus):
999
- cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s %s" % (
1000
- config.device,
1001
- leng,
1002
- idx,
1003
- n_g,
1004
- model_log_dir,
1005
- version19,
1006
- )
1007
- yield get_info_str(cmd)
1008
- p = Popen(
1009
- cmd, shell=True, cwd=now_dir
1010
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
1011
- ps.append(p)
1012
- for p in ps:
1013
- p.wait()
1014
- with open(extract_f0_feature_log_path, "r") as f:
1015
- print(f.read())
1016
- #######step3a:训练模型
1017
- yield get_info_str(i18n("step3a:正在训练模型"))
1018
- # 生成filelist
1019
- if if_f0_3:
1020
- f0_dir = "%s/2a_f0" % model_log_dir
1021
- f0nsf_dir = "%s/2b-f0nsf" % model_log_dir
1022
- names = (
1023
- set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
1024
- & set([name.split(".")[0] for name in os.listdir(feature_dir)])
1025
- & set([name.split(".")[0] for name in os.listdir(f0_dir)])
1026
- & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
1027
- )
1028
- else:
1029
- names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
1030
- [name.split(".")[0] for name in os.listdir(feature_dir)]
1031
- )
1032
- opt = []
1033
- for name in names:
1034
- if if_f0_3:
1035
- opt.append(
1036
- "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
1037
- % (
1038
- gt_wavs_dir.replace("\\", "\\\\"),
1039
- name,
1040
- feature_dir.replace("\\", "\\\\"),
1041
- name,
1042
- f0_dir.replace("\\", "\\\\"),
1043
- name,
1044
- f0nsf_dir.replace("\\", "\\\\"),
1045
- name,
1046
- spk_id5,
1047
- )
1048
- )
1049
- else:
1050
- opt.append(
1051
- "%s/%s.wav|%s/%s.npy|%s"
1052
- % (
1053
- gt_wavs_dir.replace("\\", "\\\\"),
1054
- name,
1055
- feature_dir.replace("\\", "\\\\"),
1056
- name,
1057
- spk_id5,
1058
- )
1059
- )
1060
- fea_dim = 256 if version19 == "v1" else 768
1061
- if if_f0_3:
1062
- for _ in range(2):
1063
- opt.append(
1064
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
1065
- % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
1066
- )
1067
- else:
1068
- for _ in range(2):
1069
- opt.append(
1070
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
1071
- % (now_dir, sr2, now_dir, fea_dim, spk_id5)
1072
- )
1073
- shuffle(opt)
1074
- with open("%s/filelist.txt" % model_log_dir, "w") as f:
1075
- f.write("\n".join(opt))
1076
- yield get_info_str("write filelist done")
1077
- if gpus16:
1078
- cmd = (
1079
- config.python_cmd
1080
- +" train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
1081
- % (
1082
- exp_dir1,
1083
- sr2,
1084
- 1 if if_f0_3 else 0,
1085
- batch_size12,
1086
- gpus16,
1087
- total_epoch11,
1088
- save_epoch10,
1089
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
1090
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
1091
- 1 if if_save_latest13 == True else 0,
1092
- 1 if if_cache_gpu17 == True else 0,
1093
- 1 if if_save_every_weights18 == True else 0,
1094
- version19,
1095
- )
1096
- )
1097
- else:
1098
- cmd = (
1099
- config.python_cmd
1100
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
1101
- % (
1102
- exp_dir1,
1103
- sr2,
1104
- 1 if if_f0_3 else 0,
1105
- batch_size12,
1106
- total_epoch11,
1107
- save_epoch10,
1108
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
1109
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
1110
- 1 if if_save_latest13 == True else 0,
1111
- 1 if if_cache_gpu17 == True else 0,
1112
- 1 if if_save_every_weights18 == True else 0,
1113
- version19,
1114
- )
1115
- )
1116
- yield get_info_str(cmd)
1117
- p = Popen(cmd, shell=True, cwd=now_dir)
1118
- p.wait()
1119
- yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"))
1120
- #######step3b:训练索引
1121
- npys = []
1122
- listdir_res = list(os.listdir(feature_dir))
1123
- for name in sorted(listdir_res):
1124
- phone = np.load("%s/%s" % (feature_dir, name))
1125
- npys.append(phone)
1126
- big_npy = np.concatenate(npys, 0)
1127
-
1128
- big_npy_idx = np.arange(big_npy.shape[0])
1129
- np.random.shuffle(big_npy_idx)
1130
- big_npy = big_npy[big_npy_idx]
1131
- np.save("%s/total_fea.npy" % model_log_dir, big_npy)
1132
-
1133
- # n_ivf = big_npy.shape[0] // 39
1134
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
1135
- yield get_info_str("%s,%s" % (big_npy.shape, n_ivf))
1136
- index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
1137
- yield get_info_str("training index")
1138
- index_ivf = faiss.extract_index_ivf(index) #
1139
- index_ivf.nprobe = 1
1140
- index.train(big_npy)
1141
- faiss.write_index(
1142
- index,
1143
- "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
1144
- % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
1145
- )
1146
- yield get_info_str("adding index")
1147
- batch_size_add = 8192
1148
- for i in range(0, big_npy.shape[0], batch_size_add):
1149
- index.add(big_npy[i : i + batch_size_add])
1150
- faiss.write_index(
1151
- index,
1152
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
1153
- % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
1154
- )
1155
- yield get_info_str(
1156
- "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s_%s.index"
1157
- % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
1158
- )
1159
- yield get_info_str(i18n("全流程结束!"))
1160
-
1161
-
1162
  def whethercrepeornah(radio):
1163
  mango = True if radio == 'mangio-crepe' or radio == 'mangio-crepe-tiny' else False
1164
  return ({"visible": mango, "__type__": "update"})
1165
 
1166
- # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
1167
- def change_info_(ckpt_path):
1168
- if (
1169
- os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log"))
1170
- == False
1171
- ):
1172
- return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
1173
- try:
1174
- with open(
1175
- ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r"
1176
- ) as f:
1177
- info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
1178
- sr, f0 = info["sample_rate"], info["if_f0"]
1179
- version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
1180
- return sr, str(f0), version
1181
- except:
1182
- traceback.print_exc()
1183
- return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
1184
-
1185
-
1186
- from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
1187
-
1188
-
1189
- def export_onnx(ModelPath, ExportedPath, MoeVS=True):
1190
- cpt = torch.load(ModelPath, map_location="cpu")
1191
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
1192
- hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768#cpt["config"][-2] # hidden_channels,为768Vec做准备
1193
-
1194
- test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
1195
- test_phone_lengths = torch.tensor([200]).long() # hidden unit 长度(貌似没啥用)
1196
- test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹)
1197
- test_pitchf = torch.rand(1, 200) # nsf基频
1198
- test_ds = torch.LongTensor([0]) # 说话人ID
1199
- test_rnd = torch.rand(1, 192, 200) # 噪声(加入随机因子)
1200
-
1201
- device = "cpu" # 导出时设备(不影响使用模型)
1202
-
1203
-
1204
- net_g = SynthesizerTrnMsNSFsidM(
1205
- *cpt["config"], is_half=False,version=cpt.get("version","v1")
1206
- ) # fp32导出(C++要支持fp16必须手动将内存重新排列所以暂时不用fp16)
1207
- net_g.load_state_dict(cpt["weight"], strict=False)
1208
- input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
1209
- output_names = [
1210
- "audio",
1211
- ]
1212
- # net_g.construct_spkmixmap(n_speaker) 多角色混合轨道导出
1213
- torch.onnx.export(
1214
- net_g,
1215
- (
1216
- test_phone.to(device),
1217
- test_phone_lengths.to(device),
1218
- test_pitch.to(device),
1219
- test_pitchf.to(device),
1220
- test_ds.to(device),
1221
- test_rnd.to(device),
1222
- ),
1223
- ExportedPath,
1224
- dynamic_axes={
1225
- "phone": [1],
1226
- "pitch": [1],
1227
- "pitchf": [1],
1228
- "rnd": [2],
1229
- },
1230
- do_constant_folding=False,
1231
- opset_version=16,
1232
- verbose=False,
1233
- input_names=input_names,
1234
- output_names=output_names,
1235
- )
1236
- return "Finished"
1237
-
1238
  #region RVC WebUI App
1239
-
1240
- def get_presets():
1241
- data = None
1242
- with open('../inference-presets.json', 'r') as file:
1243
- data = json.load(file)
1244
- preset_names = []
1245
- for preset in data['presets']:
1246
- preset_names.append(preset['name'])
1247
-
1248
- return preset_names
1249
-
1250
  def change_choices2():
1251
  audio_files=[]
1252
  for filename in os.listdir("./audios"):
1253
  if filename.endswith(('.wav','.mp3','.ogg','.flac','.m4a','.aac','.mp4')):
1254
  audio_files.append(os.path.join('./audios',filename).replace('\\', '/'))
1255
- return {"choices": sorted(audio_files), "__type__": "update"}, {"__type__": "update"}
1256
 
1257
  audio_files=[]
1258
  for filename in os.listdir("./audios"):
@@ -1282,12 +457,6 @@ def get_indexes():
1282
  else:
1283
  return ''
1284
 
1285
- def get_name():
1286
- if len(audio_files) > 0:
1287
- return sorted(audio_files)[0]
1288
- else:
1289
- return ''
1290
-
1291
  def save_to_wav(record_button):
1292
  if record_button is None:
1293
  pass
@@ -1361,136 +530,18 @@ def download_from_url(url, model):
1361
  return "Success."
1362
  except:
1363
  return "There's been an error."
1364
- def success_message(face):
1365
- return f'{face.name} has been uploaded.', 'None'
1366
- def mouth(size, face, voice, faces):
1367
- if size == 'Half':
1368
- size = 2
1369
- else:
1370
- size = 1
1371
- if faces == 'None':
1372
- character = face.name
1373
- else:
1374
- if faces == 'Ben Shapiro':
1375
- character = '/content/wav2lip-HD/inputs/ben-shapiro-10.mp4'
1376
- elif faces == 'Andrew Tate':
1377
- character = '/content/wav2lip-HD/inputs/tate-7.mp4'
1378
- command = "python inference.py " \
1379
- "--checkpoint_path checkpoints/wav2lip.pth " \
1380
- f"--face {character} " \
1381
- f"--audio {voice} " \
1382
- "--pads 0 20 0 0 " \
1383
- "--outfile /content/wav2lip-HD/outputs/result.mp4 " \
1384
- "--fps 24 " \
1385
- f"--resize_factor {size}"
1386
- process = subprocess.Popen(command, shell=True, cwd='/content/wav2lip-HD/Wav2Lip-master')
1387
- stdout, stderr = process.communicate()
1388
- return '/content/wav2lip-HD/outputs/result.mp4', 'Animation completed.'
1389
- eleven_voices = ['Adam','Antoni','Josh','Arnold','Sam','Bella','Rachel','Domi','Elli']
1390
- eleven_voices_ids=['pNInz6obpgDQGcFmaJgB','ErXwobaYiN019PkySvjV','TxGEqnHWrfWFTfGW9XjX','VR6AewLTigWG4xSOukaG','yoZ06aMxZJJ28mfd3POQ','EXAVITQu4vr4xnSDxMaL','21m00Tcm4TlvDq8ikWAM','AZnzlk1XvdvUeBnXmlld','MF3mGyEYCl7XYWbV9V6O']
1391
- chosen_voice = dict(zip(eleven_voices, eleven_voices_ids))
1392
-
1393
- def stoptraining(mim):
1394
- if int(mim) == 1:
1395
- try:
1396
- CSVutil('csvdb/stop.csv', 'w+', 'stop', 'True')
1397
- os.kill(PID, signal.SIGTERM)
1398
- except Exception as e:
1399
- print(f"Couldn't click due to {e}")
1400
- return (
1401
- {"visible": False, "__type__": "update"},
1402
- {"visible": True, "__type__": "update"},
1403
- )
1404
-
1405
-
1406
- def elevenTTS(xiapi, text, id, lang):
1407
- if xiapi!= '' and id !='':
1408
- choice = chosen_voice[id]
1409
- CHUNK_SIZE = 1024
1410
- url = f"https://api.elevenlabs.io/v1/text-to-speech/{choice}"
1411
- headers = {
1412
- "Accept": "audio/mpeg",
1413
- "Content-Type": "application/json",
1414
- "xi-api-key": xiapi
1415
- }
1416
- if lang == 'en':
1417
- data = {
1418
- "text": text,
1419
- "model_id": "eleven_monolingual_v1",
1420
- "voice_settings": {
1421
- "stability": 0.5,
1422
- "similarity_boost": 0.5
1423
- }
1424
- }
1425
- else:
1426
- data = {
1427
- "text": text,
1428
- "model_id": "eleven_multilingual_v1",
1429
- "voice_settings": {
1430
- "stability": 0.5,
1431
- "similarity_boost": 0.5
1432
- }
1433
- }
1434
-
1435
- response = requests.post(url, json=data, headers=headers)
1436
- with open('./temp_eleven.mp3', 'wb') as f:
1437
- for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
1438
- if chunk:
1439
- f.write(chunk)
1440
- aud_path = save_to_wav('./temp_eleven.mp3')
1441
- return aud_path, aud_path
1442
- else:
1443
- tts = gTTS(text, lang=lang)
1444
- tts.save('./temp_gTTS.mp3')
1445
- aud_path = save_to_wav('./temp_gTTS.mp3')
1446
- return aud_path, aud_path
1447
-
1448
- def upload_to_dataset(files, dir):
1449
- if dir == '':
1450
- dir = './dataset'
1451
- if not os.path.exists(dir):
1452
- os.makedirs(dir)
1453
- count = 0
1454
- for file in files:
1455
- path=file.name
1456
- shutil.copy2(path,dir)
1457
- count += 1
1458
- return f' {count} files uploaded to {dir}.'
1459
-
1460
- def zip_downloader(model):
1461
- if not os.path.exists(f'./weights/{model}.pth'):
1462
- return {"__type__": "update"}, f'Make sure the Voice Name is correct. I could not find {model}.pth'
1463
- index_found = False
1464
- for file in os.listdir(f'./logs/{model}'):
1465
- if file.endswith('.index') and 'added' in file:
1466
- log_file = file
1467
- index_found = True
1468
- if index_found:
1469
- return [f'./weights/{model}.pth', f'./logs/{model}/{log_file}'], "Done"
1470
- else:
1471
- return f'./weights/{model}.pth', "Could not find Index file."
1472
 
1473
  with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1474
- with gr.Tabs():
1475
  with gr.TabItem("Inference"):
1476
- gr.HTML("<h1> Magic Vocals </h1>")
1477
-
1478
- # Inference Preset Row
1479
- # with gr.Row():
1480
- # mangio_preset = gr.Dropdown(label="Inference Preset", choices=sorted(get_presets()))
1481
- # mangio_preset_name_save = gr.Textbox(
1482
- # label="Your preset name"
1483
- # )
1484
- # mangio_preset_save_btn = gr.Button('Save Preset', variant="primary")
1485
 
1486
- # Other RVC stuff
1487
  with gr.Row():
1488
  sid0 = gr.Dropdown(label="1.Choose your Model.", choices=sorted(names), value=check_for_name())
1489
  refresh_button = gr.Button("Refresh", variant="primary")
1490
  if check_for_name() != '':
1491
  get_vc(sorted(names)[0])
1492
- vc_transform0 = gr.Number(label="Optional: You can change the pitch here or leave it at 0.", value=0)
1493
- #clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
1494
  spk_item = gr.Slider(
1495
  minimum=0,
1496
  maximum=2333,
@@ -1522,163 +573,37 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1522
  dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0])
1523
  dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio0])
1524
  refresh_button2 = gr.Button("Refresh", variant="primary", size='sm')
 
1525
  record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0])
1526
  record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio0])
1527
- with gr.Row():
1528
- with gr.Accordion('Text To Speech', open=False):
1529
- with gr.Column():
1530
- lang = gr.Radio(label='Chinese & Japanese do not work with ElevenLabs currently.',choices=['en','es','fr','pt','zh-CN','de','hi','ja'], value='en')
1531
- api_box = gr.Textbox(label="Enter your API Key for ElevenLabs, or leave empty to use GoogleTTS", value='')
1532
- elevenid=gr.Dropdown(label="Voice:", choices=eleven_voices)
1533
- with gr.Column():
1534
- tfs = gr.Textbox(label="Input your Text", interactive=True, value="This is a test.")
1535
- tts_button = gr.Button(value="Speak")
1536
- tts_button.click(fn=elevenTTS, inputs=[api_box,tfs, elevenid, lang], outputs=[record_button, input_audio0])
1537
- with gr.Row():
1538
- with gr.Accordion('Wav2Lip', open=False):
1539
- with gr.Row():
1540
- size = gr.Radio(label='Resolution:',choices=['Half','Full'])
1541
- face = gr.UploadButton("Upload A Character",type='file')
1542
- faces = gr.Dropdown(label="OR Choose one:", choices=['None','Ben Shapiro','Andrew Tate'])
1543
- with gr.Row():
1544
- preview = gr.Textbox(label="Status:",interactive=False)
1545
- face.upload(fn=success_message,inputs=[face], outputs=[preview, faces])
1546
- with gr.Row():
1547
- animation = gr.Video(type='filepath')
1548
- refresh_button2.click(fn=change_choices2, inputs=[], outputs=[input_audio0, animation])
1549
- with gr.Row():
1550
- animate_button = gr.Button('Animate')
1551
-
1552
  with gr.Column():
1553
- with gr.Accordion("Index Settings", open=False):
1554
- file_index1 = gr.Dropdown(
1555
- label="3. Path to your added.index file (if it didn't automatically find it.)",
1556
- choices=get_indexes(),
1557
- value=get_index(),
1558
- interactive=True,
1559
- )
1560
- sid0.change(fn=match_index, inputs=[sid0],outputs=[file_index1])
1561
- refresh_button.click(
1562
- fn=change_choices, inputs=[], outputs=[sid0, file_index1]
1563
- )
1564
- # file_big_npy1 = gr.Textbox(
1565
- # label=i18n("特征文件路径"),
1566
- # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1567
- # interactive=True,
1568
- # )
1569
- index_rate1 = gr.Slider(
1570
- minimum=0,
1571
- maximum=1,
1572
- label=i18n("检索特征占比"),
1573
- value=0.66,
1574
- interactive=True,
1575
- )
1576
  vc_output2 = gr.Audio(
1577
  label="Output Audio (Click on the Three Dots in the Right Corner to Download)",
1578
  type='filepath',
1579
  interactive=False,
1580
  )
1581
- animate_button.click(fn=mouth, inputs=[size, face, vc_output2, faces], outputs=[animation, preview])
1582
- with gr.Accordion("Advanced Settings", open=False):
1583
- f0method0 = gr.Radio(
1584
- label="Optional: Change the Pitch Extraction Algorithm.\nExtraction methods are sorted from 'worst quality' to 'best quality'.\nmangio-crepe may or may not be better than rmvpe in cases where 'smoothness' is more important, but rmvpe is the best overall.",
1585
- choices=["pm", "dio", "crepe-tiny", "mangio-crepe-tiny", "crepe", "harvest", "mangio-crepe", "rmvpe"], # Fork Feature. Add Crepe-Tiny
1586
- value="rmvpe",
1587
- interactive=True,
1588
- )
1589
-
1590
- crepe_hop_length = gr.Slider(
1591
- minimum=1,
1592
- maximum=512,
1593
- step=1,
1594
- label="Mangio-Crepe Hop Length. Higher numbers will reduce the chance of extreme pitch changes but lower numbers will increase accuracy. 64-192 is a good range to experiment with.",
1595
- value=120,
1596
- interactive=True,
1597
- visible=False,
1598
- )
1599
- f0method0.change(fn=whethercrepeornah, inputs=[f0method0], outputs=[crepe_hop_length])
1600
- filter_radius0 = gr.Slider(
1601
- minimum=0,
1602
- maximum=7,
1603
- label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
1604
- value=3,
1605
- step=1,
1606
- interactive=True,
1607
- )
1608
- resample_sr0 = gr.Slider(
1609
- minimum=0,
1610
- maximum=48000,
1611
- label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
1612
- value=0,
1613
- step=1,
1614
- interactive=True,
1615
- visible=False
1616
- )
1617
- rms_mix_rate0 = gr.Slider(
1618
- minimum=0,
1619
- maximum=1,
1620
- label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
1621
- value=0.21,
1622
- interactive=True,
1623
- )
1624
- protect0 = gr.Slider(
1625
- minimum=0,
1626
- maximum=0.5,
1627
- label=i18n("保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"),
1628
- value=0.33,
1629
- step=0.01,
1630
- interactive=True,
1631
- )
1632
- formanting = gr.Checkbox(
1633
- value=bool(DoFormant),
1634
- label="[EXPERIMENTAL] Formant shift inference audio",
1635
- info="Used for male to female and vice-versa conversions",
1636
- interactive=True,
1637
- visible=True,
1638
- )
1639
-
1640
- formant_preset = gr.Dropdown(
1641
- value='',
1642
- choices=get_fshift_presets(),
1643
- label="browse presets for formanting",
1644
- visible=bool(DoFormant),
1645
- )
1646
- formant_refresh_button = gr.Button(
1647
- value='\U0001f504',
1648
- visible=bool(DoFormant),
1649
- variant='primary',
1650
- )
1651
- #formant_refresh_button = ToolButton( elem_id='1')
1652
- #create_refresh_button(formant_preset, lambda: {"choices": formant_preset}, "refresh_list_shiftpresets")
1653
-
1654
- qfrency = gr.Slider(
1655
- value=Quefrency,
1656
- info="Default value is 1.0",
1657
- label="Quefrency for formant shifting",
1658
- minimum=0.0,
1659
- maximum=16.0,
1660
- step=0.1,
1661
- visible=bool(DoFormant),
1662
- interactive=True,
1663
- )
1664
- tmbre = gr.Slider(
1665
- value=Timbre,
1666
- info="Default value is 1.0",
1667
- label="Timbre for formant shifting",
1668
- minimum=0.0,
1669
- maximum=16.0,
1670
- step=0.1,
1671
- visible=bool(DoFormant),
1672
- interactive=True,
1673
- )
1674
-
1675
- formant_preset.change(fn=preset_apply, inputs=[formant_preset, qfrency, tmbre], outputs=[qfrency, tmbre])
1676
- frmntbut = gr.Button("Apply", variant="primary", visible=bool(DoFormant))
1677
- formanting.change(fn=formant_enabled,inputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button],outputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button])
1678
- frmntbut.click(fn=formant_apply,inputs=[qfrency, tmbre], outputs=[qfrency, tmbre])
1679
- formant_refresh_button.click(fn=update_fshift_presets,inputs=[formant_preset, qfrency, tmbre],outputs=[formant_preset, qfrency, tmbre])
1680
  with gr.Row():
1681
- vc_output1 = gr.Textbox("")
1682
  f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False)
1683
 
1684
  but0.click(
@@ -1688,137 +613,12 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1688
  input_audio0,
1689
  vc_transform0,
1690
  f0_file,
1691
- f0method0,
1692
  file_index1,
1693
- # file_index2,
1694
- # file_big_npy1,
1695
  index_rate1,
1696
- filter_radius0,
1697
- resample_sr0,
1698
- rms_mix_rate0,
1699
- protect0,
1700
- crepe_hop_length
1701
  ],
1702
  [vc_output1, vc_output2],
1703
  )
1704
 
1705
- with gr.Accordion("Batch Conversion",open=False):
1706
- with gr.Row():
1707
- with gr.Column():
1708
- vc_transform1 = gr.Number(
1709
- label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
1710
- )
1711
- opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
1712
- f0method1 = gr.Radio(
1713
- label=i18n(
1714
- "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"
1715
- ),
1716
- choices=["pm", "harvest", "crepe", "rmvpe"],
1717
- value="rmvpe",
1718
- interactive=True,
1719
- )
1720
- filter_radius1 = gr.Slider(
1721
- minimum=0,
1722
- maximum=7,
1723
- label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
1724
- value=3,
1725
- step=1,
1726
- interactive=True,
1727
- )
1728
- with gr.Column():
1729
- file_index3 = gr.Textbox(
1730
- label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
1731
- value="",
1732
- interactive=True,
1733
- )
1734
- file_index4 = gr.Dropdown(
1735
- label=i18n("自动检测index路径,下拉式选择(dropdown)"),
1736
- choices=sorted(index_paths),
1737
- interactive=True,
1738
- )
1739
- refresh_button.click(
1740
- fn=lambda: change_choices()[1],
1741
- inputs=[],
1742
- outputs=file_index4,
1743
- )
1744
- # file_big_npy2 = gr.Textbox(
1745
- # label=i18n("特征文件路径"),
1746
- # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1747
- # interactive=True,
1748
- # )
1749
- index_rate2 = gr.Slider(
1750
- minimum=0,
1751
- maximum=1,
1752
- label=i18n("检索特征占比"),
1753
- value=1,
1754
- interactive=True,
1755
- )
1756
- with gr.Column():
1757
- resample_sr1 = gr.Slider(
1758
- minimum=0,
1759
- maximum=48000,
1760
- label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
1761
- value=0,
1762
- step=1,
1763
- interactive=True,
1764
- )
1765
- rms_mix_rate1 = gr.Slider(
1766
- minimum=0,
1767
- maximum=1,
1768
- label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
1769
- value=1,
1770
- interactive=True,
1771
- )
1772
- protect1 = gr.Slider(
1773
- minimum=0,
1774
- maximum=0.5,
1775
- label=i18n(
1776
- "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
1777
- ),
1778
- value=0.33,
1779
- step=0.01,
1780
- interactive=True,
1781
- )
1782
- with gr.Column():
1783
- dir_input = gr.Textbox(
1784
- label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
1785
- value="E:\codes\py39\\test-20230416b\\todo-songs",
1786
- )
1787
- inputs = gr.File(
1788
- file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
1789
- )
1790
- with gr.Row():
1791
- format1 = gr.Radio(
1792
- label=i18n("导出文件格式"),
1793
- choices=["wav", "flac", "mp3", "m4a"],
1794
- value="flac",
1795
- interactive=True,
1796
- )
1797
- but1 = gr.Button(i18n("转换"), variant="primary")
1798
- vc_output3 = gr.Textbox(label=i18n("输出信息"))
1799
- but1.click(
1800
- vc_multi,
1801
- [
1802
- spk_item,
1803
- dir_input,
1804
- opt_input,
1805
- inputs,
1806
- vc_transform1,
1807
- f0method1,
1808
- file_index3,
1809
- file_index4,
1810
- # file_big_npy2,
1811
- index_rate2,
1812
- filter_radius1,
1813
- resample_sr1,
1814
- rms_mix_rate1,
1815
- protect1,
1816
- format1,
1817
- crepe_hop_length,
1818
- ],
1819
- [vc_output3],
1820
- )
1821
- but1.click(fn=lambda: easy_uploader.clear())
1822
  with gr.TabItem("Download Model"):
1823
  with gr.Row():
1824
  url=gr.Textbox(label="Enter the URL to the Model:")
@@ -1831,256 +631,12 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1831
  with gr.Row():
1832
  gr.Markdown(
1833
  """
 
 
 
1834
  Made with ❤️ by [Alice Oliveira](https://github.com/aliceoq) | Hosted with ❤️ by [Mateus Elias](https://github.com/mateuseap)
1835
  """
1836
  )
1837
 
1838
- def has_two_files_in_pretrained_folder():
1839
- pretrained_folder = "./pretrained/"
1840
- if not os.path.exists(pretrained_folder):
1841
- return False
1842
-
1843
- files_in_folder = os.listdir(pretrained_folder)
1844
- num_files = len(files_in_folder)
1845
- return num_files >= 2
1846
-
1847
- if has_two_files_in_pretrained_folder():
1848
- print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
1849
- with gr.TabItem("Train", visible=False):
1850
- with gr.Row():
1851
- with gr.Column():
1852
- exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
1853
- sr2 = gr.Radio(
1854
- label=i18n("目标采样率"),
1855
- choices=["40k", "48k"],
1856
- value="40k",
1857
- interactive=True,
1858
- visible=False
1859
- )
1860
- if_f0_3 = gr.Radio(
1861
- label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1862
- choices=[True, False],
1863
- value=True,
1864
- interactive=True,
1865
- visible=False
1866
- )
1867
- version19 = gr.Radio(
1868
- label="RVC version",
1869
- choices=["v1", "v2"],
1870
- value="v2",
1871
- interactive=True,
1872
- visible=False,
1873
- )
1874
- np7 = gr.Slider(
1875
- minimum=0,
1876
- maximum=config.n_cpu,
1877
- step=1,
1878
- label="# of CPUs for data processing (Leave as it is)",
1879
- value=config.n_cpu,
1880
- interactive=True,
1881
- visible=True
1882
- )
1883
- trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
1884
- easy_uploader = gr.Files(label='OR Drop your audios here. They will be uploaded in your dataset path above.',file_types=['audio'])
1885
- but1 = gr.Button("1. Process The Dataset", variant="primary")
1886
- info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
1887
- easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4], outputs=[info1])
1888
- but1.click(
1889
- preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
1890
- )
1891
- with gr.Column():
1892
- spk_id5 = gr.Slider(
1893
- minimum=0,
1894
- maximum=4,
1895
- step=1,
1896
- label=i18n("请指定说话人id"),
1897
- value=0,
1898
- interactive=True,
1899
- visible=False
1900
- )
1901
- with gr.Accordion('GPU Settings', open=False, visible=False):
1902
- gpus6 = gr.Textbox(
1903
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1904
- value=gpus,
1905
- interactive=True,
1906
- visible=False
1907
- )
1908
- gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1909
- f0method8 = gr.Radio(
1910
- label=i18n(
1911
- "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1912
- ),
1913
- choices=["harvest","crepe", "mangio-crepe", "rmvpe"], # Fork feature: Crepe on f0 extraction for training.
1914
- value="rmvpe",
1915
- interactive=True,
1916
- )
1917
-
1918
- extraction_crepe_hop_length = gr.Slider(
1919
- minimum=1,
1920
- maximum=512,
1921
- step=1,
1922
- label=i18n("crepe_hop_length"),
1923
- value=128,
1924
- interactive=True,
1925
- visible=False,
1926
- )
1927
- f0method8.change(fn=whethercrepeornah, inputs=[f0method8], outputs=[extraction_crepe_hop_length])
1928
- but2 = gr.Button("2. Pitch Extraction", variant="primary")
1929
- info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=8)
1930
- but2.click(
1931
- extract_f0_feature,
1932
- [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
1933
- [info2],
1934
- )
1935
- with gr.Row():
1936
- with gr.Column():
1937
- total_epoch11 = gr.Slider(
1938
- minimum=1,
1939
- maximum=5000,
1940
- step=10,
1941
- label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
1942
- value=250,
1943
- interactive=True,
1944
- )
1945
- butstop = gr.Button(
1946
- "Stop Training",
1947
- variant='primary',
1948
- visible=False,
1949
- )
1950
- but3 = gr.Button("3. Train Model", variant="primary", visible=True)
1951
-
1952
- but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop])
1953
- butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[butstop, but3])
1954
-
1955
-
1956
- but4 = gr.Button("4.Train Index", variant="primary")
1957
- info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=10)
1958
- with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
1959
- #gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
1960
- with gr.Column():
1961
- save_epoch10 = gr.Slider(
1962
- minimum=1,
1963
- maximum=200,
1964
- step=1,
1965
- label="Backup every X amount of epochs:",
1966
- value=10,
1967
- interactive=True,
1968
- )
1969
- batch_size12 = gr.Slider(
1970
- minimum=1,
1971
- maximum=40,
1972
- step=1,
1973
- label="Batch Size (LEAVE IT unless you know what you're doing!):",
1974
- value=default_batch_size,
1975
- interactive=True,
1976
- )
1977
- if_save_latest13 = gr.Checkbox(
1978
- label="Save only the latest '.ckpt' file to save disk space.",
1979
- value=True,
1980
- interactive=True,
1981
- )
1982
- if_cache_gpu17 = gr.Checkbox(
1983
- label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
1984
- value=False,
1985
- interactive=True,
1986
- )
1987
- if_save_every_weights18 = gr.Checkbox(
1988
- label="Save a small final model to the 'weights' folder at each save point.",
1989
- value=True,
1990
- interactive=True,
1991
- )
1992
- zip_model = gr.Button('5. Download Model')
1993
- zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
1994
- zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
1995
- with gr.Group():
1996
- with gr.Accordion("Base Model Locations:", open=False, visible=False):
1997
- pretrained_G14 = gr.Textbox(
1998
- label=i18n("加载预训练底模G路径"),
1999
- value="pretrained_v2/f0G40k.pth",
2000
- interactive=True,
2001
- )
2002
- pretrained_D15 = gr.Textbox(
2003
- label=i18n("加载预训练底模D路径"),
2004
- value="pretrained_v2/f0D40k.pth",
2005
- interactive=True,
2006
- )
2007
- gpus16 = gr.Textbox(
2008
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
2009
- value=gpus,
2010
- interactive=True,
2011
- )
2012
- sr2.change(
2013
- change_sr2,
2014
- [sr2, if_f0_3, version19],
2015
- [pretrained_G14, pretrained_D15, version19],
2016
- )
2017
- version19.change(
2018
- change_version19,
2019
- [sr2, if_f0_3, version19],
2020
- [pretrained_G14, pretrained_D15],
2021
- )
2022
- if_f0_3.change(
2023
- change_f0,
2024
- [if_f0_3, sr2, version19],
2025
- [f0method8, pretrained_G14, pretrained_D15],
2026
- )
2027
- but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
2028
- but3.click(
2029
- click_train,
2030
- [
2031
- exp_dir1,
2032
- sr2,
2033
- if_f0_3,
2034
- spk_id5,
2035
- save_epoch10,
2036
- total_epoch11,
2037
- batch_size12,
2038
- if_save_latest13,
2039
- pretrained_G14,
2040
- pretrained_D15,
2041
- gpus16,
2042
- if_cache_gpu17,
2043
- if_save_every_weights18,
2044
- version19,
2045
- ],
2046
- [
2047
- info3,
2048
- butstop,
2049
- but3,
2050
- ],
2051
- )
2052
- but4.click(train_index, [exp_dir1, version19], info3)
2053
- but5.click(
2054
- train1key,
2055
- [
2056
- exp_dir1,
2057
- sr2,
2058
- if_f0_3,
2059
- trainset_dir4,
2060
- spk_id5,
2061
- np7,
2062
- f0method8,
2063
- save_epoch10,
2064
- total_epoch11,
2065
- batch_size12,
2066
- if_save_latest13,
2067
- pretrained_G14,
2068
- pretrained_D15,
2069
- gpus16,
2070
- if_cache_gpu17,
2071
- if_save_every_weights18,
2072
- version19,
2073
- extraction_crepe_hop_length
2074
- ],
2075
- info3,
2076
- )
2077
-
2078
- else:
2079
- print(
2080
- "Pretrained weights not downloaded. Disabling training tab.\n"
2081
- "Wondering how to train a voice? Visit here for the RVC model training guide: https://t.ly/RVC_Training_Guide\n"
2082
- "-------------------------------\n"
2083
- )
2084
-
2085
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
2086
  #endregion
 
4
  import threading
5
  from time import sleep
6
  from subprocess import Popen
7
+ import datetime, requests
 
 
 
8
  now_dir = os.getcwd()
9
  sys.path.append(now_dir)
10
  tmp = os.path.join(now_dir, "TEMP")
 
18
  torch.manual_seed(114514)
19
  from i18n import I18nAuto
20
 
 
 
 
 
21
  from utils import load_audio, CSVutil
22
 
23
+ DoFormant = False
24
+ Quefrency = 1.0
25
+ Timbre = 1.0
26
+
27
+ f0_method = 'rmvpe'
28
+ crepe_hop_length = 120
29
+ filter_radius = 3
30
+ resample_sr = 1
31
+ rms_mix_rate = 0.21
32
+ protect = 0.33
33
 
34
+ # essa parte excluir dps
35
  if not os.path.isdir('csvdb/'):
36
  os.makedirs('csvdb')
37
  frmnt, stp = open("csvdb/formanting.csv", 'w'), open("csvdb/stop.csv", 'w')
 
58
  print("Downloaded hubert base model file successfully. File saved to ./hubert_base.pt.")
59
  else:
60
  raise Exception("Failed to download hubert base model file. Status code: " + str(response.status_code) + ".")
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  download_models()
63
 
64
  print("\n-------------------------------\nRVC v2 Easy GUI (Local Edition)\n-------------------------------\n")
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  i18n = I18nAuto()
 
 
67
  ngpu = torch.cuda.device_count()
68
  gpu_infos = []
69
  mem = []
 
156
  if name.endswith(".index") and "trained" not in name:
157
  index_paths.append("%s/%s" % (root, name))
158
 
 
 
159
  def vc_single(
160
  sid,
161
  input_audio_path,
162
  f0_up_key,
163
  f0_file,
 
164
  file_index,
 
 
165
  index_rate,
 
 
 
 
 
166
  ): # spk_item, input_audio0, vc_transform0,f0_file,f0method0
167
  global tgt_sr, net_g, vc, hubert_model, version
168
  if input_audio_path is None:
 
200
  f0_up_key,
201
  f0_method,
202
  file_index,
 
203
  index_rate,
204
  if_f0,
205
  filter_radius,
 
229
  print(info)
230
  return info, (None, None)
231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
  def get_vc(sid):
233
  global n_spk, tgt_sr, net_g, vc, cpt, version
234
  if sid == "" or sid == []:
 
289
  n_spk = cpt["config"][-3]
290
  return {"visible": False, "maximum": n_spk, "__type__": "update"}
291
 
 
292
  def change_choices():
293
  names = []
294
  for name in os.listdir(weight_root):
 
299
  for name in files:
300
  if name.endswith(".index") and "trained" not in name:
301
  index_paths.append("%s/%s" % (root, name))
302
+ return {"choices": sorted(names), "__type__": "update"}
 
 
 
 
303
 
304
  def clean():
305
  return {"value": "", "__type__": "update"}
306
 
 
307
  sr_dict = {
308
  "32k": 32000,
309
  "40k": 40000,
310
  "48k": 48000,
311
  }
312
 
 
313
  def if_done(done, p):
314
  while 1:
315
  if p.poll() == None:
 
318
  break
319
  done[0] = True
320
 
 
321
  def if_done_multi(done, ps):
322
  while 1:
323
  # poll==None代表进程未结束
 
332
  break
333
  done[0] = True
334
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
335
  def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
336
  gpus = gpus.split("-")
337
  os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
 
417
  print(log)
418
  yield log
419
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
420
  def whethercrepeornah(radio):
421
  mango = True if radio == 'mangio-crepe' or radio == 'mangio-crepe-tiny' else False
422
  return ({"visible": mango, "__type__": "update"})
423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
424
  #region RVC WebUI App
 
 
 
 
 
 
 
 
 
 
 
425
  def change_choices2():
426
  audio_files=[]
427
  for filename in os.listdir("./audios"):
428
  if filename.endswith(('.wav','.mp3','.ogg','.flac','.m4a','.aac','.mp4')):
429
  audio_files.append(os.path.join('./audios',filename).replace('\\', '/'))
430
+ return {"choices": sorted(audio_files), "__type__": "update"}
431
 
432
  audio_files=[]
433
  for filename in os.listdir("./audios"):
 
457
  else:
458
  return ''
459
 
 
 
 
 
 
 
460
  def save_to_wav(record_button):
461
  if record_button is None:
462
  pass
 
530
  return "Success."
531
  except:
532
  return "There's been an error."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
533
 
534
  with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
535
+ with gr.Tabs():
536
  with gr.TabItem("Inference"):
537
+ gr.HTML("<h1> Vozes da Loirinha 👱🏻‍♀️ </h1>")
 
 
 
 
 
 
 
 
538
 
 
539
  with gr.Row():
540
  sid0 = gr.Dropdown(label="1.Choose your Model.", choices=sorted(names), value=check_for_name())
541
  refresh_button = gr.Button("Refresh", variant="primary")
542
  if check_for_name() != '':
543
  get_vc(sorted(names)[0])
544
+ vc_transform0 = gr.Number(label="Optional: You can change the pitch here or leave it at 0.", value=0, visible=False)
 
545
  spk_item = gr.Slider(
546
  minimum=0,
547
  maximum=2333,
 
573
  dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0])
574
  dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio0])
575
  refresh_button2 = gr.Button("Refresh", variant="primary", size='sm')
576
+ refresh_button2.click(fn=change_choices2, inputs=[], outputs=[input_audio0])
577
  record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0])
578
  record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
579
  with gr.Column():
580
+ #antigo index
581
+ file_index1 = gr.Dropdown(
582
+ label="3. Path to your added.index file (if it didn't automatically find it.)",
583
+ choices=get_indexes(),
584
+ value=get_index(),
585
+ interactive=True,
586
+ visible=False,
587
+ )
588
+ sid0.change(fn=match_index, inputs=[sid0],outputs=[file_index1])
589
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[sid0])
590
+ index_rate1 = gr.Slider(
591
+ minimum=0,
592
+ maximum=1,
593
+ label=i18n("检索特征占比"),
594
+ value=0.66,
595
+ interactive=True,
596
+ visible=False,
597
+ )
598
+ ###---
 
 
 
 
599
  vc_output2 = gr.Audio(
600
  label="Output Audio (Click on the Three Dots in the Right Corner to Download)",
601
  type='filepath',
602
  interactive=False,
603
  )
604
+ vc_output1 = gr.Textbox("")
605
+ ###-----
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606
  with gr.Row():
 
607
  f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False)
608
 
609
  but0.click(
 
613
  input_audio0,
614
  vc_transform0,
615
  f0_file,
 
616
  file_index1,
 
 
617
  index_rate1,
 
 
 
 
 
618
  ],
619
  [vc_output1, vc_output2],
620
  )
621
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
622
  with gr.TabItem("Download Model"):
623
  with gr.Row():
624
  url=gr.Textbox(label="Enter the URL to the Model:")
 
631
  with gr.Row():
632
  gr.Markdown(
633
  """
634
+ Original RVC: https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
635
+ Mangio's RVC Fork: https://github.com/Mangio621/Mangio-RVC-Fork
636
+ If you like the EasyGUI, help me keep it.❤️ https://paypal.me/lesantillan
637
  Made with ❤️ by [Alice Oliveira](https://github.com/aliceoq) | Hosted with ❤️ by [Mateus Elias](https://github.com/mateuseap)
638
  """
639
  )
640
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
641
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
642
  #endregion