sayashi committed on
Commit
f32d431
1 Parent(s): 353fbac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -13,17 +13,6 @@ import logging
13
  logging.getLogger('numba').setLevel(logging.WARNING)
14
  limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
15
 
16
- hps_ms = utils.get_hparams_from_file(r'./model/config.json')
17
- net_g_ms = SynthesizerTrn(
18
- len(hps_ms.symbols),
19
- hps_ms.data.filter_length // 2 + 1,
20
- hps_ms.train.segment_size // hps_ms.data.hop_length,
21
- n_speakers=hps_ms.data.n_speakers,
22
- **hps_ms.model)
23
- _ = net_g_ms.eval().to(device)
24
- speakers = hps_ms.speakers
25
- model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
26
-
27
  def get_text(text, hps):
28
  text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
29
  if hps.data.add_blank:
@@ -98,6 +87,18 @@ if __name__ == '__main__':
98
  parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
99
  args = parser.parse_args()
100
  device = torch.device(args.device)
 
 
 
 
 
 
 
 
 
 
 
 
101
  with gr.Blocks() as app:
102
  gr.Markdown(
103
  "# <center> VITS语音在线合成demo\n"
 
13
  logging.getLogger('numba').setLevel(logging.WARNING)
14
  limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
15
 
 
 
 
 
 
 
 
 
 
 
 
16
  def get_text(text, hps):
17
  text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
18
  if hps.data.add_blank:
 
87
  parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
88
  args = parser.parse_args()
89
  device = torch.device(args.device)
90
+
91
+ hps_ms = utils.get_hparams_from_file(r'./model/config.json')
92
+ net_g_ms = SynthesizerTrn(
93
+ len(hps_ms.symbols),
94
+ hps_ms.data.filter_length // 2 + 1,
95
+ hps_ms.train.segment_size // hps_ms.data.hop_length,
96
+ n_speakers=hps_ms.data.n_speakers,
97
+ **hps_ms.model)
98
+ _ = net_g_ms.eval().to(device)
99
+ speakers = hps_ms.speakers
100
+ model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
101
+
102
  with gr.Blocks() as app:
103
  gr.Markdown(
104
  "# <center> VITS语音在线合成demo\n"