Kevin676 committed on
Commit
0e3ff13
·
1 Parent(s): 371b8e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -236,20 +236,21 @@ def voice_conversion(apikey, ta, audio, choice1):
236
  # print("Reference Audio after decoder:")
237
  # IPython.display.display(Audio(ref_wav_voc, rate=ap.sample_rate))
238
 
239
- voicefixer.restore(input=ref_wav_voc, # input wav file path
240
- output="audio1.wav", # output wav file path
241
- cuda=True, # whether to use gpu acceleration
242
- mode = 0) # You can try out mode 0, 1, or 2 to find out the best result
243
 
244
- noisy = enhance_model.load_audio(
245
- "audio1.wav"
246
- ).unsqueeze(0)
247
 
248
- enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.]))
249
- torchaudio.save("enhanced.wav", enhanced.cpu(), 16000)
250
-
251
- return [result.text, chat_response, "enhanced.wav"]
252
 
 
 
 
253
  c1=gr.Interface(
254
  fn=voice_conversion,
255
  inputs=[
@@ -259,7 +260,8 @@ c1=gr.Interface(
259
  gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
260
  ],
261
  outputs=[
262
- gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
 
263
  ],
264
  #theme="huggingface",
265
  description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!",
@@ -274,7 +276,8 @@ c2=gr.Interface(
274
  gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
275
  ],
276
  outputs=[
277
- gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
 
278
  ],
279
  #theme="huggingface",
280
  description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!",
 
236
  # print("Reference Audio after decoder:")
237
  # IPython.display.display(Audio(ref_wav_voc, rate=ap.sample_rate))
238
 
239
+ # voicefixer.restore(input=ref_wav_voc, # input wav file path
240
+ # output="audio1.wav", # output wav file path
241
+ # cuda=True, # whether to use gpu acceleration
242
+ # mode = 0) # You can try out mode 0, 1, or 2 to find out the best result
243
 
244
+ # noisy = enhance_model.load_audio(
245
+ # "audio1.wav"
246
+ # ).unsqueeze(0)
247
 
248
+ # enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.]))
249
+ # torchaudio.save("enhanced.wav", enhanced.cpu(), 16000)
 
 
250
 
251
+ # return [result.text, chat_response, "enhanced.wav"]
252
+ return (ap.sample_rate, ref_wav_voc)
253
+
254
  c1=gr.Interface(
255
  fn=voice_conversion,
256
  inputs=[
 
260
  gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
261
  ],
262
  outputs=[
263
+ # gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
264
+ gr.Audio(label="Audio with Custom Voice"),
265
  ],
266
  #theme="huggingface",
267
  description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!",
 
276
  gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
277
  ],
278
  outputs=[
279
+ # gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
280
+ gr.Audio(label="Audio with Custom Voice"),
281
  ],
282
  #theme="huggingface",
283
  description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!",