matdmiller committed
Commit 0a2958b · 1 parent: e419f45

added gradio and openai api threading

Files changed:
- app.ipynb +101 -4
- app.py +77 -10
- requirements.txt +1 -0
app.ipynb
CHANGED
@@ -67,7 +67,16 @@
 "import gradio as gr\n",
 "import openai\n",
 "from pydub import AudioSegment\n",
-"import io"
+"import io\n",
+"from datetime import datetime\n",
+"from math import ceil\n",
+"from multiprocessing.pool import ThreadPool\n",
+"from functools import partial\n",
+"from tenacity import (\n",
+"    retry,\n",
+"    stop_after_attempt,\n",
+"    wait_random_exponential,\n",
+") # for exponential backoff"
 ]
 },
 {
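Note: the tenacity import added above is what drives the per-chunk retry logic later in this commit. A minimal standalone sketch of the backoff pattern (toy function, not part of the diff):

    import random
    from tenacity import retry, stop_after_attempt, wait_random_exponential

    @retry(wait=wait_random_exponential(min=1, max=180), stop=stop_after_attempt(6))
    def flaky_call():
        # Each failure makes tenacity sleep for a randomized, exponentially
        # growing interval (1-180 s) and retry; after 6 failed attempts it
        # gives up and raises tenacity.RetryError.
        if random.random() < 0.5:
            raise RuntimeError("transient error")
        return "ok"

    print(flaky_call())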
@@ -96,6 +105,29 @@
 "tts_voices = ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "8eb7e7d5-7121-4762-b8d1-e5a9539e2b36",
+"metadata": {},
+"outputs": [],
+"source": [
+"#| export\n",
+"clean_text_prompt = \"\"\"Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early.\"\"\"\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "52d373be-3a79-412e-8ca2-92bb443fa52d",
+"metadata": {},
+"outputs": [],
+"source": [
+"#| export\n",
+"#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.\n",
+"OPENAI_CLIENT_TTS_THREADS = 10 "
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
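Note: `clean_text_prompt` is only defined in this commit; nothing in the diff calls it yet. A hedged sketch of how it could be wired up as a system prompt for a chat model that pre-cleans pasted text before TTS (hypothetical helper and illustrative model name, not part of the diff):

    import openai

    def clean_text(raw_text, model="gpt-4"):  # model choice is illustrative
        client = openai.OpenAI()
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": clean_text_prompt},
                {"role": "user", "content": raw_text},
            ],
        )
        client.close()
        return response.choices[0].message.content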
@@ -175,6 +207,67 @@
 "    return combined_mp3.getvalue()"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "4691703d-ed0f-4481-8006-b2906289b780",
+"metadata": {},
+"outputs": [],
+"source": [
+"#| export\n",
+"def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):\n",
+"    client = openai.OpenAI()\n",
+"    \n",
+"    @retry(wait=wait_random_exponential(min=1, max=180), stop=stop_after_attempt(6))\n",
+"    def _create_speech_with_backoff(**kwargs):\n",
+"        return client.audio.speech.create(**kwargs)\n",
+"    \n",
+"    response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)\n",
+"    client.close()\n",
+"    return chunk_idx, response.content"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "e34bb4aa-698c-4452-8cda-bd02b38f7122",
+"metadata": {},
+"outputs": [],
+"source": [
+"#| export\n",
+"def create_speech2(input_text, model='tts-1', voice='alloy', progress=gr.Progress(), **kwargs):\n",
+"    start = datetime.now()\n",
+"    # Split the input text into chunks\n",
+"    chunks = split_text(input_text)\n",
+"\n",
+"    # Initialize the progress bar\n",
+"    progress(0, desc=f\"Started processing {len(chunks)} text chunks using {OPENAI_CLIENT_TTS_THREADS} threads. ETA is ~{ceil(len(chunks)/OPENAI_CLIENT_TTS_THREADS)} min.\")\n",
+"\n",
+"    # Initialize a list to hold the audio data of each chunk\n",
+"    audio_data = []\n",
+"\n",
+"    # Process each chunk\n",
+"    with ThreadPool(processes=OPENAI_CLIENT_TTS_THREADS) as pool:\n",
+"        results = pool.starmap(\n",
+"            partial(create_speech_openai, model=model, voice=voice, **kwargs), \n",
+"            zip(range(len(chunks)),chunks)\n",
+"        )\n",
+"        audio_data = [o[1] for o in sorted(results)]\n",
+"\n",
+"    # Progress\n",
+"    progress(.9, desc=f\"Merging audio chunks... {(datetime.now()-start).seconds} seconds to process.\")\n",
+"    \n",
+"    # Concatenate the audio data from all chunks\n",
+"    combined_audio = concatenate_mp3(audio_data)\n",
+"\n",
+"    # Final update to the progress bar\n",
+"    progress(1, desc=f\"Processing completed... {(datetime.now()-start).seconds} seconds to process.\")\n",
+"    \n",
+"    print(f\"Processing time: {(datetime.now()-start).seconds} seconds.\")\n",
+"\n",
+"    return combined_audio\n"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
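Note: `create_speech2` fans the chunks out over a thread pool and restores their order by sorting on the returned chunk index before merging. The same pattern in isolation, with a toy worker standing in for the OpenAI call:

    from functools import partial
    from multiprocessing.pool import ThreadPool

    def fake_tts(chunk_idx, text, voice="alloy"):
        # Stand-in for create_speech_openai: returns (index, payload) so order
        # can be restored after the threads finish in arbitrary order.
        return chunk_idx, f"<audio:{voice}:{text}>"

    chunks = ["chunk one", "chunk two", "chunk three"]
    with ThreadPool(processes=2) as pool:
        results = pool.starmap(partial(fake_tts, voice="nova"),
                               zip(range(len(chunks)), chunks))

    # Tuples sort by their first element, so sorted(results) restores chunk order.
    ordered = [payload for _, payload in sorted(results)]
    print(ordered)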
@@ -264,7 +357,8 @@
 "#| export\n",
 "with gr.Blocks(title='OpenAI TTS', head='OpenAI TTS') as app:\n",
 "    gr.Markdown(\"# OpenAI TTS\")\n",
-"    gr.Markdown(\"Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters
+"    gr.Markdown(\"\"\"Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters. \n",
+"For requests longer than 4,000 chars they will be broken into chunks of 4,000 or less chars automatically.\"\"\")\n",
 "    with gr.Row():\n",
 "        input_text = gr.Textbox(max_lines=100, label=\"Enter text here\")\n",
 "    with gr.Row():\n",
@@ -277,7 +371,7 @@
 "    input_text.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)\n",
 "    tts_model_dropdown.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)\n",
 "    go_btn = gr.Button(\"Go\")\n",
-"    go_btn.click(fn=
+"    go_btn.click(fn=create_speech2, inputs=[input_text, tts_model_dropdown, tts_voice_dropdown], outputs=[output_audio])\n",
 "    clear_btn = gr.Button('Clear')\n",
 "    clear_btn.click(fn=lambda: '', outputs=input_text)\n",
 "    "
@@ -292,7 +386,8 @@
 "source": [
 "#| export\n",
 "launch_kwargs = {'auth':('username',GRADIO_PASSWORD),\n",
-"                 'auth_message':'Please log in to Mat\\'s TTS App with username: username and password.'}"
+"                 'auth_message':'Please log in to Mat\\'s TTS App with username: username and password.'}\n",
+"queue_kwargs = {'default_concurrency_limit':10}"
 ]
 },
 {
@@ -304,6 +399,7 @@
 "source": [
 "#| hide\n",
 "#Notebook launch\n",
+"app.queue(**queue_kwargs)\n",
 "app.launch(**launch_kwargs)"
 ]
 },
@@ -317,6 +413,7 @@
 "#| export\n",
 "#.py launch\n",
 "if __name__ == \"__main__\":\n",
+"    app.queue(**queue_kwargs)\n",
 "    app.launch(**launch_kwargs)"
 ]
 },
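Note: `queue_kwargs` is passed to Gradio's request queue; in Gradio 4.x, `default_concurrency_limit` caps how many queued events run at once for listeners that don't set their own `concurrency_limit`. A minimal standalone sketch of the same queue-then-launch sequence (toy interface, assumed equivalent to the `**queue_kwargs` call above):

    import gradio as gr

    with gr.Blocks() as demo:
        box = gr.Textbox(label="Echo")
        btn = gr.Button("Go")
        # This event inherits the queue's default_concurrency_limit set below.
        btn.click(fn=lambda s: s, inputs=box, outputs=box)

    # Up to 10 queued events may execute at once; together with
    # OPENAI_CLIENT_TTS_THREADS this bounds the total in-flight OpenAI requests.
    demo.queue(default_concurrency_limit=10)
    demo.launch()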
app.py
CHANGED
@@ -1,7 +1,8 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
 
 # %% auto 0
-__all__ = ['secret_import_failed', 'tts_voices', '
+__all__ = ['secret_import_failed', 'tts_voices', 'clean_text_prompt', 'OPENAI_CLIENT_TTS_THREADS', 'launch_kwargs',
+           'queue_kwargs', 'split_text', 'concatenate_mp3', 'create_speech_openai', 'create_speech2', 'create_speech',
            'get_input_text_len', 'get_generation_cost']
 
 # %% app.ipynb 1
@@ -33,6 +34,15 @@ import gradio as gr
 import openai
 from pydub import AudioSegment
 import io
+from datetime import datetime
+from math import ceil
+from multiprocessing.pool import ThreadPool
+from functools import partial
+from tenacity import (
+    retry,
+    stop_after_attempt,
+    wait_random_exponential,
+) # for exponential backoff
 
 # %% app.ipynb 4
 try:
@@ -45,6 +55,14 @@ except:
 tts_voices = ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']
 
 # %% app.ipynb 6
+clean_text_prompt = """Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early."""
+
+
+# %% app.ipynb 7
+#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.
+OPENAI_CLIENT_TTS_THREADS = 10 
+
+# %% app.ipynb 8
 def split_text(input_text, max_length=4000, lookback=1000):
     # If the text is shorter than the max_length, return it as is
     if len(input_text) <= max_length:
@@ -77,7 +95,7 @@ def split_text(input_text, max_length=4000, lookback=1000):
 
     return chunks
 
-# %% app.ipynb
+# %% app.ipynb 9
 def concatenate_mp3(mp3_files):
     if len(mp3_files) == 1:
         return mp3_files[0]
@@ -107,7 +125,53 @@ def concatenate_mp3(mp3_files):
 
     return combined_mp3.getvalue()
 
-# %% app.ipynb
+# %% app.ipynb 10
+def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):
+    client = openai.OpenAI()
+
+    @retry(wait=wait_random_exponential(min=1, max=180), stop=stop_after_attempt(6))
+    def _create_speech_with_backoff(**kwargs):
+        return client.audio.speech.create(**kwargs)
+
+    response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)
+    client.close()
+    return chunk_idx, response.content
+
+# %% app.ipynb 11
+def create_speech2(input_text, model='tts-1', voice='alloy', progress=gr.Progress(), **kwargs):
+    start = datetime.now()
+    # Split the input text into chunks
+    chunks = split_text(input_text)
+
+    # Initialize the progress bar
+    progress(0, desc=f"Started processing {len(chunks)} text chunks using {OPENAI_CLIENT_TTS_THREADS} threads. ETA is ~{ceil(len(chunks)/OPENAI_CLIENT_TTS_THREADS)} min.")
+
+    # Initialize a list to hold the audio data of each chunk
+    audio_data = []
+
+    # Process each chunk
+    with ThreadPool(processes=OPENAI_CLIENT_TTS_THREADS) as pool:
+        results = pool.starmap(
+            partial(create_speech_openai, model=model, voice=voice, **kwargs), 
+            zip(range(len(chunks)),chunks)
+        )
+        audio_data = [o[1] for o in sorted(results)]
+
+    # Progress
+    progress(.9, desc=f"Merging audio chunks... {(datetime.now()-start).seconds} seconds to process.")
+
+    # Concatenate the audio data from all chunks
+    combined_audio = concatenate_mp3(audio_data)
+
+    # Final update to the progress bar
+    progress(1, desc=f"Processing completed... {(datetime.now()-start).seconds} seconds to process.")
+
+    print(f"Processing time: {(datetime.now()-start).seconds} seconds.")
+
+    return combined_audio
+
+
+# %% app.ipynb 12
 def create_speech(input_text, model='tts-1', voice='alloy', progress=gr.Progress()):
     # Split the input text into chunks
     chunks = split_text(input_text)
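Note: a hedged sketch of driving the new helpers directly from Python, outside the Gradio UI. It assumes `app.py` imports cleanly (i.e. the Space's secrets are available), `OPENAI_API_KEY` is set in the environment, and `long_text` is a placeholder:

    from app import split_text, create_speech_openai, concatenate_mp3

    long_text = "..."  # placeholder; anything over 4,000 characters gets chunked

    # Sequential equivalent of what create_speech2 does with a thread pool.
    chunks = split_text(long_text)
    results = [create_speech_openai(i, chunk, model='tts-1', voice='alloy')
               for i, chunk in enumerate(chunks)]
    mp3_bytes = concatenate_mp3([audio for _, audio in sorted(results)])

    with open('speech.mp3', 'wb') as f:
        f.write(mp3_bytes)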
@@ -150,11 +214,11 @@ def create_speech(input_text, model='tts-1', voice='alloy', progress=gr.Progress()):
     return combined_audio
 
 
-# %% app.ipynb
+# %% app.ipynb 13
 def get_input_text_len(input_text):
     return len(input_text)
 
-# %% app.ipynb
+# %% app.ipynb 14
 def get_generation_cost(input_text, tts_model_dropdown):
     text_len = len(input_text)
     if tts_model_dropdown.endswith('-hd'):
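Note: the visible branch prices tts-1 at $0.015 per 1,000 characters (the `-hd` rate sits outside this hunk). A quick worked check of the formula:

    text_len = 4000                      # a request at the 4,000-character limit
    cost = text_len / 1000 * 0.015       # tts-1 rate from the branch above
    print("${:,.3f}".format(cost))       # -> $0.060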
@@ -163,10 +227,11 @@ def get_generation_cost(input_text, tts_model_dropdown):
     cost = text_len/1000 * 0.015
     return "${:,.3f}".format(cost)
 
-# %% app.ipynb
+# %% app.ipynb 15
 with gr.Blocks(title='OpenAI TTS', head='OpenAI TTS') as app:
     gr.Markdown("# OpenAI TTS")
-    gr.Markdown("Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters.
+    gr.Markdown("""Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters. 
+For requests longer than 4,000 chars they will be broken into chunks of 4,000 or less chars automatically.""")
     with gr.Row():
         input_text = gr.Textbox(max_lines=100, label="Enter text here")
     with gr.Row():
@@ -179,16 +244,18 @@ with gr.Blocks(title='OpenAI TTS', head='OpenAI TTS') as app:
     input_text.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)
     tts_model_dropdown.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)
     go_btn = gr.Button("Go")
-    go_btn.click(fn=
+    go_btn.click(fn=create_speech2, inputs=[input_text, tts_model_dropdown, tts_voice_dropdown], outputs=[output_audio])
     clear_btn = gr.Button('Clear')
     clear_btn.click(fn=lambda: '', outputs=input_text)
 
 
-# %% app.ipynb
+# %% app.ipynb 16
 launch_kwargs = {'auth':('username',GRADIO_PASSWORD),
                  'auth_message':'Please log in to Mat\'s TTS App with username: username and password.'}
+queue_kwargs = {'default_concurrency_limit':10}
 
-# %% app.ipynb
+# %% app.ipynb 18
 #.py launch
 if __name__ == "__main__":
+    app.queue(**queue_kwargs)
     app.launch(**launch_kwargs)
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 openai==1.10.0
 gradio==4.16.0
 pydub==0.25.1
+tenacity==8.2.3