Spaces: Running on Zero
Commit · 8104036
Parent: 290deb7
fix args
app.py CHANGED
@@ -22,7 +22,7 @@ model = ParlerTTSForConditionalGeneration.from_pretrained(
     repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
 ).to(device)
 jenny_model = ParlerTTSForConditionalGeneration.from_pretrained(
-
+    repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
 ).to(device)
 
 tokenizer = AutoTokenizer.from_pretrained(repo_id)
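For context, this hunk only changes the arguments used when loading the Jenny checkpoint; the surrounding setup lives earlier in app.py and is not part of the diff. A minimal sketch of that setup, assuming device, torch_dtype and repo_id are defined roughly as below (the concrete repo id is an assumption; only the lines inside the hunk are confirmed by the commit):

import torch
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer

# Assumed setup from earlier in app.py (not shown in this diff).
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device != "cpu" else torch.float32
repo_id = "parler-tts/parler_tts_mini_v0.1"  # assumption; the real value is defined above the hunk

model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
).to(device)

# After this commit the Jenny model is loaded with the same dtype and
# low_cpu_mem_usage arguments as the base model.
jenny_model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
).to(device)

tokenizer = AutoTokenizer.from_pretrained(repo_id)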
@@ -329,7 +329,7 @@ with gr.Blocks(css=css) as block:
             with gr.Column():
                 input_text = gr.Textbox(label="Input Text", lines=2, value=default_text, elem_id="input_text")
                 description = gr.Textbox(label="Description", lines=2, value="", elem_id="input_description")
-                play_seconds = gr.Slider(2.5, 5.0, value=2.5, step=0.5, label="Streaming interval in seconds", info="Lower = shorter chunks, lower latency, more codec steps")
+                play_seconds = gr.Slider(2.5, 5.0, value=2.5, step=0.5, label="Streaming interval in seconds", info="Lower = shorter chunks, lower latency, more codec steps")
                 run_button = gr.Button("Generate Audio", variant="primary")
             with gr.Column():
                 audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", elem_id="audio_out", streaming=True, autoplay=True)
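The play_seconds slider feeds the third element of inputs: per its info text, it sets how many seconds of audio are emitted per streaming step, and gr.Audio(streaming=True) plays chunks as they arrive. A rough sketch of how such a value is typically consumed; stream_in_chunks is a hypothetical helper, not the app's actual generate_base implementation:

import numpy as np

def stream_in_chunks(waveform: np.ndarray, sampling_rate: int, play_seconds: float):
    # gr.Audio(streaming=True) consumes a generator of (sampling_rate, chunk) tuples,
    # so a smaller play_seconds means shorter chunks and lower time-to-first-audio,
    # at the cost of more frequent decoding steps.
    chunk_size = int(sampling_rate * play_seconds)
    for start in range(0, len(waveform), chunk_size):
        yield sampling_rate, waveform[start : start + chunk_size]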
@@ -338,19 +338,20 @@ with gr.Blocks(css=css) as block:
         outputs = [audio_out]
         gr.Examples(examples=examples, fn=generate_base, inputs=inputs, outputs=outputs, cache_examples=False)
         run_button.click(fn=generate_base, inputs=inputs, outputs=outputs, queue=True)
+
     with gr.Tab("Jenny"):
         with gr.Row():
             with gr.Column():
-                input_text = gr.Textbox(label="Input Text", lines=2, value=
-                description = gr.Textbox(label="Description", lines=2, value=
-                play_seconds = gr.Slider(2.5, 5.0, value=2
+                input_text = gr.Textbox(label="Input Text", lines=2, value=jenny_examples[0][0], elem_id="input_text")
+                description = gr.Textbox(label="Description", lines=2, value=jenny_examples[0][1], elem_id="input_description")
+                play_seconds = gr.Slider(2.5, 5.0, value=jenny_examples[0][2], step=0.5, label="Streaming interval in seconds", info="Lower = shorter chunks, lower latency, more codec steps")
                 run_button = gr.Button("Generate Audio", variant="primary")
             with gr.Column():
                 audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", elem_id="audio_out", streaming=True, autoplay=True)
 
         inputs = [input_text, description, play_seconds]
         outputs = [audio_out]
-        gr.Examples(examples=
+        gr.Examples(examples=jenny_examples, fn=generate_jenny, inputs=inputs, outputs=outputs, cache_examples=False)
         run_button.click(fn=generate_jenny, inputs=inputs, outputs=outputs, queue=True)
 
     gr.HTML(
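The new defaults and the examples table both reference jenny_examples, which is defined earlier in app.py and not shown in this diff. Judging from inputs = [input_text, description, play_seconds] and the jenny_examples[0][i] lookups, each row presumably holds a text, a description and a streaming interval; a sketch with placeholder values, not the app's real examples:

# Assumed shape of jenny_examples; the actual rows live above the shown hunks.
jenny_examples = [
    # [input_text, description, play_seconds]
    ["Placeholder sentence for the Jenny voice.", "Jenny speaks clearly at a moderate pace.", 3.0],
]

# The commit then seeds the Jenny tab's widgets from the first row:
#   value=jenny_examples[0][0]  -> default input text
#   value=jenny_examples[0][1]  -> default description
#   value=jenny_examples[0][2]  -> default streaming interval (within the 2.5-5.0 slider range)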