Update app.py
Browse files
app.py
CHANGED
@@ -9,18 +9,18 @@ caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
|
|
9 |
audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
|
10 |
|
11 |
ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
|
12 |
-
def infer(image_input, manual_caption, duration_in
|
13 |
print(duration_in)
|
14 |
if manual_caption == "":
|
15 |
cap = caption(image_input, fn_index=0)
|
16 |
print("gpt2 caption: '" + cap + "' β’ ")
|
17 |
-
ph_update = "
|
18 |
else:
|
19 |
cap = manual_caption
|
20 |
print("manual captiony: " + cap)
|
21 |
ph_update=""
|
22 |
|
23 |
-
sound = audio_gen(cap, duration_in, 2.5,
|
24 |
|
25 |
return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True)
|
26 |
|
@@ -83,7 +83,7 @@ with gr.Blocks(css="style.css") as demo:
|
|
83 |
manual_cap = gr.Textbox(label="Manual Image description (optional)", placeholder=ph_message)
|
84 |
with gr.Row():
|
85 |
duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
|
86 |
-
seed_in = gr.Number(label="Seed", value=45)
|
87 |
caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
|
88 |
sound_output = gr.Audio(label="Result", elem_id="sound-output")
|
89 |
|
@@ -96,7 +96,7 @@ with gr.Blocks(css="style.css") as demo:
|
|
96 |
|
97 |
gr.HTML(article)
|
98 |
|
99 |
-
generate.click(infer, inputs=[input_img, manual_cap, duration_in
|
100 |
share_button.click(None, [], [], _js=share_js)
|
101 |
|
102 |
demo.queue(max_size=32).launch(debug=True)
|
|
|
9 |
audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
|
10 |
|
11 |
ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
|
12 |
+
def infer(image_input, manual_caption, duration_in):
    """Produce a sound effect that matches an image.

    When no manual description is supplied, the image is captioned via the
    remote image-caption Space; otherwise the caller's text is used as-is.
    The resulting caption is fed to the AudioLDM text-to-audio Space.

    Returns the caption, the generated audio, a placeholder update for the
    manual-caption textbox, and an update making the share group visible.
    """
    print(duration_in)

    if manual_caption == "":
        # No user description: ask the captioning Space for one.
        cap = caption(image_input, fn_index=0)
        gpt2_note = "gpt2 caption: '" + cap + "' β’ "
        print(gpt2_note)
        ph_update = gpt2_note
    else:
        # User supplied a description: use it verbatim.
        cap = manual_caption
        print("manual captiony: " + cap)
        ph_update = ""

    # 2.5 = guidance scale, 440 = seed, 3 = candidate count (remote Space API).
    sound = audio_gen(cap, duration_in, 2.5, 440, 3, fn_index=0)

    return (
        cap,
        sound[1],
        gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"),
        gr.Group.update(visible=True),
    )
|
26 |
|
|
|
83 |
manual_cap = gr.Textbox(label="Manual Image description (optional)", placeholder=ph_message)
|
84 |
with gr.Row():
|
85 |
duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
|
86 |
+
#seed_in = gr.Number(label="Seed", value=45)
|
87 |
caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
|
88 |
sound_output = gr.Audio(label="Result", elem_id="sound-output")
|
89 |
|
|
|
96 |
|
97 |
gr.HTML(article)
|
98 |
|
99 |
+
generate.click(infer, inputs=[input_img, manual_cap, duration_in], outputs=[caption_output, sound_output, manual_cap, share_group], api_name="i2fx")
|
100 |
share_button.click(None, [], [], _js=share_js)
|
101 |
|
102 |
demo.queue(max_size=32).launch(debug=True)
|