Update app.py
app.py CHANGED
@@ -34,9 +34,10 @@ if torch.cuda.is_available():
 @spaces.GPU()
 def generate_image(prompt, ckpt="4-Step"):
     global loaded
-
+
+    prompt = translator.translate(prompt, 'English')
 
-    print(
+    print(prompt)
 
     checkpoint = checkpoints[ckpt][0]
     num_inference_steps = checkpoints[ckpt][1]
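Note: the added translator.translate(prompt, 'English') call relies on a translation helper defined elsewhere in app.py; its definition is not part of this diff. A minimal sketch of what such a helper could look like, assuming the Space uses the deep_translator package (the PromptTranslator class and its language-name mapping are hypothetical):

# Hypothetical helper matching the translator.translate(prompt, 'English') call above.
# The real definition is not shown in this commit; deep_translator is an assumption.
from deep_translator import GoogleTranslator

class PromptTranslator:
    def translate(self, text, target="English"):
        # Map the human-readable language name used in app.py to an ISO language code.
        codes = {"English": "en"}
        return GoogleTranslator(source="auto", target=codes.get(target, "en")).translate(text)

translator = PromptTranslator()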
@@ -70,10 +71,10 @@ examples = [
 
 with gr.Blocks(css=CSS, theme="soft") as demo:
     gr.HTML("<h1><center>Adobe DMD2🦖</center></h1>")
-    gr.HTML("<p><center><a href='https://huggingface.co/tianweiy/DMD2'>DMD2</a> text-to-image generation</center></p>")
+    gr.HTML("<p><center><a href='https://huggingface.co/tianweiy/DMD2'>DMD2</a> text-to-image generation</center><br><center>Multi-Languages, 4-step is higher quality & 2X slower</center></p>")
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Enter
+            prompt = gr.Textbox(label='Enter Your Prompt', scale=8)
             ckpt = gr.Dropdown(label='Steps',choices=['1-Step', '4-Step'], value='4-Step', interactive=True)
             submit = gr.Button(scale=1, variant='primary')
             img = gr.Image(label='DMD2 Generated Image')
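Note: this hunk only changes the UI components; the event wiring that actually calls generate_image is not part of the diff. A minimal sketch of how the button and textbox are typically hooked up in Gradio (the exact inputs/outputs listed here are an assumption):

# Assumed event wiring, not shown in this commit: run generate_image when the
# button is clicked or when the user presses Enter in the prompt textbox.
prompt.submit(fn=generate_image, inputs=[prompt, ckpt], outputs=img)
submit.click(fn=generate_image, inputs=[prompt, ckpt], outputs=img)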