Spaces: Running on A10G
initialize zerogpu #1
by linoyts - opened
app.py CHANGED
@@ -6,7 +6,7 @@ from diffusers.pipelines.auto_pipeline import AutoPipelineForImage2Image
 from src.sdxl_inversion_pipeline import SDXLDDIMPipeline
 from src.config import RunConfig
 from src.editor import ImageEditorDemo
-
+import spaces
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 scheduler_class = MyEulerAncestralDiscreteScheduler
@@ -27,7 +27,7 @@ pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.
 # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
 # pipe = pipe.to(device)
 
-
+@spaces.GPU
 def infer(input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps=4,
           num_inversion_steps=4,
           inversion_max_step=0.6):
@@ -65,8 +65,8 @@ else:
     power_device = "CPU"
 
 # with gr.Blocks(css=css) as demo:
-with gr.Blocks() as demo:
-    gr.Markdown(f"""
+with gr.Blocks(css="style.css") as demo:
+    gr.Markdown(f""" # Real Time Editing with RNRI Inversion 🍎⚡️
     This is a demo for our [paper](https://arxiv.org/abs/2312.12540) **RNRI: Regularized Newton Raphson Inversion for Text-to-Image Diffusion Models**.
     Image editing using our RNRI for inversion demonstrates significant speed-up and improved quality compared to previous state-of-the-art methods.
     Take a look at our [project page](https://barakmam.github.io/rnri.github.io/).
@@ -79,18 +79,20 @@ with gr.Blocks() as demo:
     with gr.Row():
         description_prompt = gr.Text(
             label="Image description",
+            info = "Enter your image description ",
             show_label=False,
             max_lines=1,
-            placeholder="
+            placeholder="a cake on a table",
             container=False,
         )
 
     with gr.Row():
         target_prompt = gr.Text(
             label="Edit prompt",
+            info = "Enter your edit prompt",
             show_label=False,
             max_lines=1,
-            placeholder="
+            placeholder="an oreo cake on a table",
             container=False,
         )
 
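For context, the ZeroGPU change above boils down to two pieces: importing `spaces` and decorating the GPU-bound entry point so the Space only holds a GPU while that function runs. A minimal sketch of the pattern (the `infer` body here is a stand-in, not this Space's actual SDXL inversion pipeline; `duration` is an optional knob this commit does not set):

```python
import spaces  # Hugging Face ZeroGPU helper, preinstalled on Spaces
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

@spaces.GPU  # a GPU is attached for the duration of each call, then released
def infer(prompt: str) -> str:
    # Stand-in body: the real app runs the inversion/inference pipelines here.
    return f"ran on {device}: {prompt}"
```

Longer jobs can hint at their runtime with `@spaces.GPU(duration=...)`, but for a 4-step turbo pipeline the default is typically enough, which is presumably why the diff leaves it off.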
style.css ADDED
@@ -0,0 +1,16 @@
+
+#component-0{
+    max-width: 900px;
+    margin: 0 auto;
+}
+
+#description, h1 {
+    text-align: center;
+}
+
+#duplicate-button {
+    margin: auto;
+    color: #fff;
+    background: #1565c0;
+    border-radius: 100vh;
+}