Rhueue committed
Commit e3f1008 · verified · 1 parent: 6f2bf77

Update app.py

Files changed (1)
app.py +31 -156
app.py CHANGED
@@ -1,160 +1,35 @@
-from __future__ import annotations
-
-import math
-import random
-
 import gradio as gr
 import torch
-from PIL import Image, ImageOps
+from PIL import Image
 from diffusers import StableDiffusionInstructPix2PixPipeline
 
-# Path to the SafeTensor model in Colab
-model_path = "/content/uberRealisticPornMerge_urpmv12.instruct-pix2pix.safetensors"
-
-def main():
-    # Load the SafeTensor model from Colab
-    safe_pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_path, torch_dtype=torch.float16, safety_checker=None).to("cuda")
-    example_image = Image.open("/content/T/imgs_example.jpg").convert("RGB")
-
-    def load_example(
-        steps: int,
-        randomize_seed: bool,
-        seed: int,
-        randomize_cfg: bool,
-        text_cfg_scale: float,
-        image_cfg_scale: float,
-    ):
-        example_instruction = random.choice(example_instructions)
-        return [example_image, example_instruction] + generate(
-            example_image,
-            example_instruction,
-            steps,
-            randomize_seed,
-            seed,
-            randomize_cfg,
-            text_cfg_scale,
-            image_cfg_scale,
-        )
-
-    def generate(
-        input_image: Image.Image,
-        instruction: str,
-        steps: int,
-        randomize_seed: bool,
-        seed: int,
-        randomize_cfg: bool,
-        text_cfg_scale: float,
-        image_cfg_scale: float,
-    ):
-        seed = random.randint(0, 100000) if randomize_seed else seed
-        text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale
-        image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale
-
-        width, height = input_image.size
-        factor = 512 / max(width, height)
-        factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
-        width = int((width * factor) // 64) * 64
-        height = int((height * factor) // 64) * 64
-        input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)
-
-        if instruction == "":
-            return [input_image, seed]
-
-        generator = torch.manual_seed(seed)
-        edited_image = safe_pipe(
-            instruction, image=input_image,
-            guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale,
-            num_inference_steps=steps, generator=generator,
-        ).images[0]
-        return [seed, text_cfg_scale, image_cfg_scale, edited_image]
-
-    def reset():
-        return [0, "Randomize Seed", 1371, "Fix CFG", 7.5, 1.5, None]
-
-    with gr.Blocks() as demo:
-        gr.HTML("""<h1 style="font-weight: 900; margin-bottom: 7px;">
-        InstructPix2Pix: Learning to Follow Image Editing Instructions
-        </h1>
-        <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
-        <br/>
-        <a href="https://huggingface.co/spaces/timbrooks/instruct-pix2pix?duplicate=true">
-        <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-        <p/>""")
-        with gr.Row():
-            with gr.Column(scale=1, min_width=100):
-                generate_button = gr.Button("Generate")
-            with gr.Column(scale=1, min_width=100):
-                load_button = gr.Button("Load Example")
-            with gr.Column(scale=1, min_width=100):
-                reset_button = gr.Button("Reset")
-            with gr.Column(scale=3):
-                instruction = gr.Textbox(lines=1, label="Edit Instruction", interactive=True)
-
-        with gr.Row():
-            input_image = gr.Image(label="Input Image", type="pil", interactive=True)
-            edited_image = gr.Image(label=f"Edited Image", type="pil", interactive=False)
-            input_image.style(height=512, width=512)
-            edited_image.style(height=512, width=512)
-
-        with gr.Row():
-            steps = gr.Number(value=50, precision=0, label="Steps", interactive=True)
-            randomize_seed = gr.Radio(
-                ["Fix Seed", "Randomize Seed"],
-                value="Randomize Seed",
-                type="index",
-                show_label=False,
-                interactive=True,
-            )
-            seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True)
-            randomize_cfg = gr.Radio(
-                ["Fix CFG", "Randomize CFG"],
-                value="Fix CFG",
-                type="index",
-                show_label=False,
-                interactive=True,
-            )
-            text_cfg_scale = gr.Number(value=7.5, label=f"Text CFG", interactive=True)
-            image_cfg_scale = gr.Number(value=1.5, label=f"Image CFG", interactive=True)
-
-        gr.Markdown(help_text)
-
-        # Define actions for buttons
-        load_button.click(
-            fn=load_example,
-            inputs=[
-                steps,
-                randomize_seed,
-                seed,
-                randomize_cfg,
-                text_cfg_scale,
-                image_cfg_scale,
-            ],
-            outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image],
-        )
-        generate_button.click(
-            fn=generate,
-            inputs=[
-                input_image,
-                instruction,
-                steps,
-                randomize_seed,
-                seed,
-                randomize_cfg,
-                text_cfg_scale,
-                image_cfg_scale,
-            ],
-            outputs=[seed, text_cfg_scale, image_cfg_scale, edited_image],
-        )
-        reset_button.click(
-            fn=reset,
-            inputs=[],
-            outputs=[steps, randomize_seed, seed, randomize_cfg, text_cfg_scale, image_cfg_scale, edited_image],
-        )
-
-    demo.queue(concurrency_count=1)
-    demo.launch(share=False)
-
-
-if __name__ == "__main__":
-    main()
-
+# Define the path to the SafeTensor model
+model_path = "/content/UberRealisticPornMerge_urpmv12.instruct-pix2pix.safetensors"
+
+# Load the single-file SafeTensor checkpoint (from_pretrained expects a
+# diffusers model directory or repo id, not a lone .safetensors file)
+safe_pipe = StableDiffusionInstructPix2PixPipeline.from_single_file(model_path, torch_dtype=torch.float16, safety_checker=None).to("cuda")
+
+def generate_edited_image(input_image):
+    # Convert the Gradio image (a NumPy array) to a PIL Image
+    input_image_pil = Image.fromarray(input_image.astype('uint8'), 'RGB')
+
+    # Run the pipeline; InstructPix2Pix takes the edit instruction as prompt
+    # (an empty prompt requests no edit)
+    edited_image = safe_pipe(prompt="", image=input_image_pil, num_inference_steps=50).images[0]
+
+    # The pipeline already returns a PIL image, which Gradio can display directly
+    return edited_image
+
+# Define the input and output components for the Gradio app
+input_image = gr.Image(label="Upload an Input Image")
+output_image = gr.Image(label="Edited Image")
+
+# Create the Gradio interface
+gr.Interface(
+    fn=generate_edited_image,
+    inputs=input_image,
+    outputs=output_image,
+    title="SafeTensor Image Editing",
+    description="Upload an image and generate an edited image using a SafeTensor model.",
+).launch()
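
For a quick sanity check outside the Gradio app, the pipeline can also be driven directly. The sketch below is illustrative rather than part of the commit: the input/output file names and the instruction text are placeholder assumptions, from_single_file is assumed to be available for this pipeline class in the installed diffusers release, and the CFG values are simply the defaults from the UI that this commit removes.

import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

model_path = "/content/UberRealisticPornMerge_urpmv12.instruct-pix2pix.safetensors"

# Load the monolithic .safetensors checkpoint (assumes a diffusers
# release that provides from_single_file for this pipeline)
pipe = StableDiffusionInstructPix2PixPipeline.from_single_file(
    model_path, torch_dtype=torch.float16, safety_checker=None
).to("cuda")

# Placeholder input file and edit instruction, purely for illustration
image = Image.open("input.jpg").convert("RGB")
edited = pipe(
    prompt="turn the sky into a sunset",  # the edit instruction
    image=image,
    num_inference_steps=50,
    guidance_scale=7.5,        # text CFG default from the removed UI
    image_guidance_scale=1.5,  # image CFG default from the removed UI
).images[0]
edited.save("edited.jpg")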