enable fixed mask and use new VAE
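This commit swaps the inpainting pipeline's autoencoder for the fine-tuned stabilityai/sd-vae-ft-ema VAE (a drop-in decoder intended to improve reconstruction quality) and adds a fixed-mask fallback for fully selected frames. A minimal sketch of the VAE swap, assuming the same model IDs, fp16 revision and CUDA device that the app.py diff below uses:

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.models import AutoencoderKL

# Load the fine-tuned VAE (full precision here, as in the commit) and pass it
# to the inpainting pipeline so it replaces the checkpoint's bundled autoencoder.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema")
inpaint = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    revision="fp16",
    torch_dtype=torch.float16,
    vae=vae,
).to("cuda")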
stablediffusion-infinity/.gitignore
CHANGED
@@ -5,4 +5,5 @@ build/
 travis.sh
 *.iml
 .token
-libpatchmatch.so
+libpatchmatch.so
+rooms.db
stablediffusion-infinity/app.py
CHANGED
@@ -12,6 +12,8 @@ import numpy as np
 import torch
 from torch import autocast
 from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
+from diffusers.models import AutoencoderKL
+
 from PIL import Image
 import gradio as gr
 import skimage
@@ -69,13 +71,19 @@ except Exception as e:
 blocks = gr.Blocks().queue()
 model = {}
 
+WHITES = 66846720
+STATIC_MASK = Image.open("mask.png")
+
 
 def get_model():
     if "inpaint" not in model:
+
+        vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-ema")
         inpaint = StableDiffusionInpaintPipeline.from_pretrained(
             "runwayml/stable-diffusion-inpainting",
             revision="fp16",
-            torch_dtype=torch.float16
+            torch_dtype=torch.float16,
+            vae=vae,
         ).to("cuda")
 
         # lms = LMSDiscreteScheduler(
@@ -126,29 +134,16 @@ def run_outpaint(
     process_size = 512
 
     mask_sum = mask.sum()
-
-
-
-
-
-
-
-
-
-
-    # # mask_image=mask_image.filter(ImageFilter.GaussianBlur(radius = 8))
-    # with autocast("cuda"):
-    #     images = inpaint(
-    #         prompt=prompt_text,
-    #         init_image=init_image.resize(
-    #             (process_size, process_size), resample=SAMPLING_MODE
-    #         ),
-    #         mask_image=mask_image.resize((process_size, process_size)),
-    #         strength=strength,
-    #         num_inference_steps=step,
-    #         guidance_scale=guidance,
-    #     )
-    if mask_sum > 0:
+    if mask_sum >= WHITES:
+        print("inpaiting with fixed Mask")
+        mask = np.array(STATIC_MASK)[:, :, 0]
+        img, mask = functbl[fill_mode](img, mask)
+        init_image = Image.fromarray(img)
+        mask = 255 - mask
+        mask = skimage.measure.block_reduce(mask, (8, 8), np.max)
+        mask = mask.repeat(8, axis=0).repeat(8, axis=1)
+        mask_image = Image.fromarray(mask)
+    elif mask_sum > 0 and mask_sum < WHITES:
         print("inpainting")
         img, mask = functbl[fill_mode](img, mask)
         init_image = Image.fromarray(img)
@@ -158,7 +153,6 @@ def run_outpaint(
         mask_image = Image.fromarray(mask)
 
         # mask_image=mask_image.filter(ImageFilter.GaussianBlur(radius = 8))
-
     else:
         print("text2image")
         print("inpainting")
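WHITES = 66846720 is 255 * 512 * 512, i.e. the sum of a fully white 512x512 8-bit selection mask, so the new first branch appears to target the case where the entire frame is selected; it then substitutes the bundled mask.png for the user's selection. The inverted mask is coarsened to an 8-pixel grid: skimage.measure.block_reduce takes the per-tile maximum over 8x8 blocks and repeat scales the result back up, so any tile touched by the mask becomes fully masked (8 presumably matches the VAE's 8x spatial downsampling). A self-contained sketch of that coarsening step on a made-up toy mask, not the app's data:

import numpy as np
import skimage.measure

# Toy 512x512 mask with a small irregular region marked 255.
mask = np.zeros((512, 512), dtype=np.uint8)
mask[100:137, 200:261] = 255

# Max over each 8x8 tile -> 64x64, then repeat back up to 512x512.
# Any tile containing a masked pixel becomes entirely masked, so the
# mask edges snap to the 8-pixel grid.
coarse = skimage.measure.block_reduce(mask, (8, 8), np.max)
coarse = coarse.repeat(8, axis=0).repeat(8, axis=1)

assert coarse.shape == mask.shape
assert coarse.sum() >= mask.sum()  # coarsening can only grow the region

# Sanity check of the WHITES constant: an all-white 512x512 mask sums to it.
assert np.full((512, 512), 255, dtype=np.uint8).sum() == 66846720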
stablediffusion-infinity/rooms.db
CHANGED
Binary files a/stablediffusion-infinity/rooms.db and b/stablediffusion-infinity/rooms.db differ