Merge branch 'add_safety_checker' of https://github.com/patrickvonplaten/stable-diffusion into patrickvonplaten-add_safety_checker
scripts/txt2img.py  CHANGED  (+28 -2)
@@ -16,12 +16,31 @@ from ldm.util import instantiate_from_config
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.models.diffusion.plms import PLMSSampler
 
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from transformers import AutoFeatureExtractor
+
+# load safety model
+safety_model_id = "CompVis/stable-diffusion-v-1-3"
+safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id, use_auth_token=True)
+safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id, use_auth_token=True)
 
 def chunk(it, size):
     it = iter(it)
     return iter(lambda: tuple(islice(it, size)), ())
 
 
+def numpy_to_pil(images):
+    """
+    Convert a numpy image or a batch of images to a PIL image.
+    """
+    if images.ndim == 3:
+        images = images[None, ...]
+    images = (images * 255).round().astype("uint8")
+    pil_images = [Image.fromarray(image) for image in images]
+
+    return pil_images
+
+
 def load_model_from_config(config, ckpt, verbose=False):
     print(f"Loading model from {ckpt}")
     pl_sd = torch.load(ckpt, map_location="cpu")
@@ -247,16 +266,23 @@ def main():
 
                         x_samples_ddim = model.decode_first_stage(samples_ddim)
                         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+                        x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy()
+
+                        x_image = x_samples_ddim
+                        safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
+                        x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values)
+
+                        x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)
 
                         if not opt.skip_save:
-                            for x_sample in x_samples_ddim:
+                            for x_sample in x_checked_image_torch:
                                 x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                 Image.fromarray(x_sample.astype(np.uint8)).save(
                                     os.path.join(sample_path, f"{base_count:05}.png"))
                                 base_count += 1
 
                         if not opt.skip_grid:
-                            all_samples.append(x_samples_ddim)
+                            all_samples.append(x_checked_image_torch)
 
                 if not opt.skip_grid:
                     # additionally, save as grid
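Note on the module-level additions in the first hunk: safety_feature_extractor turns PIL images into the CLIP pixel_values tensor the checker expects, and safety_checker scores those features against its concept embeddings. A minimal sketch of how the pieces combine (check_safety is a hypothetical helper, not part of this commit; it assumes the module-level objects and the numpy_to_pil helper defined above are in scope):

def check_safety(x_image):
    # x_image: float numpy array in [0, 1] with shape (batch, height, width, channels)
    safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
    x_checked_image, has_nsfw_concept = safety_checker(
        images=x_image, clip_input=safety_checker_input.pixel_values
    )
    # the checker returns the images (flagged entries come back blacked out in the
    # diffusers implementation) plus a per-image list of NSFW flags
    return x_checked_image, has_nsfw_concept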
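The numpy_to_pil helper added above accepts either a single HWC image or an NHWC batch of floats in [0, 1] and always returns a list of PIL images. A small hypothetical usage sketch (assumes numpy_to_pil and the script's PIL Image import are in scope):

import numpy as np

single = np.random.rand(64, 64, 3).astype(np.float32)    # one HWC image
batch = np.random.rand(4, 64, 64, 3).astype(np.float32)  # NHWC batch of four

print(len(numpy_to_pil(single)))  # 1 -- the 3-d input is promoted to a batch of one
print(len(numpy_to_pil(batch)))   # 4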
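Note on the second hunk: the decoder output is an NCHW torch tensor, the safety checker consumes an NHWC numpy array, and the save/grid code downstream still expects NCHW torch, hence the two permutes. A self-contained sketch of that layout round trip (shapes are illustrative only, not part of the diff):

import torch

x = torch.rand(2, 3, 512, 512)                       # NCHW, as produced by decode_first_stage
x_np = x.cpu().permute(0, 2, 3, 1).numpy()           # NHWC numpy for the safety checker
x_back = torch.from_numpy(x_np).permute(0, 3, 1, 2)  # back to NCHW for the save/grid code

assert x_back.shape == x.shape

has_nsfw_concept is returned by the checker but not used further in this change; a caller could, for example, print a warning when any(has_nsfw_concept) is true.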