Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -35,19 +35,12 @@ safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diff
 feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
 image_processor = VaeImageProcessor(vae_scale_factor=8)
 
-with open("filter.txt") as f:
-    filter_words = {word for word in f.read().split("\n") if word}
-
 # Inference function.
 @spaces.GPU(enable_queue=True)
 def generate(prompt, option, progress=gr.Progress()):
     global step_loaded
     print(prompt, option)
     ckpt, step = opts[option]
-    if any(word in prompt for word in filter_words):
-        gr.Warning("Safety checker triggered. Image may contain violent or sexual content.")
-        print(f"Safety checker triggered on prompt: {prompt}")
-        return Image.new("RGB", (512, 512))
     progress((0, step))
     if step != step_loaded:
         print(f"Switching checkpoint from {step_loaded} to {step}")
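For reference, this commit removes the filter.txt word-list safety check that previously ran at the top of generate() and returned a blank placeholder image for flagged prompts. Below is a minimal standalone sketch of that removed pattern; it assumes a filter.txt file with one blocked word per line, and check_prompt is a hypothetical wrapper (the Gradio warning and the diffusers pipeline setup from the Space are omitted):

from PIL import Image

# Load the blocked-word list, one word per line, as the removed code did.
with open("filter.txt") as f:
    filter_words = {word for word in f.read().split("\n") if word}

def check_prompt(prompt):
    # Return a blank 512x512 placeholder if the prompt contains a blocked word, else None.
    if any(word in prompt for word in filter_words):
        print(f"Safety checker triggered on prompt: {prompt}")
        return Image.new("RGB", (512, 512))
    return None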