Update app.py
app.py
CHANGED
@@ -1,71 +1,43 @@
-from tensorflow import keras

-import

-import

-from share_btn import community_icon_html, loading_icon_html, share_js

-weights_path = keras.utils.get_file(
-    origin="https://huggingface.co/sayakpaul/kerascv_sd_pokemon_finetuned/resolve/main/ckpt_epochs_72_res_512_mp_True.h5",
-    file_hash="10b20bd27912d1da904dafe8c576351c2f373546f446591aeff00d816d701a6e"
-)
-pokemon_model = keras_cv.models.StableDiffusion(
-    img_width=img_width, img_height=img_height
-)
-pokemon_model.to(device)
-pokemon_model.diffusion_model.load_weights(weights_path)

-pokemon_model.diffusion_model.compile(jit_compile=True)
-pokemon_model.decoder.compile(jit_compile=True)
-pokemon_model.text_encoder.compile(jit_compile=True)

 device="cpu"

-    start_time = time.time()
-    # `images is an `np.ndarray`. So we convert it to a list of ndarrays.
-    # Each ndarray represents a generated image.
-    # Reference: https://gradio.app/docs/#gallery
-    images = pokemon_model.text_to_image(
-        prompt,
-        batch_size=num_images_to_gen,
-        unconditional_guidance_scale=unconditional_guidance_scale,
-    )
-    end_time = time.time()
-    print(f"Time taken: {end_time - start_time} seconds.")
-    return [image for image in images]

-article = "This Space leverages a T4 GPU to run the predictions. We use mixed-precision to speed up the inference latency. We further use XLA to carve out maximum performance from TensorFlow."
-gr.Interface(
-    generate_image_fn,
-    inputs=[
-        gr.Textbox(
-            label="Enter your prompt",
-            max_lines=1,
-            placeholder="cute Sundar Pichai creature",
-        ),
-        gr.Slider(value=40, minimum=8, maximum=50, step=1),
-    ],
-    outputs=gr.Gallery().style(grid=[2], height="auto"),
-    title="Generate custom pokemons",
-    description=description,
-    article=article,
-    examples=[["cute Sundar Pichai creature", 40], ["Hello kitty", 40]],
-    allow_flagging=False,
-).launch(enable_queue=True)
+import gradio as gr
+#import torch
+#from torch import autocast // only for GPU

+from PIL import Image

+import os
+MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')

+from diffusers import StableDiffusionPipeline
+#from diffusers import StableDiffusionImg2ImgPipeline

+print("hello sylvain")

+YOUR_TOKEN=MY_SECRET_TOKEN

 device="cpu"

+pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_auth_token=YOUR_TOKEN)
+pipe.to(device)

+gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

+def infer(prompt):

+    #image = pipe(prompt, init_image=init_image)["sample"][0]
+    images_list = pipe([prompt] * 4)
+    images = []
+    safe_image = Image.open(r"unsafe.png")
+    for i, image in enumerate(images_list["images"]):
+        if(images_list["nsfw_content_detected"][i]):
+            images.append(safe_image)
+        else:
+            images.append(image)

+    return images

+print("Great sylvain ! Everything is working fine !")

+title="Stable Diffusion CPU"
+description="Stable Diffusion example using CPU and HF token. <br />Warning: Slow process... ~5/10 min inference time. <b>NSFW filter enabled.</b>"

+gr.Interface(fn=infer, inputs="text", outputs=gallery,title=title,description=description).queue(max_size=10).launch(enable_queue=True)
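
For reference, below is a minimal standalone sketch of the inference flow the new app.py wraps in Gradio. It is not part of the commit: it assumes diffusers/Pillow versions compatible with what the Space runs, a valid Hugging Face token exported as HF_TOKEN_SD, and an unsafe.png placeholder image in the working directory; the prompt text and image count are arbitrary.

# Standalone sketch (not part of the commit): mirrors infer() without the Gradio UI.
# Assumes a diffusers version compatible with the Space, HF_TOKEN_SD set to a valid
# Hugging Face token, and an "unsafe.png" placeholder image in the working directory.
import os
from PIL import Image
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    use_auth_token=os.environ.get("HF_TOKEN_SD"),
)
pipe.to("cpu")

# Two images per call (the Space generates four); CPU inference is slow.
result = pipe(["a photo of an astronaut riding a horse"] * 2)
safe_image = Image.open("unsafe.png")  # saved in place of NSFW-flagged outputs

for i, image in enumerate(result["images"]):
    out = safe_image if result["nsfw_content_detected"][i] else image
    out.save(f"sample_{i}.png")  # each entry is a PIL.Image.Image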