Gosula committed
Commit 9e06d90 · 1 Parent(s): 2b7b059

Update app.py

Files changed (1)
  1. app.py +111 -0
app.py CHANGED
@@ -0,0 +1,111 @@
+ # Standard library
+ import os
+ import pathlib
+ import random
+ import shutil
+ from base64 import b64encode
+ from pathlib import Path
+
+ # Third-party
+ import gradio as gr
+ import numpy
+ import torch
+ from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
+ from huggingface_hub import notebook_login
+ from IPython.display import HTML  # for video display
+ from matplotlib import pyplot as plt
+ from PIL import Image
+ from torch import autocast
+ from torchvision import transforms as tfms
+ from tqdm.auto import tqdm
+ from transformers import CLIPTextModel, CLIPTokenizer, logging
+
+ # Project modules
+ from utils import *
+ from device import torch_device, vae, text_encoder, unet, tokenizer, scheduler, token_emb_layer, pos_emb_layer, position_embeddings
+ from stablediffusion import *
+ # Map each style name to its concept-embedding file
+ path = "/Project/concept_styles"
+ concept_styles = {
+     "cubex": "cubex.bin",
+     "hours-style": "hours-style.bin",
+     "orange-jacket": "orange-jacket.bin",
+     "simple_styles(2)": "simple_styles(2).bin",
+     "xyz": "xyz.bin",
+ }
+
+
+ def generate(prompt, styles, num_inference_steps, loss_scale, noised_image):
+     """Generate one image per selected style, without and with guidance post-processing."""
+     lossless_images, lossy_images = [], []
+     for style in styles:
+         # Load the learned concept embedding for this style
+         concept_lib_path = f"{path}/{concept_styles[style]}"
+         concept_lib = pathlib.Path(concept_lib_path)
+         concept_embed = torch.load(concept_lib)
+
+         # Reuse the same seed so the two variants differ only in guidance
+         manual_seed = random.randint(0, 100)
+
+         generated_image_lossless = generate_image(
+             prompt, concept_embed,
+             num_inference_steps=num_inference_steps,
+             color_postprocessing=False,
+             noised_image=noised_image, loss_scale=loss_scale, seed=manual_seed,
+         )
+         generated_image_lossy = generate_image(
+             prompt, concept_embed,
+             num_inference_steps=num_inference_steps,
+             color_postprocessing=True,
+             noised_image=noised_image, loss_scale=loss_scale, seed=manual_seed,
+         )
+         lossless_images.append((generated_image_lossless, style))
+         lossy_images.append((generated_image_lossy, style))
+     return {lossless_gallery: lossless_images, lossy_gallery: lossy_images}
+
+ with gr.Blocks() as app:
+     gr.Markdown("## ERA V1 Session20 - Stable Diffusion Model: Generative Art with Guidance")
+     with gr.Row():
+         with gr.Column():
+             prompt_box = gr.Textbox(label="Prompt", interactive=True)
+             style_selector = gr.Dropdown(
+                 choices=list(concept_styles.keys()),
+                 value=list(concept_styles.keys())[0],
+                 multiselect=True,
+                 label="Select a Concept Style",
+                 interactive=True,
+             )
+             num_inference_steps = gr.Slider(
+                 minimum=10,
+                 maximum=50,
+                 value=30,
+                 step=10,
+                 label="Select Number of Steps",
+                 interactive=True,
+             )
+             loss_scale = gr.Slider(
+                 minimum=0,
+                 maximum=10,
+                 value=8,
+                 step=1,
+                 label="Select Guidance Scale",
+                 interactive=True,
+             )
+             noised_image = gr.Checkbox(
+                 label="Include Noised Image",
+                 value=False,  # Blocks components take `value` for the initial state, not `default`
+                 interactive=True,
+             )
+
+             submit_btn = gr.Button(value="Generate")
+
+         with gr.Column():
+             lossless_gallery = gr.Gallery(
+                 label="Generated Images without Guidance", show_label=True
+             )
+             lossy_gallery = gr.Gallery(
+                 label="Generated Images with Guidance", show_label=True
+             )
+
+     submit_btn.click(
+         generate,
+         inputs=[prompt_box, style_selector, num_inference_steps, loss_scale, noised_image],
+         outputs=[lossless_gallery, lossy_gallery],
+     )
+
+ app.launch()
+
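
Note on the concept `.bin` files: `generate` loads each selected style with `torch.load` and hands the result to `generate_image`, which lives in the `stablediffusion` module and is not shown in this diff. The sketch below illustrates what such a file commonly contains and how it can be wired into a CLIP text encoder; the file name, the `{placeholder token: 768-dim embedding}` structure, and the checkpoint name are assumptions for illustration, not taken from this commit.

# Sketch: registering a learned-concept embedding with the tokenizer / text encoder.
# Assumes the .bin maps one placeholder token to one CLIP embedding vector,
# e.g. {"<cubex>": tensor of shape (768,)} — key and shape are assumptions here.
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

learned = torch.load("/Project/concept_styles/cubex.bin", map_location="cpu")
placeholder_token, embedding = next(iter(learned.items()))

# Add the new token and copy its learned embedding into the encoder's input embeddings.
tokenizer.add_tokens(placeholder_token)
text_encoder.resize_token_embeddings(len(tokenizer))
token_id = tokenizer.convert_tokens_to_ids(placeholder_token)
with torch.no_grad():
    text_encoder.get_input_embeddings().weight[token_id] = embedding

# The placeholder token can then appear in prompts, e.g. "a house in <cubex> style".

This is the usual textual-inversion loading pattern; the app's own `generate_image` may consume the loaded embedding differently.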