File size: 3,723 Bytes
9e06d90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d76bad9
9e06d90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
from base64 import b64encode
from utils import *
from device import torch_device,vae,text_encoder,unet,tokenizer,scheduler,token_emb_layer,pos_emb_layer,position_embeddings
import numpy
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from huggingface_hub import notebook_login
import gradio as gr
import random
import torch
import pathlib

import gradio as gr
import random
import torch
import pathlib
# For video display:
from IPython.display import HTML
from matplotlib import pyplot as plt
from pathlib import Path
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging
import os
import shutil

from stablediffusion import *
# Directory that holds the textual-inversion concept embeddings (.bin files).
path = "Project/concept_styles"

# Display name -> embedding filename. Every embedding file is simply
# "<style name>.bin", so build the mapping instead of spelling it out.
_STYLE_NAMES = (
    "cubex",
    "hours-style",
    "orange-jacket",
    "simple_styles(2)",
    "xyz",
)
concept_styles = {name: f"{name}.bin" for name in _STYLE_NAMES}


def generate(prompt, styles, num_inference_steps, loss_scale, noised_image):
    """Generate one guided and one unguided image per selected concept style.

    Args:
        prompt: Text prompt for the diffusion model.
        styles: List of keys into ``concept_styles`` (the multiselect
            dropdown yields a list).
        num_inference_steps: Number of denoising steps.
        loss_scale: Guidance/loss scale forwarded to ``generate_image``.
        noised_image: Whether to include the noised image (checkbox value).

    Returns:
        A mapping of the two gallery components to their ``(image, style)``
        lists, as accepted by a ``gr.Blocks`` event handler.
    """
    lossless_images, lossy_images = [], []
    for style in styles:
        concept_lib = pathlib.Path(f"{path}/{concept_styles[style]}")
        # NOTE(review): torch.load unpickles arbitrary code from the .bin
        # file — only ship embeddings from trusted sources.
        concept_embed = torch.load(concept_lib)

        # One shared seed so the two renders differ only in postprocessing.
        manual_seed = random.randint(0, 100)

        def _render(postprocess):
            # Single call site for both modes; all other knobs are shared.
            return generate_image(
                prompt,
                concept_embed,
                num_inference_steps=num_inference_steps,
                color_postprocessing=postprocess,
                noised_image=noised_image,
                loss_scale=loss_scale,
                seed=manual_seed,
            )

        lossless_images.append((_render(False), style))
        lossy_images.append((_render(True), style))
    return {lossless_gallery: lossless_images, lossy_gallery: lossy_images}

# UI layout: prompt + generation knobs in the left column, the two result
# galleries (without / with guidance) in the right column.
with gr.Blocks() as app:
    gr.Markdown("## ERA V1 Session20 - Stable Diffusion Model: Generative Art with Guidance")
    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(label="Prompt", interactive=True)
            # Multiselect: `generate` receives a list of style keys.
            style_selector = gr.Dropdown(
                choices=list(concept_styles.keys()),
                value=list(concept_styles.keys())[0],
                multiselect=True,
                label="Select a Concept Style",
                interactive=True,
            )
            num_inference_steps = gr.Slider(
                minimum=10,
                maximum=50,
                value=30,
                step=10,
                label="Select Number of Steps",
                interactive=True,
            )

            loss_scale = gr.Slider(
                minimum=0,
                maximum=10,
                value=8,
                step=1,
                label="Select Guidance Scale",
                interactive=True,
            )
            noised_image = gr.Checkbox(
                label="Include Noised Image",
                # FIX: gradio >= 3 (required by gr.Blocks) takes `value=`,
                # not `default=`; the old kwarg was silently ignored.
                value=False,
                interactive=True,
            )

            submit_btn = gr.Button(value="Generate")

        with gr.Column():
            lossless_gallery = gr.Gallery(
                label="Generated Images without Guidance", show_label=True
            )
            lossy_gallery = gr.Gallery(
                label="Generated Images with Guidance", show_label=True
            )

        # `generate` returns a dict keyed by these gallery components,
        # which gradio matches against `outputs`.
        submit_btn.click(
            generate,
            inputs=[prompt_box, style_selector, num_inference_steps, loss_scale, noised_image],
            outputs=[lossless_gallery, lossy_gallery],
        )

app.launch()