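"""Gradio demo: color-guided diffusion sampling.

Loads the 'johnowhitaker/sd-class-wikiart-from-bedrooms' DDPM pipeline, swaps in
a DDIM scheduler for faster sampling, and steers each denoising step toward a
user-chosen target color by following the gradient of a simple color loss.
"""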
import gradio as gr
import numpy as np
import torch
import torchvision
from PIL import Image, ImageColor
from tqdm import tqdm

from diffusers import DDIMScheduler, DDPMPipeline

device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the pretrained pipeline
pipeline_name = 'johnowhitaker/sd-class-wikiart-from-bedrooms'
image_pipe = DDPMPipeline.from_pretrained(pipeline_name).to(device)

# Set up a DDIM scheduler so we can sample in far fewer steps than the full DDPM schedule
scheduler = DDIMScheduler.from_pretrained(pipeline_name)
scheduler.set_timesteps(num_inference_steps=40)

def color_loss(images, target_color=(0.1, 0.9, 0.5)):
    """Given a target color (R, G, B) with components in the 0-1 range, return a loss
    for how far, on average, the images' pixels are from that color. Defaults to a
    light teal: (0.1, 0.9, 0.5)."""
    target = torch.tensor(target_color).to(images.device) * 2 - 1  # Map target color to (-1, 1)
    target = target[None, :, None, None]  # Reshape to broadcast against the images (b, c, h, w)
    error = torch.abs(images - target).mean()  # Mean absolute difference between the image pixels and the target color
    return error
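
# Minimal sanity check: mid-gray images (all zeros in (-1, 1) space) score
# (0.8 + 0.8 + 0.0) / 3, roughly 0.53, against the default teal target:
#   color_loss(torch.zeros(1, 3, 256, 256))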


def generate(color, guidance_loss_scale):

    # Target color as RGB, rescaled from (0, 255) to the (0, 1) range color_loss expects
    target_color = ImageColor.getcolor(color, "RGB")
    target_color = [a / 255 for a in target_color]

    # Initial random x - just one image, but you could add a 'num_images' argument/input to give the user control
    x = torch.randn(1, 3, 256, 256).to(device)

    # Our custom sampling loop: denoise step by step, nudging x toward the target color
    for t in tqdm(scheduler.timesteps):
        
        # Prep the model input 
        model_input = scheduler.scale_model_input(x, t)
    
        # Predict the noise residual
        with torch.no_grad():
            noise_pred = image_pipe.unet(model_input, t).sample
    
        # Set requires_grad on x (shortcut: doing this AFTER the unet forward pass
        # saves memory, at the cost of a less accurate gradient)
        x = x.detach().requires_grad_()
    
        # Get the predicted x0:
        x0 = scheduler.step(noise_pred, t, x).pred_original_sample
    
        # Calculate loss
        loss = color_loss(x0, target_color) * guidance_loss_scale
    
        # Get the gradient of the loss w.r.t. x, negated so that following it reduces the loss
        cond_grad = -torch.autograd.grad(loss, x)[0]

        # Modify x based on this gradient (a small gradient-descent step on the color loss)
        x = x.detach() + cond_grad
        
        # Now step with scheduler
        x = scheduler.step(noise_pred, t, x).prev_sample
        
    # Return the final output as an image (or an image grid if there is more than one image)
    grid = torchvision.utils.make_grid(x, nrow=4)
    im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5
    return Image.fromarray(np.array(im * 255).astype(np.uint8))


inputs = [
    gr.ColorPicker(label="color", value="#55FFAA"),  # Add any inputs you need here
    gr.Slider(label="guidance_scale", minimum=1, maximum=100, value=30)
]
outputs = gr.Image(label="result")

demo = gr.Interface(
    fn=generate,
    inputs=inputs,
    outputs=outputs,
    examples=[
        ["#BB2266", 30],  # You can provide some example inputs to get people started
    ],
)

if __name__ == "__main__":
    demo.launch()
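    # When running on a remote machine or in a notebook, demo.launch(share=True)
    # creates a temporary public link instead.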