Commit 73f71ed
bhavyapandya committed
1 Parent(s): c212ba0
Upload 2 files

Files changed:
- app.py +188 -0
- requirements.txt +0 -0
app.py
ADDED
import os

import gradio as gr
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models
from tensorflow.keras.applications import vgg19

# Force CPU execution; the Space has no GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Working resolution for the optimization.
img_nrows = 256
img_ncols = 300

def img_preprocess(image):
    # Gradio hands the function an RGB numpy array, so there is no file to
    # load; resize to the working resolution, add a batch dimension, and
    # apply VGG19 preprocessing (RGB -> BGR, ImageNet mean subtraction).
    img = keras.preprocessing.image.img_to_array(image)
    img = tf.image.resize(img, (img_nrows, img_ncols)).numpy()
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return tf.convert_to_tensor(img)

def deprocess_img(processed_img):
    x = processed_img.copy()
    if len(x.shape) == 4:
        x = np.squeeze(x, 0)
    # Input dimension must be [1, height, width, channel] or [height, width, channel].
    assert len(x.shape) == 3

    # Perform the inverse of the preprocessing step: add back the ImageNet
    # channel means, then convert BGR back to RGB.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]

    x = np.clip(x, 0, 255).astype('uint8')
    return x

# Content comes from a single deep layer; style from one layer per VGG block.
content_layers = ['block5_conv2']
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
number_content = len(content_layers)
number_style = len(style_layers)

def get_model():
    # Frozen VGG19 feature extractor whose outputs are ordered
    # [style layers..., content layers...].
    vgg = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False
    content_output = [vgg.get_layer(layer).output for layer in content_layers]
    style_output = [vgg.get_layer(layer).output for layer in style_layers]
    model_output = style_output + content_output
    return models.Model(vgg.input, model_output)

def get_content_loss(noise, target):
    loss = tf.reduce_mean(tf.square(noise - target))
    return loss

def gram_matrix(tensor):
    # Flatten the spatial dimensions, keeping channels.
    channels = int(tensor.shape[-1])
    vector = tf.reshape(tensor, [-1, channels])
    n = tf.shape(vector)[0]
    gram = tf.matmul(vector, vector, transpose_a=True)
    return gram / tf.cast(n, tf.float32)
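
# Style is represented by channel correlations: reshaping a feature map to
# F with shape (n, C), where n = height * width, the Gram matrix is
#     G = (F^T F) / n,
# so G[i, j] is the average of feature_i * feature_j over all spatial
# positions. G is C x C regardless of spatial size, which is why style
# comparison does not depend on image resolution.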

def get_style_loss(noise, target):
    # `target` is a precomputed Gram matrix; only the noise features
    # still need the Gram transform.
    gram_noise = gram_matrix(noise)
    loss = tf.reduce_mean(tf.square(target - gram_noise))
    return loss

def get_features(model, content_image, style_image):
    content_img = img_preprocess(content_image)
    style_img = img_preprocess(style_image)

    content_output = model(content_img)
    style_output = model(style_img)

    # Indexing [0] drops the batch dimension; the model's outputs are
    # ordered [style layers..., content layers...].
    content_feature = [layer[0] for layer in content_output[number_style:]]
    style_feature = [layer[0] for layer in style_output[:number_style]]
    return content_feature, style_feature

def compute_loss(model, loss_weights, image, gram_style_features, content_features):
    # style_weight and content_weight are user-given parameters that decide
    # how strongly style and content are preserved in the generated image.
    style_weight, content_weight = loss_weights

    output = model(image)
    content_loss = 0
    style_loss = 0

    noise_style_features = output[:number_style]
    noise_content_feature = output[number_style:]

    weight_per_layer = 1.0 / float(number_style)
    for target, noise in zip(gram_style_features, noise_style_features):
        style_loss += weight_per_layer * get_style_loss(noise[0], target)

    weight_per_layer = 1.0 / float(number_content)
    for noise, target in zip(noise_content_feature, content_features):
        content_loss += weight_per_layer * get_content_loss(noise[0], target)

    style_loss *= style_weight
    content_loss *= content_weight

    total_loss = content_loss + style_loss
    return total_loss, style_loss, content_loss
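
# Total objective, as in Gatys et al.'s neural style transfer:
#     L_total = content_weight * L_content + style_weight * L_style,
# with L_style averaged over the five style layers and L_content taken
# from block5_conv2.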

def compute_grads(dictionary):
    with tf.GradientTape() as tape:
        all_loss = compute_loss(**dictionary)

    total_loss = all_loss[0]
    return tape.gradient(total_loss, dictionary['image']), all_loss

def run_style_transfer(content_image, style_image, epochs=20, content_weight=1e3, style_weight=1e-2):
    model = get_model()
    for layer in model.layers:
        layer.trainable = False

    content_feature, style_feature = get_features(model, content_image, style_image)
    style_gram_matrix = [gram_matrix(feature) for feature in style_feature]

    # Initialize the generated image from the content image rather than
    # random noise; this converges much faster.
    noise = img_preprocess(content_image)
    noise = tf.Variable(noise, dtype=tf.float32)

    optimizer = tf.keras.optimizers.Adam(learning_rate=5, beta_1=0.99, epsilon=1e-1)

    best_loss, best_img = float('inf'), None

    loss_weights = (style_weight, content_weight)
    dictionary = {'model': model,
                  'loss_weights': loss_weights,
                  'image': noise,
                  'gram_style_features': style_gram_matrix,
                  'content_features': content_feature}

    # Valid pixel range in VGG-preprocessed (mean-subtracted) space.
    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    imgs = []
    for i in range(1, epochs + 1):
        grad, all_loss = compute_grads(dictionary)
        total_loss, style_loss, content_loss = all_loss
        optimizer.apply_gradients([(grad, noise)])
        clipped = tf.clip_by_value(noise, min_vals, max_vals)
        noise.assign(clipped)

        if total_loss < best_loss:
            best_loss = total_loss
            best_img = deprocess_img(noise.numpy())

        # Keep one frame per epoch for visualization.
        imgs.append(deprocess_img(noise.numpy()))

    return best_img, best_loss, imgs
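
# Note: the unusually large Adam learning rate (5) is workable because the
# optimization variables are raw pixel values spanning roughly [-128, 127]
# after mean subtraction, not network weights.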

def predict(image1_input, image2_input):
    return run_style_transfer(image1_input, image2_input, epochs=60)[0]

# gr.inputs / gr.outputs were removed in recent Gradio releases; use the
# top-level component classes instead. predict returns a numpy array, which
# the output Image component accepts directly.
image1_input = gr.Image(label="Image 1", type="numpy")
image2_input = gr.Image(label="Image 2", type="numpy")
output_image = gr.Image(label="Merged Image")

title = "Image Merger"
description = "Merge two input images"

gr.Interface(fn=predict, inputs=[image1_input, image2_input], outputs=output_image, title=title, description=description).launch()
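
For local testing without the web UI, the pipeline can be driven directly. A minimal sketch, assuming app.py's functions are importable (with the .launch() call removed or guarded) and that content.jpg and style.jpg are hypothetical local files:

    import numpy as np
    from PIL import Image

    content = np.array(Image.open("content.jpg").convert("RGB"))
    style = np.array(Image.open("style.jpg").convert("RGB"))

    best_img, best_loss, frames = run_style_transfer(content, style, epochs=20)
    Image.fromarray(best_img).save("stylized.png")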
requirements.txt
ADDED
Binary file (4.26 kB)
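
requirements.txt is stored as a binary blob, so its contents are not shown here. Inferred from app.py's imports — an assumption, not the actual file — a minimal equivalent would be:

    gradio
    tensorflow
    numpy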