import torch  # tensor operations and autograd
import torch.optim as optim  # Adam optimizer for the generated image
from tqdm import tqdm  # progress bar over the optimization steps
import spaces  # Hugging Face Spaces GPU decorator

from dataTransform import load_image
from vggModel import VGGNet


@spaces.GPU(duration=242)
def style_transfer(content_img, style_img, total_steps, alpha=1e5, beta=1e10, learning_rate=0.001):
    # Preprocess the input images and move them to the available device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('-' * 30)
    print(f'Device Initialized: {device}')
    print('-' * 30)

    content_img = load_image(content_img, device)
    style_img = load_image(style_img, device)

    # The generated image starts as a copy of the content image and is optimized directly
    generated_img = content_img.clone().requires_grad_(True)
    optimizer = optim.Adam([generated_img], lr=learning_rate)
    model = VGGNet().to(device).eval()

    for step in tqdm(range(total_steps)):
        # Pass the three images through the VGG network to get per-layer feature maps
        generated_feats = model(generated_img)
        original_image_feats = model(content_img)
        style_feats = model(style_img)

        # Accumulate content and style losses over the selected layers
        style_loss = original_loss = 0
        for gen_feat, orig_image_feat, styl_feat in zip(generated_feats, original_image_feats, style_feats):
            batch, channel, height, width = gen_feat.shape

            # Content loss: mean squared difference between generated and content features
            original_loss += torch.mean((gen_feat - orig_image_feat) ** 2)

            # Gram (correlation) matrices of the generated and style features
            G = gen_feat.view(channel, height * width).mm(
                gen_feat.view(channel, height * width).t()
            )
            A = styl_feat.view(channel, height * width).mm(
                styl_feat.view(channel, height * width).t()
            )

            # Style loss: mean squared difference between the Gram matrices
            style_loss += torch.mean((G - A) ** 2)

        # Weighted sum of content and style terms, then one optimizer step on the image
        total_loss = alpha * original_loss + beta * style_loss
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    # Return the final generated image (still a tensor on `device`)
    return generated_img
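

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of calling style_transfer() and saving the output.
# Assumptions: load_image() accepts a file path, the returned tensor has
# shape (1, 3, H, W) with values roughly in [0, 1], and the file names and
# step count below are hypothetical placeholders.
if __name__ == '__main__':
    from torchvision.utils import save_image

    result = style_transfer('content.jpg', 'style.jpg', total_steps=300)

    # Detach from the autograd graph and clamp to a valid pixel range before saving.
    save_image(result.detach().clamp(0, 1), 'generated.png')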