import inspect
import math
import os
import re

import numpy as np
import torch
import torch.nn.init as init
from PIL import Image
from torch.autograd import Variable
from torch.optim import lr_scheduler


def tensor2im(image_tensor, imtype=np.uint8):
    """Convert a [-1, 1] image tensor (NCHW) to an HWC uint8 numpy image."""
    image_numpy = image_tensor[0].cpu().float().numpy()
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    image_numpy = np.clip(image_numpy, 0, 255)
    return image_numpy.astype(imtype)


def atten2im(image_tensor, imtype=np.uint8):
    """Convert a single-channel attention map to a 3-channel uint8 image,
    rescaled so that its maximum value maps to 255."""
    image_tensor = image_tensor[0]
    image_tensor = torch.cat((image_tensor, image_tensor, image_tensor), 0)
    image_numpy = image_tensor.cpu().float().numpy()
    image_numpy = np.transpose(image_numpy, (1, 2, 0))
    image_numpy = image_numpy / image_numpy.max() * 255.0
    return image_numpy.astype(imtype)


def latent2im(image_tensor, imtype=np.uint8):
    image_numpy = image_tensor[0].cpu().float().numpy()
    image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
    image_numpy = np.clip(image_numpy, 0, 255)
    return image_numpy.astype(imtype)


def max2im(image_1, image_2, imtype=np.uint8):
    image_1 = image_1[0].cpu().float().numpy()
    image_2 = image_2[0].cpu().float().numpy()
    image_1 = (np.transpose(image_1, (1, 2, 0)) + 1) / 2.0 * 255.0
    image_2 = np.transpose(image_2, (1, 2, 0)) * 255.0
    output = np.maximum(image_1, image_2)
    output = np.clip(output, 0, 255)
    return output.astype(imtype)


def variable2im(image_tensor, imtype=np.uint8):
    image_numpy = image_tensor[0].data.cpu().float().numpy()
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)


def diagnose_network(net, name='network'):
    """Print the mean absolute gradient over all parameters of a network."""
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            mean += torch.mean(torch.abs(param.grad.data)).item()
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)

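# A minimal usage sketch (assumes `net`, `loss`, and `optimizer` exist in the
# caller's training loop; the names are illustrative):
#
#     loss.backward()
#     diagnose_network(net, name='generator')  # mean |grad| over parameters
#     optimizer.step()

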
def save_image(image_numpy, image_path):
    image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)

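# A minimal usage sketch (assumes `fake_B` is an NCHW generator output in
# [-1, 1]; the variable name and output path are illustrative):
#
#     image_numpy = tensor2im(fake_B)
#     save_image(image_numpy, 'results/fake_B.png')

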
def info(object, spacing=10, collapse=1):
    """Print methods and doc strings.

    Takes module, class, list, dictionary, or string."""
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
    print("\n".join(["%s %s" %
                     (method.ljust(spacing),
                      processFunc(str(getattr(object, method).__doc__)))
                     for method in methodList]))


def varname(p):
    """Return the name of the variable passed at the call site, recovered by
    inspecting the caller's source line."""
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)


def print_numpy(x, val=True, shp=False):
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))


def mkdirs(paths):
    if isinstance(paths, list):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)


def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def get_model_list(dirname, key):
    """Return the last ".pt" checkpoint (in sorted filename order) in dirname
    whose filename contains key, or None if there is none."""
    if not os.path.exists(dirname):
        return None
    gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
                  os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f]
    if len(gen_models) == 0:
        return None
    gen_models.sort()
    last_model_name = gen_models[-1]
    return last_model_name

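# A minimal usage sketch for resuming from the latest checkpoint (the
# directory and key are illustrative):
#
#     last_gen = get_model_list('outputs/checkpoints', 'gen')
#     if last_gen is not None:
#         state_dict = torch.load(last_gen)

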
def load_vgg16(model_dir):
    """ Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
        if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
            os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
        # Converting the Torch7 checkpoint needs load_lua, which is only
        # available in PyTorch <= 0.4; Vgg16 is assumed to be defined
        # elsewhere in this repo (e.g. in the networks module).
        from torch.utils.serialization import load_lua
        vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
        vgg = Vgg16()
        for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
            dst.data[:] = src
        torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
    vgg = Vgg16()
    vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
    return vgg


def vgg_preprocess(batch):
    """Convert a [-1, 1] RGB batch to the BGR, mean-subtracted [0, 255]
    layout expected by the Caffe-trained VGG16."""
    tensortype = type(batch.data)
    (r, g, b) = torch.chunk(batch, 3, dim=1)
    batch = torch.cat((b, g, r), dim=1)  # RGB -> BGR
    batch = (batch + 1) * 255 * 0.5  # [-1, 1] -> [0, 255]
    mean = tensortype(batch.data.size())
    mean[:, 0, :, :] = 103.939
    mean[:, 1, :, :] = 116.779
    mean[:, 2, :, :] = 123.680
    batch = batch.sub(Variable(mean))  # subtract ImageNet BGR channel means
    return batch

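# A minimal perceptual-loss sketch (assumes the Vgg16 forward pass returns
# feature activations; `img_a` and `img_b` are [-1, 1] RGB batches, and all
# names besides the two utilities are illustrative):
#
#     vgg = load_vgg16('./models')
#     vgg.eval()
#     feat_a = vgg(vgg_preprocess(img_a))
#     feat_b = vgg(vgg_preprocess(img_b))
#     loss = torch.mean((feat_a - feat_b) ** 2)

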
def get_scheduler(optimizer, hyperparameters, iterations=-1):
    if 'lr_policy' not in hyperparameters or hyperparameters['lr_policy'] == 'constant':
        scheduler = None  # constant learning rate
    elif hyperparameters['lr_policy'] == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=hyperparameters['step_size'],
                                        gamma=hyperparameters['gamma'], last_epoch=iterations)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % hyperparameters['lr_policy'])
    return scheduler

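# A minimal usage sketch (the hyperparameter values are illustrative):
#
#     hyperparameters = {'lr_policy': 'step', 'step_size': 100000, 'gamma': 0.5}
#     scheduler = get_scheduler(optimizer, hyperparameters)
#     for it in range(max_iter):
#         ...                        # one training step
#         if scheduler is not None:  # the 'constant' policy returns None
#             scheduler.step()

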
def weights_init(init_type='gaussian'):
    """Return an initialization function suitable for net.apply()."""
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass  # keep PyTorch's default initialization
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)

    return init_fun

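# A minimal usage sketch (`net` is any nn.Module; the name is illustrative):
#
#     net.apply(weights_init('kaiming'))   # He init for Conv/Linear weights
#     net.apply(weights_init('gaussian'))  # or N(0, 0.02), DCGAN-style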