# NOTE(review): removed non-source extraction residue that preceded this line
# (file-size banner, git blame hashes, and a line-number gutter) — it was not
# valid Python and would have crashed the script at import time.
import os
import sys
import gradio as gr
from PIL import Image
## environment setup
# Clone the RefVSR repository and work from inside it; all relative paths
# below (test/, ckpt/, result/, run.py) assume this CWD.
os.system("git clone https://github.com/codeslake/RefVSR.git")
os.chdir("RefVSR")
os.system("./install/install_cudnn113.sh")
# exist_ok=True everywhere so re-running the script does not crash on
# directories left over from a previous run.
os.makedirs("ckpt", exist_ok=True)
# Pretrained weights: SPyNet (optical flow) plus the RefVSR variants
# (full/small, 8K/non-8K).
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID_8K.pytorch -O ckpt/RefVSR_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID.pytorch -O ckpt/RefVSR_MFID.pytorch")
# BUG FIX: this line previously re-downloaded RefVSR_small_MFID_8K.pytorch
# (already fetched above) into ckpt/RefVSR_small_MFID.pytorch, so the non-8K
# small model ran with the wrong weights. Fetch the matching checkpoint.
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID.pytorch -O ckpt/RefVSR_small_MFID.pytorch")
sys.path.append("RefVSR")
## I/O setup (creates folders and places inputs corresponding to the original RefVSR code)
# HD input: ultra-wide (UW) LR frames plus wide (W) and tele (T) reference frames.
HR_LR_path = "test/RealMCVSR/test/HR/UW/0000"
HR_Ref_path = "test/RealMCVSR/test/HR/W/0000"
HR_Ref_path_T = "test/RealMCVSR/test/HR/T/0000"
os.makedirs(HR_LR_path, exist_ok=True)
os.makedirs(HR_Ref_path, exist_ok=True)
os.makedirs(HR_Ref_path_T, exist_ok=True)
# Sample HD LR/Ref pairs used as gallery examples for the 8K demo.
os.system("wget https://www.dropbox.com/s/x33ka2jlzwsde7r/LR.png -O HR_LR1.png")
os.system("wget https://www.dropbox.com/s/pp903wlz3syf68w/Ref.png -O HR_Ref1.png")
os.system("wget https://www.dropbox.com/s/zl0h83x0le6ejfw/LR.png -O HR_LR2.png")
os.system("wget https://www.dropbox.com/s/9hzupmc3clt0f0e/Ref.png -O HR_Ref2.png")
os.system("wget https://www.dropbox.com/s/2u6lcfdhvcylklg/LR.png -O HR_LR3.png")
os.system("wget https://www.dropbox.com/s/a7bwfy3gl26tvbq/Ref.png -O HR_Ref3.png")
# 4x downsampled input (same UW/W/T layout) for the low-res demo.
LR_path = "test/RealMCVSR/test/LRx4/UW/0000"
Ref_path = "test/RealMCVSR/test/LRx4/W/0000"
Ref_path_T = "test/RealMCVSR/test/LRx4/T/0000"
os.makedirs(LR_path, exist_ok=True)
os.makedirs(Ref_path, exist_ok=True)
os.makedirs(Ref_path_T, exist_ok=True)
os.system("wget https://www.dropbox.com/s/hkvdwm3grshjt0k/LR.png -O LR.png")
os.system("wget https://www.dropbox.com/s/4sv34su3kg1ifkp/Ref.png -O Ref.png")
# output directory read back by the inference functions below
os.makedirs('result', exist_ok=True)
## resize if necessary (not used)
def resize(img):
    """Shrink *img* so its longer side is at most 480 px, then crop so both
    dimensions are multiples of 8.

    The multiple-of-8 crop matches what the RefVSR model's downsampling
    stages require; the 480 px cap keeps CPU inference tractable.
    """
    max_side = 480
    w = img.size[0]
    h = img.size[1]
    if max(h, w) > max_side:
        scale_ratio = max_side / max(h, w)
        wsize = int(w * scale_ratio)
        hsize = int(h * scale_ratio)
        # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
        # the same filter and exists in every Pillow version.
        img = img.resize((wsize, hsize), Image.LANCZOS)
        w = img.size[0]
        h = img.size[1]
    img = img.crop((0, 0, w - w % 8, h - h % 8))
    return img
#################### 8K ##################
## inference
def inference_8K(LR, Ref):
    """Run the 8K RefVSR model (CPU, single frame) and return the result path."""
    # Downscale user-supplied frames (longer side capped at 480 px).
    LR = resize(LR)
    Ref = resize(Ref)
    # Place the frames into every directory slot the original RefVSR data
    # loader expects (LR goes to the UW folders, Ref to the W and T folders).
    for folder in (LR_path, HR_LR_path):
        LR.save(os.path.join(folder, '0000.png'))
    for folder in (Ref_path, Ref_path_T, HR_Ref_path, HR_Ref_path_T):
        Ref.save(os.path.join(folder, '0000.png'))
    # Invoke the stock run.py entry point of the cloned repository.
    os.system("python -B run.py \
        --mode RefVSR_MFID_8K \
        --config config_RefVSR_MFID_8K \
        --data RealMCVSR \
        --ckpt_abs_name ckpt/RefVSR_MFID_8K.pytorch \
        --data_offset ./test \
        --output_offset ./result \
        --qualitative_only \
        --cpu \
        --is_gradio")
    # run.py writes the super-resolved frame here.
    return "result/0000.png"
# ---- 8K demo UI ----
title = "RefVSR"
description = "Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about 30s."
article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frames due to computational complexity.<br>Hence, the model <b>will not take advantage</b> of temporal LR and Ref frames.</p><p style='text-align: center'>Moreover, the model is trained <b>with the proposed 2-stage training strategy</b>, but due to the memory and computational complexity, we downsampled sample frames to have the 480x270 resolution.</p><p style='text-align: center'>For user given frames, the size will be adjusted for the longer side of the frames to have 480 pixels.</p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
## resize for sample (not used)
#LR = resize(Image.open('LR.png')).save('LR.png')
#Ref = resize(Image.open('Ref.png')).save('Ref.png')
# Gallery examples: the pre-downloaded HD sample pairs.
examples = [
    ['HR_LR1.png', 'HR_Ref1.png'],
    ['HR_LR2.png', 'HR_Ref2.png'],
    ['HR_LR3.png', 'HR_Ref3.png'],
]
# Build and launch the 8K demo interface.
demo_8k = gr.Interface(
    inference_8K,
    [gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],
    gr.outputs.Image(type="file"),
    title=title,
    description=description,
    article=article,
    theme="peach",
    examples=examples,
)
demo_8k.launch(enable_queue=True)
#################### low res ##################
## inference
def inference(LR, Ref):
    """Run the 4x RefVSR model (CPU, single frame) and return the result path."""
    # Downscale user-supplied frames (longer side capped at 480 px).
    LR = resize(LR)
    Ref = resize(Ref)
    # Place the frames into every directory slot the original RefVSR data
    # loader expects (LR goes to the UW folders, Ref to the W and T folders).
    for folder in (LR_path, HR_LR_path):
        LR.save(os.path.join(folder, '0000.png'))
    for folder in (Ref_path, Ref_path_T, HR_Ref_path, HR_Ref_path_T):
        Ref.save(os.path.join(folder, '0000.png'))
    # Invoke the stock run.py entry point of the cloned repository.
    os.system("python -B run.py \
        --mode RefVSR_MFID \
        --config config_RefVSR_MFID \
        --data RealMCVSR \
        --ckpt_abs_name ckpt/RefVSR_MFID.pytorch \
        --data_offset ./test \
        --output_offset ./result \
        --qualitative_only \
        --cpu \
        --is_gradio")
    # run.py writes the super-resolved frame here.
    return "result/0000.png"
# ---- low-res (4x) demo UI ----
title = "Demo for RefVSR (CVPR 2022)"
description = "The demo applies 4xVSR on a video frame. It runs on CPUs and takes about 150s. For the demo, upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. It is recommended for the reference frame to have a 2x larger zoom factor than that of the low-resolution frame."
article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frames due to computational complexity.<br>Hence, the model <b>will not take advantage</b> of temporal LR and Ref frames.</p><p style='text-align: center'>Moreover, the model is trained <b>only with the proposed pre-training strategy</b> to cope with downsampled sample frames, which are in the 480x270 resolution.</p><p style='text-align: center'>For user given frames, the size will be adjusted for the longer side of the frames to have 480 pixels.</p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
# Downscale the sample frames in place so the example pair matches the demo's
# 480 px input cap. NOTE(review): Image.save() returns None, so LR/Ref are
# rebound to None here — kept as-is because only the files on disk are used.
LR = resize(Image.open('LR.png')).save('LR.png')
Ref = resize(Image.open('Ref.png')).save('Ref.png')
# Gallery example: the single pre-downloaded 4x-downsampled pair.
examples = [['LR.png', 'Ref.png']]
# Build and launch the low-res demo interface.
demo_lowres = gr.Interface(
    inference,
    [gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],
    gr.outputs.Image(type="file"),
    title=title,
    description=description,
    article=article,
    theme="peach",
    examples=examples,
)
demo_lowres.launch(enable_queue=True)