import spaces
import os
import torch
from PIL import Image
from RealESRGAN import RealESRGAN
import gradio as gr
from huggingface_hub import HfApi
import datetime

# Run on GPU when available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Real-ESRGAN weights for the 2x, 4x, and 8x models
model2 = RealESRGAN(device, scale=2)
model2.load_weights('weights/RealESRGAN_x2.pth', download=True)

model4 = RealESRGAN(device, scale=4)
model4.load_weights('weights/RealESRGAN_x4.pth', download=True)

model8 = RealESRGAN(device, scale=8)
model8.load_weights('weights/RealESRGAN_x8.pth', download=True)

def upload_to_hf(image_path, folder, filename):
    """Upload a local image file to the 'DamarJati/esr-dev' dataset repo."""
    api = HfApi()
    api.upload_file(
        path_or_fileobj=image_path,
        path_in_repo=f"{folder}/{filename}",
        repo_id='DamarJati/esr-dev',
        repo_type='dataset',
        token=os.getenv('HF_TOKEN')
    )

@spaces.GPU()
def inference(image, size):
    global model2, model4, model8
    if image is None:
        raise gr.Error("Image not uploaded")

    # Reject very large inputs to avoid exhausting GPU memory
    width, height = image.size
    if width >= 5000 or height >= 5000:
        raise gr.Error("The image is too large.")

    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Run the requested model; on a CUDA out-of-memory error, reload the
    # weights on a fresh model instance and retry once
    folder = ''
    result = None
    if size == '2x':
        try:
            result = model2.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model2 = RealESRGAN(device, scale=2)
            model2.load_weights('weights/RealESRGAN_x2.pth', download=False)
            result = model2.predict(image.convert('RGB'))
        folder = '2'
    elif size == '4x':
        try:
            result = model4.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model4 = RealESRGAN(device, scale=4)
            model4.load_weights('weights/RealESRGAN_x4.pth', download=False)
            result = model4.predict(image.convert('RGB'))
        folder = '4'
    else:
        try:
            result = model8.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model8 = RealESRGAN(device, scale=8)
            model8.load_weights('weights/RealESRGAN_x8.pth', download=False)
            result = model8.predict(image.convert('RGB'))
        folder = '8'

    # Build a timestamped filename for this request
    timestamp = datetime.datetime.now().strftime("%H%M%S%f%d%m%Y")
    filename = f"{timestamp}.png"

    # Save the original and the upscaled image locally, then upload both
    original_filename = f"original_{filename}"
    upscaled_filename = f"{folder}_{filename}"
    image.save(original_filename)
    result.save(upscaled_filename)

    upload_to_hf(original_filename, "original", filename)
    upload_to_hf(upscaled_filename, folder, filename)

    print(f"Image size ({device}): {size} ... OK")
    return result

title = "Real ESRGAN UpScale: 2x 4x 8x"
description = "AI-powered image resolution enhancement.<br>Donation: https://ko-fi.com/Damarjati"

gr.Interface(
    inference,
    [gr.Image(type="pil"), gr.Radio(['2x', '4x', '8x'], type="value", value='2x', label='Resolution model')],
    gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    examples=[['example0.jpg', "2x"]],
    allow_flagging='never',
    cache_examples=False
).queue(api_open=True).launch(show_error=True, show_api=True)