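"""Flux Upscaled + LoRA demo.

Generates an image with a FLUX base model (optionally routed through a LoRA repo)
using the Hugging Face Inference API, optionally upscales it via the
finegrain/finegrain-image-enhancer Space, and shows both results in an ImageSlider.
"""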
import os
import random

import gradio as gr
import numpy as np
from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider
from huggingface_hub import AsyncInferenceClient
from PIL import Image
from translatepy import Translator

translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
MAX_SEED = np.iinfo(np.int32).max
CSS = "footer { visibility: hidden; }"
JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"

def enable_lora(lora_add, basemodel):
    # Prefer the LoRA repo id when one is provided; otherwise use the base model.
    return basemodel if not lora_add else lora_add


async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    # Translate the prompt to English and append the LoRA trigger word, if any.
    text = str(translator.translate(prompt, 'English')) + "," + lora_word
    client = AsyncInferenceClient(token=HF_TOKEN)  # authenticate inference calls with the env token
    image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
    return image, seed


async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    image_path = "temp_image.jpg"
    image.save(image_path, quality=95)
    if process_upscale:
        upscale_image_path = "upscale_image.jpg"
        # The Space client returns a local file path, so open it before re-saving as JPEG.
        upscale_result = get_upscale_finegrain(prompt, image_path, upscale_factor)
        Image.open(upscale_result).convert("RGB").save(upscale_image_path, quality=95)
    else:
        upscale_image_path = image_path
    # ImageSlider expects a (before, after) pair of images.
    return [image_path, upscale_image_path]


def get_upscale_finegrain(prompt, img_path, upscale_factor):
    # Upscale through the finegrain/finegrain-image-enhancer Space; predict() returns local file paths.
    client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
    result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
    return result[1]

css = """
#col-container{
margin: 0 auto;
max-width: 1024px;
}
"""

with gr.Blocks(css=CSS + css, js=JS, theme="Nymbo/Nymbo_Theme") as demo:  # combine footer-hiding and layout CSS
    with gr.Column(elem_id="col-container"):
        gr.Markdown("Flux Upscaled + LoRA")
        with gr.Row():
            with gr.Column(scale=1.5):
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=0.8):
                prompt = gr.Textbox(label="Prompt")
                basemodel_choice = gr.Dropdown(label="Base Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"], value="black-forest-labs/FLUX.1-schnell")
                lora_model_choice = gr.Dropdown(label="LoRA Model", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
                process_lora = gr.Checkbox(label="Process LoRA")
                process_upscale = gr.Checkbox(label="Process Upscale")
                upscale_factor = gr.Radio(label="Upscale Factor", choices=[2, 4, 8], value=2)
                with gr.Accordion(label="Advanced Options", open=False):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
                    seed = gr.Slider(label="Seed (-1 = random)", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                submit_btn = gr.Button("Submit", scale=1)

    submit_btn.click(
        fn=lambda: None,  # clear the slider before starting a new run
        inputs=None,
        outputs=[output_res],
        queue=False
    ).then(
        fn=gen,
        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
        outputs=[output_res]
    )
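
# Entry point (assumed): the snippet above defines the Blocks app but never launches it,
# so this minimal sketch starts the Gradio server when the file is run directly.
if __name__ == "__main__":
    demo.launch()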