import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator  # needed below: generate_image() translates prompts to English
from gradio_client import Client, handle_file
from PIL import Image
from huggingface_hub import login
from themes import IndonesiaTheme # Import custom IndonesiaTheme
from loras import loras
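# `loras` (from loras.py) is assumed to be a list of dicts, each exposing at least a 'repo' key.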
MAX_SEED = np.iinfo(np.int32).max
HF_TOKEN = os.getenv('HF_TOKEN_UPSCALER')
HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN_UPSCALER')
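# Note: both variables currently read the same HF_TOKEN_UPSCALER secret.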
qwen_client = Client("K00B404/HugChatWrap",hf_token=HF_TOKEN)
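# Client for the K00B404/HugChatWrap space, used by generate_character_description() below.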
loaded_loras=[]
for lora in loras:
    print(lora.get('repo'))
    loaded_loras.append(lora.get('repo'))
# Function to enable LoRA if selected
def enable_lora(lora_add, basemodel):
    print(f"[-] Determining model: LoRA {'enabled' if lora_add else 'disabled'}, base model: {basemodel}")
    return basemodel if not lora_add else lora_add
def generate_character_description(character_prompt, system_message="""
    You are a character description generator. Create detailed, vivid descriptions
    of characters including their physical appearance, personality, and notable features. Keep the
    description focused on visual elements that could be used for image generation.
    """):
    """Generate a detailed character description using the K00B404/HugChatWrap space."""
    try:
        result = qwen_client.predict(
            message=character_prompt,
            param_2=system_message,
            param_3=100,
            param_4=0.9,
            param_5=0.99,
            api_name="/chat"
        )
        return result
    except Exception as e:
        return f"Error generating description: {str(e)}"
# Function to generate image
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        print(f"[-] Translating prompt: {prompt}")
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        print(f"[-] Generating image with prompt: {text}, model: {model}")
        client = AsyncInferenceClient()  # anonymous client; pass token=HF_TOKEN here if rate limits become an issue
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        print(f"[-] Error generating image: {e}")
        return None, None
# Function to upscale image
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        print(f"[-] Starting upscaling with factor {upscale_factor} for image {img_path}")
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = client.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="worst quality, low quality, normal quality",
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process"
        )
        print("[-] Upscaling finished successfully.")
        return result[1]  # Return the upscaled image path
    except Exception as e:
        print(f"[-] Error upscaling image: {e}")
        return None
# Main function to generate images and optionally upscale
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    print(f"[-] Starting image generation with prompt: {prompt}")
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    print(f"[-] Using model: {model}")
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    if image is None:
        print("[-] Image generation failed.")
        return []
    image_path = "temp_image.jpg"
    print(f"[-] Saving temporary image to: {image_path}")
    image.save(image_path, format="JPEG")
    upscale_image_path = None
    if process_upscale:
        print(f"[-] Upscaling with factor: {upscale_factor}")
        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
        if upscale_image_path is not None and os.path.exists(upscale_image_path):
            print(f"[-] Upscaling finished. Image saved at: {upscale_image_path}")
            return [image_path, upscale_image_path]  # Return both images
        else:
            print("[-] Upscaling failed, upscaled image path not found.")
    return [image_path]
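# Note: `gen` returns a list of local file paths; the gr.Gallery output below accepts such a list directly.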
# CSS for styling the interface
css = """
#col-left, #col-mid, #col-right {
margin: 0 auto;
max-width: 400px;
padding: 10px;
border-radius: 15px;
background-color: #f9f9f9;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
#banner {
width: 100%;
text-align: center;
margin-bottom: 20px;
}
#run-button {
background-color: #ff4b5c;
color: white;
font-weight: bold;
padding: 10px;
border-radius: 10px;
cursor: pointer;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
#footer {
text-align: center;
margin-top: 20px;
color: silver;
}
"""
# Creating Gradio interface
with gr.Blocks(css=css, theme=IndonesiaTheme()) as WallpaperFluxMaker:
    # Display the application title
    gr.HTML('<div id="banner">✨ Flux MultiMode Generator + Upscaler ✨</div>')

    with gr.Column(elem_id="col-container"):
        # Output section (gr.Gallery instead of ImageSlider)
        with gr.Row():
            output_res = gr.Gallery(label="⚡ Flux / Upscaled Image ⚡", elem_id="output-res", columns=2, height="auto")

        # User input section split into two columns
        with gr.Row():
            # Column 1: prompt, LoRA, and base model
            with gr.Column(scale=1, elem_id="col-left"):
                prompt = gr.Textbox(
                    label="📜 Image Description",
                    placeholder="Write your prompt in any language; it will be translated to English automatically.",
                    elem_id="textbox-prompt"
                )
                basemodel_choice = gr.Dropdown(
                    label="🖼️ Select Model",
                    choices=[
                        "black-forest-labs/FLUX.1-schnell",
                        "black-forest-labs/FLUX.1-DEV",
                        "enhanceaiteam/Flux-uncensored",
                        "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "city96/FLUX.1-dev-gguf"
                    ],
                    value="black-forest-labs/FLUX.1-schnell"
                )
                lora_model_choice = gr.Dropdown(
                    label="🎨 Select LoRA",
                    choices=[
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "XLabs-AI/flux-RealismLora",
                        "enhanceaiteam/Flux-uncensored"
                    ],
                    value="XLabs-AI/flux-RealismLora"
                )
                process_lora = gr.Checkbox(label="🎨 Enable LoRA")
                process_upscale = gr.Checkbox(label="🔍 Enable Upscaling")
                upscale_factor = gr.Radio(label="🔍 Upscale Factor", choices=[2, 4, 8], value=2)

            # Column 2: Advanced options (always open)
            with gr.Column(scale=1, elem_id="col-right"):
                with gr.Accordion(label="⚙️ Advanced Options", open=True):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=1, value=8)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=8)
                    seed = gr.Number(label="Seed", value=-1)

        # Button to generate the image (elem_id matches the #run-button CSS rule above)
        btn = gr.Button("🚀 Generate Image", elem_id="run-button")

        # Run the `gen` function when the button is pressed
        btn.click(fn=gen, inputs=[
            prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora
        ], outputs=output_res)

# Launch the Gradio app
WallpaperFluxMaker.queue(api_open=False).launch(show_api=False)