import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient, login
#from translatepy import Translator
from gradio_client import Client, handle_file
from PIL import Image
from themes import IndonesiaTheme # Import custom IndonesiaTheme
from loras import loras

MAX_SEED = np.iinfo(np.int32).max
# Both tokens currently read the same environment variable; the upscaler token
# doubles as the general-purpose HF token.
HF_TOKEN = os.getenv('HF_TOKEN_UPSCALER')
HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN_UPSCALER')

# Client for the chat space used to expand character prompts
qwen_client = Client("K00B404/HugChatWrap", hf_token=HF_TOKEN)

# Collect the LoRA repository IDs defined in loras.py
loaded_loras = []
for lora in loras:
    print(lora.get('repo'))
    loaded_loras.append(lora.get('repo'))

# Function to enable LoRA if selected
def enable_lora(lora_add, basemodel):
    print(f"[-] Determining model: LoRA {'enabled' if lora_add else 'disabled'}, base model: {basemodel}")
    return basemodel if not lora_add else lora_add
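
# A minimal sanity check for enable_lora (not part of the original app); the model
# names below are illustrative, and the block only runs when DEBUG_MODEL_SELECTION is set.
if __name__ == "__main__" and os.getenv("DEBUG_MODEL_SELECTION"):
    assert enable_lora(None, "black-forest-labs/FLUX.1-schnell") == "black-forest-labs/FLUX.1-schnell"
    assert enable_lora("XLabs-AI/flux-RealismLora", "black-forest-labs/FLUX.1-schnell") == "XLabs-AI/flux-RealismLora"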

def generate_character_description(character_prompt, system_message="""
You are a character description generator. Create detailed, vivid descriptions
of characters including their physical appearance, personality, and notable features. Keep the
description focused on visual elements that could be used for image generation.
"""):
    """Generate a detailed character description using the K00B404/HugChatWrap space."""
    try:
        result = qwen_client.predict(
            message=character_prompt,
            param_2=system_message,  # system prompt
            param_3=100,             # presumably max new tokens
            param_4=0.9,             # presumably temperature
            param_5=0.99,            # presumably top-p
            api_name="/chat"
        )
        return result
    except Exception as e:
        return f"Error generating description: {str(e)}"
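
# Minimal usage sketch for generate_character_description (assumes the HugChatWrap
# space is reachable with HF_TOKEN); guarded so it never runs inside the Space.
if __name__ == "__main__" and os.getenv("DEBUG_CHARACTER_DESCRIPTION"):
    example = generate_character_description("a weathered sea captain with a mechanical arm")
    print(f"[-] Example character description:\n{example}")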

# Function to generate an image with the selected model
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        print(f"[-] Received prompt: {prompt}")
        # Translation / character expansion is currently disabled:
        # text = generate_character_description(str(Translator().translate(prompt, 'English'))) + "," + lora_word
        text = f"{prompt},{lora_word}" if lora_word else prompt
        print(f"[-] Generating image with prompt: {text}, model: {model}")
        client = AsyncInferenceClient()
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        print(f"[-] Error generating image: {e}")
        return None, None
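
# Minimal local driver for generate_image (assumes serverless inference access to
# FLUX.1-schnell); guarded so it never runs inside the Space.
if __name__ == "__main__" and os.getenv("DEBUG_GENERATE_IMAGE"):
    import asyncio
    demo_image, demo_seed = asyncio.run(
        generate_image("a misty mountain village at dawn",
                       "black-forest-labs/FLUX.1-schnell", "", 1024, 768, 7, 8, -1)
    )
    if demo_image is not None:
        demo_image.save("debug_flux.jpg", format="JPEG")
        print(f"[-] Debug image saved (seed {demo_seed})")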

# Function to upscale an image via the finegrain-image-enhancer space
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        print(f"[-] Upscaling image {img_path} with factor {upscale_factor}")
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = client.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="worst quality, low quality, normal quality",
            upscale_factor=upscale_factor,
            controlnet_scale=0.7,
            controlnet_decay=1,
            condition_scale=6,
            denoise_strength=0.33,
            num_inference_steps=20,
            solver="DDIM",
            api_name="/process"
        )
        print("[-] Upscaling finished successfully.")
        return result[1]  # Return the upscaled image path
    except Exception as e:
        print(f"[-] Error upscaling image: {e}")
        return None
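
# Minimal usage sketch for the upscaler (assumes a local debug_input.jpg and a valid
# HF_TOKEN_UPSCALER); guarded so it never runs inside the Space.
if __name__ == "__main__" and os.getenv("DEBUG_UPSCALE"):
    upscaled_path = get_upscale_finegrain("a misty mountain village at dawn", "debug_input.jpg", 2)
    print(f"[-] Upscaled image path: {upscaled_path}")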

# Main function: generate an image and optionally upscale it
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    print(f"[-] Starting image generation with prompt: {prompt}")
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    print(f"[-] Using model: {model}")
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    if image is None:
        print("[-] Image generation failed.")
        return []
    image_path = "temp_image.jpg"
    print(f"[-] Saving temporary image to: {image_path}")
    image.save(image_path, format="JPEG")
    upscale_image_path = None
    if process_upscale:
        print(f"[-] Upscaling with factor: {upscale_factor}")
        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
    if upscale_image_path is not None and os.path.exists(upscale_image_path):
        print(f"[-] Upscaling finished. Image saved at: {upscale_image_path}")
        return [image_path, upscale_image_path]  # Return both images
    else:
        print("[-] Upscaling skipped or failed; returning only the original image.")
        return [image_path]
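
# Minimal end-to-end driver for gen (assumes inference and upscaler access); the
# argument values are illustrative, and the block is guarded so it never runs inside the Space.
if __name__ == "__main__" and os.getenv("DEBUG_GEN"):
    import asyncio
    result_paths = asyncio.run(
        gen("a misty mountain village at dawn", "black-forest-labs/FLUX.1-schnell",
            1024, 768, 7, 8, -1, 2, True, "XLabs-AI/flux-RealismLora", False)
    )
    print(f"[-] gen returned: {result_paths}")
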
# CSS for styling the interface
css = """
#col-left, #col-mid, #col-right {
margin: 0 auto;
max-width: 400px;
padding: 10px;
border-radius: 15px;
background-color: #f9f9f9;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
#banner {
width: 100%;
text-align: center;
margin-bottom: 20px;
}
#run-button {
background-color: #ff4b5c;
color: white;
font-weight: bold;
padding: 10px;
border-radius: 10px;
cursor: pointer;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
#footer {
text-align: center;
margin-top: 20px;
color: silver;
}
"""

# Creating the Gradio interface
with gr.Blocks(css=css, theme=IndonesiaTheme()) as WallpaperFluxMaker:
    # Displaying the application title
    gr.HTML('<div id="banner">✨ Flux MultiMode Generator + Upscaler ✨</div>')

    with gr.Column(elem_id="col-container"):
        # Output section (replacing ImageSlider with gr.Gallery)
        with gr.Row():
            output_res = gr.Gallery(label="⚡ Flux / Upscaled Image ⚡", elem_id="output-res", columns=2, height="auto")

        # User input section split into two columns
        with gr.Row():
            # Column 1: input prompt, LoRA, and base model
            with gr.Column(scale=1, elem_id="col-left"):
                prompt = gr.Textbox(
                    label="📜 Image Description",
                    placeholder="Write your prompt in any language, and it will be automatically translated into English.",
                    elem_id="textbox-prompt"
                )
                basemodel_choice = gr.Dropdown(
                    label="🖼️ Select a Model",
                    choices=[
                        "black-forest-labs/FLUX.1-schnell",
                        "black-forest-labs/FLUX.1-dev",
                        "black-forest-labs/FLUX.1-merged",
                        "dataautogpt3/FLUX-SyntheticAnime",
                        "Shakker-Labs/FLUX.1-dev-LoRA-AntiBlur",
                        "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "city96/FLUX.1-schnell-gguf"
                    ],
                    value="black-forest-labs/FLUX.1-schnell"
                )
                lora_model_choice = gr.Dropdown(
                    label="🎨 Select a LoRA",
                    choices=[
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "XLabs-AI/flux-RealismLora",
                        "enhanceaiteam/Flux-uncensored",
                        "Keltezaa/female-masturbation-fingering"
                    ] + loaded_loras,
                    value="XLabs-AI/flux-RealismLora"
                )
                process_lora = gr.Checkbox(label="🎨 Use LoRA")
                process_upscale = gr.Checkbox(label="🔍 Upscale resolution")
                upscale_factor = gr.Radio(label="🔍 Upscale factor", choices=[2, 4, 8], value=2)

            # Column 2: advanced options (always open)
            with gr.Column(scale=1, elem_id="col-right"):
                with gr.Accordion(label="⚙️ Settings", open=True):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Scale", minimum=1, maximum=20, step=1, value=8)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=8)
                    seed = gr.Number(label="Seed", value=-1)

        # Button to generate the image (elem_id matches the #run-button rule in the CSS)
        btn = gr.Button("🚀 Bombs away!", elem_id="run-button")

        # Running the `gen` function when the button is pressed
        btn.click(fn=gen, inputs=[
            prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora
        ], outputs=output_res)
# Launching the Gradio app
WallpaperFluxMaker.queue(api_open=False).launch(show_api=True) |