import spaces
import gradio as gr
import torch
from PIL import Image, PngImagePlugin
from diffusers import DiffusionPipeline
import random
import os
import pygsheets
from datetime import datetime
from transformers.utils.hub import move_cache
import json
from gradio_client import Client
# Migrate any old-format transformers cache to the current layout
move_cache()
# Initialize the Google Sheets connection used for logging
# Authorization
gc = pygsheets.authorize(service_account_env_var='GSHEET_AUTH')
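# Note: pygsheets reads the service-account JSON from the GSHEET_AUTH
# environment variable, which is assumed to be configured as a secret in the
# Space settings; authorization fails at startup if it is missing.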
# Open the Google spreadsheet
sh = gc.open('AndroFLUX-Logs')
# Select the first sheet
wks = sh[0]
# Initialize the base model and the AndroFlux LoRA
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

lora_repo = "markury/AndroFlux"
trigger_word = ""  # Leave trigger_word blank if not used.
pipe.load_lora_weights(lora_repo, weight_name="AndroFlux-v19.safetensors")
pipe.to("cuda")
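# Note: the LoRA strength is not fixed here; it is applied per request via
# joint_attention_kwargs={"scale": lora_scale} inside run_lora() below.
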
MAX_SEED = 2**32-1
@spaces.GPU(duration=80)
def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    # Set the random seed for reproducibility
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    # Log the prompt and generation settings
    print(f"PROMPT: {prompt} SEED: {seed} CFG: {cfg_scale}")
    # Moderation: score the prompt with an external text-moderation Space
    moderation_client = Client("duchaba/Friendly_Text_Moderation")
    result = moderation_client.predict(
        msg=f"{prompt}",
        safer=0.02,
        api_name="/fetch_toxicity_level"
    )
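    # Note: result[1] is assumed to hold the JSON scores returned by the
    # /fetch_toxicity_level endpoint; 0.03 on the 'sexual_minors' score is the
    # cutoff used to reject a prompt outright.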
    if float(json.loads(result[1])['sexual_minors']) > 0.03:
        print('Minors')
        raise gr.Error("Unauthorized request 💥!")
    # Update the progress bar (0% at the start)
    progress(0, "Starting image generation...")

    # Simulated progress updates before the actual generation call
    for i in range(1, steps + 1):
        # Simulate a processing step (in a real scenario, you would integrate
        # this with the image generation process itself)
        if i % max(1, steps // 10) == 0:  # Update every ~10% of the steps
            progress(i / steps, f"Processing step {i} of {steps}...")  # gr.Progress expects a fraction in [0, 1]
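    # With track_tqdm=True, gr.Progress also mirrors the tqdm bar emitted by the
    # diffusers pipeline during denoising, so real per-step progress is reported
    # by the pipe() call below as well.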
    # Generate the image using the pipeline
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
        max_sequence_length=512
    ).images[0]
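    # joint_attention_kwargs={"scale": lora_scale} scales the loaded LoRA weights
    # at inference time; max_sequence_length=512 matches the T5 prompt length
    # supported by FLUX.1-dev.
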
    # Save the image to a file with a unique name in the /tmp/gradio directory
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    image_filename = f"generated_image_{timestamp}.png"
    os.makedirs("/tmp/gradio", exist_ok=True)  # ensure the output directory exists
    image_path = os.path.join("/tmp/gradio", image_filename)

    # Add generation parameters as PNG metadata
    new_metadata_string = f"{prompt}\nNegative prompt: none \nSteps: {steps}, CFG scale: {cfg_scale}, Seed: {seed}, Lora hashes: AndroFlux-v19: c44afd41ece1"
    metadata = PngImagePlugin.PngInfo()
    metadata.add_text("parameters", new_metadata_string)
    image.save(image_path, pnginfo=metadata)
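    # The "parameters" text chunk follows the convention used by common Stable
    # Diffusion web UIs, so the prompt and settings stay readable when the PNG
    # is inspected with those tools.
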
    # Construct the URL used to access the image from outside the Space
    space_url = "https://killwithabass-flux-1-dev-lora-androflux.hf.space"  # Replace with your actual Space URL
    image_url = f"{space_url}/gradio_api/file={image_path}"
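    # Assumption: /tmp/gradio is Gradio's default temporary directory on Linux,
    # and files inside it are served by the running app under the
    # /gradio_api/file= route (Gradio 5), so this URL resolves to the saved PNG.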
    # Log queries to the Google Sheet
    try:
        if "girl" not in prompt and "woman" not in prompt:
            wks.append_table(values=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, image_url])
    except Exception as error:
        # Handle the exception without failing the request
        print("An exception occurred:", error)

    print(f"Image URL: {image_url}")  # Log the file URL

    # Final update (100%)
    progress(1.0, "Completed!")
    yield image, seed
# Example cached image and settings
example_image_path = "blond_5.webp" # Replace with the actual path to the example image
example_prompt = """a full frontal view photo of a athletic man with olive skin in his late twenties standing on a flowery terrace at golden hour. He is fully naked with a thick uncut penis and blond pubic hair. The man has long blond hair and has a dominant expression. The setting is outdoors, with a peaceful and aesthetic atmosphere."""
example_cfg_scale = 3.5
example_steps = 25
example_width = 896
example_height = 1152
example_seed = 556215326
example_lora_scale = 1
def load_example():
    # Load the example image from file
    example_image = Image.open(example_image_path)
    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_image
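# Note: the tuple returned by load_example() must line up positionally with the
# `outputs` list passed to app.load() below.
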
with gr.Blocks() as app:
    gr.Markdown("# AndroFlux Image Generator")
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt of max 77 characters", lines=3)
            generate_button = gr.Button("Generate")
            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
            randomize_seed = gr.Checkbox(False, label="Randomize seed")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
        with gr.Column(scale=1):
            result = gr.Image(label="Generated Image")
            gr.Markdown("Generate images using the AndroFlux LoRA and a text prompt.\n[[non-commercial license, FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")

    # Automatically load the example data and image when the interface is launched
    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])

    generate_button.click(
        run_lora,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed],
    )
app.queue()
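# Queueing (on by default in recent Gradio releases, but enabled explicitly here)
# is what allows gr.Progress updates and yield-based outputs to reach the client.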
app.launch()