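"""Gradio Space: generate images with FLUX.1-dev plus two LoRA adapters
(kaytoo2022/jguan_35-flux-2 and aleksa-codes/flux-ghibsky-illustration)."""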
import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
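# Reproducibility/speed settings: pin cuDNN to deterministic algorithm selection
# and allow TF32 matmuls for faster generation on recent NVIDIA GPUs.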
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True
# Initialize the base model and specific LoRA
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
lora_repo = "kaytoo2022/jguan_35-flux-2"
trigger_word = "" # Leave trigger_word blank if not used.
pipe.load_lora_weights(lora_repo, adapter_name='jguan')
# ghibsky
lora_repo_2 = "aleksa-codes/flux-ghibsky-illustration"
pipe.load_lora_weights(lora_repo_2, adapter_name='lora_2')
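# Start with the jguan adapter active (0.85) and the GhibSky adapter off (0.0);
# run_lora overrides these weights on every request.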
pipe.set_adapters(["jguan", "lora_2"], adapter_weights=[0.85, 0.0])
pipe.to("cuda")
MAX_SEED = 2**32-1
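# On ZeroGPU Spaces, @spaces.GPU() allocates a GPU for the duration of each call.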
@spaces.GPU()
def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2, progress=gr.Progress(track_tqdm=True)):
    # Set random seed for reproducibility
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    pipe.set_adapters(["jguan", "lora_2"], adapter_weights=[lora_scale, lora_scale_2])
    # Update the progress bar (0% at start)
    progress(0, "Starting image generation...")
    # Simulated progress updates; actual per-step progress from the diffusers
    # pipeline is already picked up through track_tqdm.
    for i in range(1, steps + 1):
        if i % max(steps // 10, 1) == 0:  # update roughly every 10% of the steps
            progress(i / steps, f"Processing step {i} of {steps}...")
    # Generate the image using the pipeline
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        # joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    # Final update (100%)
    progress(1, "Completed!")
    yield image, seed
# Example cached image and settings
example_image_path = "bella_space.jpeg" # Replace with the actual path to the example image
example_prompt = """A portrait picture of [Just1nGu4n] in an astronaut outfit. Planets are visible in the background"""
example_cfg_scale = 3.2
example_steps = 32
example_width = 1152
example_height = 896
example_seed = 3981632454
example_lora_scale = 0.85
example_lora_scale_2 = 0.0
def load_example():
    # Load example image from file
    # example_image = Image.open(example_image_path)
    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_lora_scale_2, None
with gr.Blocks() as app:
gr.Markdown("# Flux Lora Image Generator")
with gr.Row():
with gr.Column(scale=3):
prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt", lines=5)
generate_button = gr.Button("Generate")
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
randomize_seed = gr.Checkbox(True, label="Randomize seed")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
lora_scale_2 = gr.Slider(label="LoRA Scale (GhibSky)", minimum=0, maximum=1, step=0.01, value=example_lora_scale_2)
with gr.Column(scale=1):
result = gr.Image(label="Generated Image")
gr.Markdown("Generate images using Flux and a text prompt.\nUse `[Just1nGu4n]` in the prompt to activate the LoRA adapter.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")
# Automatically load example data and image when the interface is launched
app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2, result])
generate_button.click(
run_lora,
inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2],
outputs=[result, seed]
)
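# Enable the request queue so progress updates and the yielded image are streamed to the client.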
app.queue()
app.launch()