import spaces
import gradio as gr
import torch
from PIL import Image, PngImagePlugin
from diffusers import DiffusionPipeline
import random
import os
import pygsheets
from datetime import datetime
from transformers.utils.hub import move_cache
import json
from gradio_client import Client

# Move the transformers cache to its updated location
move_cache()

# Initialize the Google Sheets connection
# Authorize using the service-account key stored in the GSHEET_AUTH environment variable
gc = pygsheets.authorize(service_account_env_var='GSHEET_AUTH')

# Open the Google spreadsheet used for logging
sh = gc.open('AndroFLUX-Logs')

# Select the first sheet
wks = sh[0]

# Initialize the base model and load the LoRA weights
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

lora_repo = "markury/AndroFlux"
trigger_word = ""  # Leave trigger_word blank if not used.
pipe.load_lora_weights(lora_repo, weight_name="AndroFlux-v19.safetensors")

pipe.to("cuda")

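# Largest seed value accepted by the UI slider (maximum unsigned 32-bit integer)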
MAX_SEED = 2**32-1

@spaces.GPU(duration=80)
def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
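    """Generate an image with FLUX.1-dev plus the AndroFlux LoRA.

    The prompt is first screened via an external moderation Space; the
    generated image is saved with A1111-style PNG metadata, the request is
    logged to a Google Sheet, and the image and seed are yielded back to the UI.
    """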
    # Set random seed for reproducibility
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Log the prompt and key generation settings
    print(f"PROMPT: {prompt} SEED: {seed} CFG: {cfg_scale}")

    # Moderation: score the prompt with an external text-moderation Space
    moderation_client = Client("duchaba/Friendly_Text_Moderation")
    result = moderation_client.predict(
        msg=prompt,
        safer=0.02,
        api_name="/fetch_toxicity_level"
    )

    # Block the request if the 'sexual_minors' score exceeds the threshold
    if float(json.loads(result[1])['sexual_minors']) > 0.03:
        print('Minors')
        raise gr.Error("Unauthorized request 💥!")

    # Update progress bar (0% at start)
    progress(0, "Starting image generation...")

    # Simulated progress updates (the actual generation below runs in a single pipeline call)
    update_every = max(steps // 10, 1)  # update roughly every 10% of the steps; avoids division by zero
    for i in range(1, steps + 1):
        if i % update_every == 0:
            progress(i / steps, f"Processing step {i} of {steps}...")  # gr.Progress expects a 0-1 fraction

    # Generate image using the pipeline
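    # joint_attention_kwargs={"scale": lora_scale} sets the strength of the loaded LoRA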
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
        max_sequence_length=512
    ).images[0]

    # Save the image with a unique, timestamped name in Gradio's temp directory
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    image_filename = f"generated_image_{timestamp}.png"
    image_path = os.path.join("/tmp/gradio", image_filename)
    os.makedirs(os.path.dirname(image_path), exist_ok=True)  # make sure the output directory exists

    # Add A1111-style generation parameters as PNG metadata
    new_metadata_string = f"{prompt}\nNegative prompt: none\nSteps: {steps}, CFG scale: {cfg_scale}, Seed: {seed}, Lora hashes: AndroFlux-v19: c44afd41ece1"
    metadata = PngImagePlugin.PngInfo()
    metadata.add_text("parameters", new_metadata_string)

    image.save(image_path, pnginfo=metadata)

    # Construct the URL to access the image
    space_url = "https://killwithabass-flux-1-dev-lora-androflux.hf.space"  # Replace with your actual space URL
    image_url = f"{space_url}/gradio_api/file={image_path}"

    # Log the query to the Google Sheet (prompts mentioning "girl" or "woman" are skipped)
    try:
        if "girl" not in prompt.lower() and "woman" not in prompt.lower():
            wks.append_table(values=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, image_url])
    except Exception as error:
        # Logging must never break image generation, so just report the error
        print("An exception occurred:", error)
        print(f"Image URL: {image_url}")  # Log the file URL so the result is still traceable

    # Final update (100%)
    progress(1, "Completed!")

    yield image, seed

# Example cached image and settings
example_image_path = "blond_5.webp"  # Replace with the actual path to the example image
example_prompt = """a full frontal view photo of a athletic man with olive skin in his late twenties standing on a flowery terrace at golden hour. He is fully naked with a thick uncut penis and blond pubic hair. The man has long blond hair and has a dominant expression. The setting is outdoors, with a peaceful and aesthetic atmosphere."""
example_cfg_scale = 3.5
example_steps = 25
example_width = 896
example_height = 1152
example_seed = 556215326
example_lora_scale = 1

def load_example():
    # Load example image from file
    example_image = Image.open(example_image_path)
    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_image

with gr.Blocks() as app:
    gr.Markdown("# Androflux Image Generator")
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt of max 77 characters", lines=3)
            generate_button = gr.Button("Generate")
            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
            randomize_seed = gr.Checkbox(False, label="Randomize seed")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
        with gr.Column(scale=1):
            result = gr.Image(label="Generated Image")
            gr.Markdown("Generate images using Androflux Lora and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")

    # Automatically load example data and image when the interface is launched
    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])
    
    generate_button.click(
        run_lora,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed],
    )

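# Queueing is required for the progress bar and generator-style (yield) outputs to work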
app.queue()
app.launch()