import gradio as gr
import os
import torch
from diffusers.utils import check_min_version
from controlnet_flux import FluxControlNetModel
from transformer_flux import FluxTransformer2DModel
from pipeline_flux_controlnet_inpaint import FluxControlNetInpaintingPipeline
import spaces
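# Authenticate with the Hugging Face Hub so the gated FLUX.1-dev weights can
# be downloaded; assumes an HF_TOKEN secret is set in the environment.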
import huggingface_hub
huggingface_hub.login(os.getenv('HF_TOKEN'))

check_min_version("0.30.2")
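
# Load the FLUX.1-dev transformer separately in bfloat16 so it can be passed
# into the pipeline below.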
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Build the pipeline: attach the Alimama inpainting ControlNet to the
# FLUX.1-dev base model, keeping all weights in bfloat16.
controlnet = FluxControlNetModel.from_pretrained(
    "alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetInpaintingPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    controlnet=controlnet,
    transformer=transformer,
    torch_dtype=torch.bfloat16
).to("cuda")
pipe.transformer.to(torch.bfloat16)
pipe.controlnet.to(torch.bfloat16)


MARKDOWN = """
# FLUX.1-dev-Inpainting

#### VERSION: 0.0.3_beta

- Original model: black-forest-labs/FLUX.1-dev
- ControlNet: alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta
"""

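# Run one inpainting pass: resize the image and mask to the working
# resolution, call the ControlNet inpainting pipeline, and scale the
# result back to the input size.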
@spaces.GPU()
def process(input_image_editor,
            prompt,
            negative_prompt,
            controlnet_conditioning_scale,
            guidance_scale,
            seed,
            num_inference_steps,
            true_guidance_scale
            ):
    # The ImageEditor returns the base image plus painted layers; the first
    # layer is the user-drawn inpainting mask.
    image = input_image_editor['background']
    mask = input_image_editor['layers'][0]
    size = (768, 768)
    original_size = image.size  # remember the input resolution for the final resize

    image = image.convert("RGB").resize(size)
    mask = mask.convert("RGB").resize(size)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    result = pipe(
        prompt=prompt,
        height=size[1],
        width=size[0],
        control_image=image,
        control_mask=mask,
        num_inference_steps=num_inference_steps,
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
        negative_prompt=negative_prompt,
        true_guidance_scale=true_guidance_scale
    ).images[0]

    # Scale the 768x768 output back to the input image's resolution.
    return result.resize(original_size)

with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        with gr.Column():
            input_image_editor_component = gr.ImageEditor(
                label='Image',
                type='pil',
                sources=["upload"],
                image_mode='RGB',
                layers=False,
                brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))

            prompt = gr.Textbox(lines=2, label="Prompt", placeholder="Enter prompt here...")
            negative_prompt = gr.Textbox(lines=2, label="Negative prompt", placeholder="Enter negative prompt here...")
            controlnet_conditioning_scale = gr.Slider(minimum=0, step=0.01, maximum=1, value=0.8, label="controlnet_conditioning_scale")
            guidance_scale = gr.Slider(minimum=1, step=0.5, maximum=10, value=5.0, label="guidance_scale")
            seed = gr.Slider(minimum=0, step=1, maximum=10000000, value=124, label="Seed")
            num_inference_steps = gr.Slider(minimum=1, step=1, maximum=50, value=40, label="num_inference_steps")
            true_guidance_scale = gr.Slider(minimum=1, step=1, maximum=10, value=7.0, label="true_guidance_scale")

            submit_button_component = gr.Button(
                    value='Submit', variant='primary', scale=0)

        with gr.Column():
            output_image_component = gr.Image(
                type='pil', image_mode='RGB', label='Generated image', format="png")

    submit_button_component.click(
        fn=process,
        inputs=[
            input_image_editor_component,
            prompt,
            negative_prompt,
            controlnet_conditioning_scale,
            guidance_scale,
            seed,
            num_inference_steps,
            true_guidance_scale
        ],
        outputs=[
            output_image_component,
        ]
    )

demo.launch(debug=False, show_error=True, share=True)