Culda committed on
Commit 00cde65 · 1 Parent(s): 89da14a
Files changed (2)
  1. README.md +1 -0
  2. app.py +136 -131
README.md CHANGED
@@ -6,6 +6,7 @@ colorTo: gray
  sdk: gradio
  sdk_version: 4.44.0
  app_file: app.py
+ hf_oauth: true
  pinned: false
  ---
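Note on the README change: `hf_oauth: true` enables "Sign in with Hugging Face" OAuth for the Space, and is what backs the `gr.LoginButton()` added to app.py below. As a minimal sketch of how a handler can see who is signed in (not part of this commit; `whoami` is an illustrative name, and on a local run without OAuth the profile is always None):

import gradio as gr

# Gradio fills any parameter annotated with gr.OAuthProfile with the
# signed-in user's profile; it stays None when nobody is signed in.
def whoami(profile: gr.OAuthProfile | None) -> str:
    if profile is None:
        return "Not signed in."
    return f"Signed in as {profile.username}."

with gr.Blocks() as demo:
    gr.LoginButton()
    status = gr.Textbox(label="Status")
    demo.load(whoami, inputs=None, outputs=status)  # refresh on page load

demo.launch()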
app.py CHANGED
@@ -3,13 +3,16 @@ import torch
  import spaces
  import gradio as gr
  import os
- from diffusers.pipelines.flux.pipeline_flux_controlnet_inpaint import FluxControlNetInpaintPipeline
+ from diffusers.pipelines.flux.pipeline_flux_controlnet_inpaint import (
+     FluxControlNetInpaintPipeline,
+ )
  from diffusers.models.controlnet_flux import FluxControlNetModel
  from controlnet_aux import CannyDetector
 
  # login hf token
  HF_TOKEN = os.getenv("HF_TOKEN")
  from huggingface_hub import login
+
  login()
 
  dtype = torch.bfloat16
@@ -27,6 +30,7 @@ pipe.enable_model_cpu_offload()
 
  canny = CannyDetector()
 
+
  @spaces.GPU(duration=75)
  def inpaint(
      image,
@@ -53,161 +57,162 @@ def inpaint(
      return image_res
 
 
- iface = gr.Interface(
-     fn=inpaint,
-     inputs=[
-         gr.Image(type="pil", label="Input Image"),
-         gr.Image(type="pil", label="Mask Image"),
-         gr.Textbox(label="Prompt"),
-         gr.Slider(0, 1, value=0.95, label="Strength"),
-         gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps"),
-         gr.Slider(0, 20, value=5, label="Guidance Scale"),
-         gr.Slider(0, 1, value=0.5, label="ControlNet Conditioning Scale"),
-     ],
-     outputs=gr.Image(type="pil", label="Output Image"),
-     title="Flux Inpaint AI Model",
-     description="Upload an image and a mask, then provide a prompt to generate an inpainted image.",
+ with gr.Blocks() as demo:
+     gr.LoginButton()
+     gr.Interface(
+         fn=inpaint,
+         inputs=[
+             gr.Image(type="pil", label="Input Image"),
+             gr.Image(type="pil", label="Mask Image"),
+             gr.Textbox(label="Prompt"),
+             gr.Slider(0, 1, value=0.95, label="Strength"),
+             gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps"),
+             gr.Slider(0, 20, value=5, label="Guidance Scale"),
+             gr.Slider(0, 1, value=0.5, label="ControlNet Conditioning Scale"),
+         ],
+         outputs=gr.Image(type="pil", label="Output Image"),
+         title="Flux Inpaint AI Model",
+         description="Upload an image and a mask, then provide a prompt to generate an inpainted image.",
      )
 
- iface.launch()
+ demo.launch()
 
- # import gradio as gr
- # import numpy as np
- # import random
- # # import spaces
- # import torch
- # from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
- # from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
- # # from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
- #
- # dtype = torch.bfloat16
- # device = "cuda" if torch.cuda.is_available() else "cpu"
- #
- # taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
- # good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-schnell", subfolder="vae", torch_dtype=dtype).to(device)
- # pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype, vae=taef1).to(device)
- # torch.cuda.empty_cache()
- #
- # MAX_SEED = np.iinfo(np.int32).max
- # MAX_IMAGE_SIZE = 2048
- #
- # # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
- #
- # # @spaces.GPU(duration=75)
- # def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
- #     if randomize_seed:
- #         seed = random.randint(0, MAX_SEED)
- #     generator = torch.Generator().manual_seed(seed)
- #
- #     for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
- #         prompt=prompt,
- #         guidance_scale=guidance_scale,
- #         num_inference_steps=num_inference_steps,
- #         width=width,
- #         height=height,
- #         generator=generator,
- #         output_type="pil",
- #         good_vae=good_vae,
- #     ):
- #         yield img, seed
- #
+ # import gradio as gr
+ # import numpy as np
+ # import random
+ # # import spaces
+ # import torch
+ # from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
+ # from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+ # # from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+ #
+ # dtype = torch.bfloat16
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+ #
+ # taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+ # good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-schnell", subfolder="vae", torch_dtype=dtype).to(device)
+ # pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype, vae=taef1).to(device)
+ # torch.cuda.empty_cache()
+ #
+ # MAX_SEED = np.iinfo(np.int32).max
+ # MAX_IMAGE_SIZE = 2048
+ #
+ # # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+ #
+ # # @spaces.GPU(duration=75)
+ # def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+ #     if randomize_seed:
+ #         seed = random.randint(0, MAX_SEED)
+ #     generator = torch.Generator().manual_seed(seed)
+ #
+ #     for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+ #         prompt=prompt,
+ #         guidance_scale=guidance_scale,
+ #         num_inference_steps=num_inference_steps,
+ #         width=width,
+ #         height=height,
+ #         generator=generator,
+ #         output_type="pil",
+ #         good_vae=good_vae,
+ #     ):
+ #         yield img, seed
+ #
  # examples = [
- #     "a tiny astronaut hatching from an egg on the moon",
- #     "a cat holding a sign that says hello world",
- #     "an anime illustration of a wiener schnitzel",
- # ]
- #
- # css="""
- # #col-container {
- #     margin: 0 auto;
- #     max-width: 520px;
- # }
+ #     "a tiny astronaut hatching from an egg on the moon",
+ #     "a cat holding a sign that says hello world",
+ #     "an anime illustration of a wiener schnitzel",
+ # ]
+ #
+ # css="""
+ # #col-container {
+ #     margin: 0 auto;
+ #     max-width: 520px;
+ # }
  # """
  #
  # with gr.Blocks(css=css) as demo:
- #
+ #
  #     with gr.Column(elem_id="col-container"):
  #         gr.Markdown(f"""# FLUX.1 [dev]
- #         12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
- #         [[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
- #         """)
- #
+ #         12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
+ #         [[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
+ #         """)
+ #
  #         with gr.Row():
- #
+ #
  #             prompt = gr.Text(
- #                 label="Prompt",
- #                 show_label=False,
- #                 max_lines=1,
- #                 placeholder="Enter your prompt",
- #                 container=False,
- #             )
- #
+ #                 label="Prompt",
+ #                 show_label=False,
+ #                 max_lines=1,
+ #                 placeholder="Enter your prompt",
+ #                 container=False,
+ #             )
+ #
  #             run_button = gr.Button("Run", scale=0)
- #
+ #
  #         result = gr.Image(label="Result", show_label=False)
- #
+ #
  #         with gr.Accordion("Advanced Settings", open=False):
- #
+ #
  #             seed = gr.Slider(
- #                 label="Seed",
- #                 minimum=0,
- #                 maximum=MAX_SEED,
- #                 step=1,
- #                 value=0,
- #             )
- #
+ #                 label="Seed",
+ #                 minimum=0,
+ #                 maximum=MAX_SEED,
+ #                 step=1,
+ #                 value=0,
+ #             )
+ #
  #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
- #
+ #
  #             with gr.Row():
- #
+ #
  #                 width = gr.Slider(
- #                     label="Width",
- #                     minimum=256,
- #                     maximum=MAX_IMAGE_SIZE,
- #                     step=32,
- #                     value=1024,
- #                 )
- #
+ #                     label="Width",
+ #                     minimum=256,
+ #                     maximum=MAX_IMAGE_SIZE,
+ #                     step=32,
+ #                     value=1024,
+ #                 )
+ #
  #                 height = gr.Slider(
- #                     label="Height",
- #                     minimum=256,
- #                     maximum=MAX_IMAGE_SIZE,
- #                     step=32,
- #                     value=1024,
- #                 )
- #
+ #                     label="Height",
+ #                     minimum=256,
+ #                     maximum=MAX_IMAGE_SIZE,
+ #                     step=32,
+ #                     value=1024,
+ #                 )
+ #
  #             with gr.Row():
  #
  #                 guidance_scale = gr.Slider(
- #                     label="Guidance Scale",
- #                     minimum=1,
- #                     maximum=15,
- #                     step=0.1,
- #                     value=3.5,
- #                 )
- #
+ #                     label="Guidance Scale",
+ #                     minimum=1,
+ #                     maximum=15,
+ #                     step=0.1,
+ #                     value=3.5,
+ #                 )
+ #
  #                 num_inference_steps = gr.Slider(
- #                     label="Number of inference steps",
- #                     minimum=1,
- #                     maximum=50,
- #                     step=1,
- #                     value=28,
- #                 )
- #
+ #                     label="Number of inference steps",
+ #                     minimum=1,
+ #                     maximum=50,
+ #                     step=1,
+ #                     value=28,
+ #                 )
+ #
  #         gr.Examples(
- #             examples = examples,
- #             fn = infer,
- #             inputs = [prompt],
- #             outputs = [result, seed],
- #             cache_examples="lazy"
- #         )
+ #             examples = examples,
+ #             fn = infer,
+ #             inputs = [prompt],
+ #             outputs = [result, seed],
+ #             cache_examples="lazy"
+ #         )
  #
  #     gr.on(
- #         triggers=[run_button.click, prompt.submit],
- #         fn = infer,
- #         inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
- #         outputs = [result, seed]
- #     )
+ #         triggers=[run_button.click, prompt.submit],
+ #         fn = infer,
+ #         inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+ #         outputs = [result, seed]
+ #     )
  #
  # demo.launch()
-
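Note on the app.py change: aside from reformatting (the multi-line import, blank-line normalization, and the re-indented commented-out FLUX.1 [dev] demo), the functional edit replaces the bare gr.Interface(...) / iface.launch() with a gr.Blocks() layout that places a gr.LoginButton() above the same interface, pairing with the new hf_oauth: true metadata. The commit only renders the button; as a hedged sketch (inpaint_gated is an illustrative name, and the seven-argument signature is inferred from the inputs list above), the handler could additionally require sign-in:

# Sketch: require a signed-in user before running the pipeline. On a Space
# with hf_oauth enabled, Gradio fills the OAuthProfile parameter
# automatically and does not expose it as a UI input.
def inpaint_gated(image, mask_image, prompt, strength, num_steps,
                  guidance_scale, controlnet_scale,
                  profile: gr.OAuthProfile | None = None):
    if profile is None:
        raise gr.Error("Please sign in with Hugging Face first.")
    return inpaint(image, mask_image, prompt, strength, num_steps,
                   guidance_scale, controlnet_scale)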