blanchon committed
Commit b1d8999 · 1 Parent(s): 73d42f6

Add examples
app.py CHANGED
@@ -1,4 +1,5 @@
 import secrets
+from pathlib import Path
 from typing import cast
 
 import gradio as gr
@@ -11,10 +12,9 @@ from PIL import Image, ImageFilter, ImageOps
 
 DEVICE = "cuda"
 
+EXAMPLES_DIR = Path(__file__).parent / "examples"
+
 MAX_SEED = np.iinfo(np.int32).max
-FIXED_DIMENSION = 900
-FIXED_DIMENSION = 720
-FIXED_DIMENSION = (FIXED_DIMENSION // 16) * 16
 
 SYSTEM_PROMPT = r"""This two-panel split-frame image showcases a furniture in as a product shot versus styled in a room.
 [LEFT] standalone product shot image the furniture on a white background.
@@ -30,6 +30,7 @@ else:
     state_dict, network_alphas = FluxFillPipeline.lora_state_dict(
         pretrained_model_name_or_path_or_dict="blanchon/FluxFillFurniture",
        weight_name="pytorch_lora_weights3.safetensors",
+        torch_dtype=torch.bfloat16,
        return_alphas=True,
    )
 
@@ -43,11 +44,42 @@ else:
     FluxFillPipeline.load_lora_into_transformer(
         state_dict=state_dict,
         network_alphas=network_alphas,
+        torch_dtype=torch.bfloat16,
         transformer=pipe.transformer,
     )
     pipe.to(DEVICE)
 
 
+def make_example(image_path: Path, mask_path: Path) -> EditorValue:
+    background_image = Image.open(image_path)
+    background_image = background_image.convert("RGB")
+    background = np.array(background_image)
+
+    mask_image = Image.open(mask_path)
+    mask_image = mask_image.convert("RGB")
+
+    mask = np.array(mask_image)
+    mask = mask[:, :, 0]
+    mask = np.where(mask == 255, 0, 255)  # noqa: PLR2004
+
+    if background.shape[0] != mask.shape[0] or background.shape[1] != mask.shape[1]:
+        msg = "Background and mask must have the same shape"
+        raise ValueError(msg)
+
+    layer = np.zeros((background.shape[0], background.shape[1], 4), dtype=np.uint8)
+    layer[:, :, 3] = mask
+
+    composite = np.zeros((background.shape[0], background.shape[1], 4), dtype=np.uint8)
+    composite[:, :, :3] = background
+    composite[:, :, 3] = np.where(mask == 255, 0, 255)  # noqa: PLR2004
+
+    return {
+        "background": background,
+        "layers": [layer],
+        "composite": composite,
+    }
+
+
 @spaces.GPU(duration=150)
 def infer(
     furniture_image: Image.Image,
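Note: the new `make_example` helper builds the dict form of Gradio's `EditorValue` (background, layers, composite) so that `gr.Examples` can prefill the `ImageEditor` further down. A minimal standalone sketch of the same construction, using tiny synthetic arrays as stand-ins for the example PNGs:

```python
import numpy as np

background = np.zeros((4, 4, 3), dtype=np.uint8)  # stand-in for room_image.png (RGB)
raw_mask = np.array([[255, 255, 0, 0]] * 4, dtype=np.uint8)  # stand-in for room_mask.png

# make_example flips the PNG convention: white (255) pixels -> 0, everything else -> 255
mask = np.where(raw_mask == 255, 0, 255).astype(np.uint8)

# The brush "layer" is fully transparent except where the mask is active (alpha channel)
layer = np.zeros((*mask.shape, 4), dtype=np.uint8)
layer[:, :, 3] = mask

# The composite is the background with the masked region knocked out via alpha
composite = np.zeros((*mask.shape, 4), dtype=np.uint8)
composite[:, :, :3] = background
composite[:, :, 3] = np.where(mask == 255, 0, 255)

editor_value = {"background": background, "layers": [layer], "composite": composite}
for key, value in editor_value.items():
    arr = value[0] if isinstance(value, list) else value
    print(key, arr.shape)  # background (4, 4, 3); layers/composite (4, 4, 4)
```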
@@ -57,16 +89,20 @@ def infer(
     randomize_seed: bool = False,
     guidance_scale: float = 3.5,
     num_inference_steps: int = 20,
+    max_dimension: int = 720,
     progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
 ):
+    # Ensure max_dimension is a multiple of 16 (for VAE)
+    max_dimension = (max_dimension // 16) * 16
+
     _room_image = room_image["background"]
     if _room_image is None:
         msg = "Room image is required"
         raise ValueError(msg)
-    _room_image = cast(Image.Image, _room_image)
+    _room_image = cast("Image.Image", _room_image)
     _room_image = ImageOps.fit(
         _room_image,
-        (FIXED_DIMENSION, FIXED_DIMENSION),
+        (max_dimension, max_dimension),
         method=Image.Resampling.LANCZOS,
         centering=(0.5, 0.5),
     )
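The runtime `max_dimension` parameter replaces the old module-level `FIXED_DIMENSION` constants, and `infer` first snaps it down to a multiple of 16 so the latent dimensions divide cleanly through the VAE (per the comment in the hunk above). A quick illustration of the snapping arithmetic, with values chosen to bracket the slider's 512-1024 range:

```python
# Snapping a requested dimension down to a multiple of 16, as in infer().
for requested in (512, 700, 720, 1000, 1024):
    snapped = (requested // 16) * 16
    print(f"{requested:>4} -> {snapped}")
# 700 -> 688 and 1000 -> 992; 512, 720, and 1024 are already multiples of 16.
```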
@@ -75,47 +111,54 @@ def infer(
     if _room_mask is None:
         msg = "Room mask is required"
         raise ValueError(msg)
-    _room_mask = cast(Image.Image, _room_mask)
+    _room_mask = cast("Image.Image", _room_mask)
     _room_mask = ImageOps.fit(
         _room_mask,
-        (FIXED_DIMENSION, FIXED_DIMENSION),
+        (max_dimension, max_dimension),
         method=Image.Resampling.LANCZOS,
         centering=(0.5, 0.5),
     )
 
+    # _room_image.save("room_image.png")
+    # _room_mask_with_white_background = Image.new(
+    #     "RGB", _room_mask.size, (255, 255, 255)
+    # )
+    # _room_mask_with_white_background.paste(_room_mask, (0, 0), _room_mask)
+    # _room_mask_with_white_background.save("room_mask.png")
+
     furniture_image = ImageOps.fit(
         furniture_image,
-        (FIXED_DIMENSION, FIXED_DIMENSION),
+        (max_dimension, max_dimension),
         method=Image.Resampling.LANCZOS,
         centering=(0.5, 0.5),
     )
     _furniture_image = Image.new(
         "RGB",
-        (FIXED_DIMENSION, FIXED_DIMENSION),
+        (max_dimension, max_dimension),
         (255, 255, 255),
     )
     _furniture_image.paste(furniture_image, (0, 0))
 
-    _furniture_mask = Image.new(
-        "RGB", (FIXED_DIMENSION, FIXED_DIMENSION), (255, 255, 255)
-    )
+    # _furniture_image.save("furniture_image.png")
+
+    _furniture_mask = Image.new("RGB", (max_dimension, max_dimension), (255, 255, 255))
 
     image = Image.new(
         "RGB",
-        (FIXED_DIMENSION * 2, FIXED_DIMENSION),
+        (max_dimension * 2, max_dimension),
         (255, 255, 255),
     )
     # Paste on the center of the image
     image.paste(_furniture_image, (0, 0))
-    image.paste(_room_image, (FIXED_DIMENSION, 0))
+    image.paste(_room_image, (max_dimension, 0))
 
     mask = Image.new(
         "RGB",
-        (FIXED_DIMENSION * 2, FIXED_DIMENSION),
+        (max_dimension * 2, max_dimension),
         (255, 255, 255),
     )
     mask.paste(_furniture_mask, (0, 0))
-    mask.paste(_room_mask, (FIXED_DIMENSION, 0), _room_mask)
+    mask.paste(_room_mask, (max_dimension, 0), _room_mask)
     # Invert the mask
     mask = ImageOps.invert(mask)
     # Blur the mask
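Everything in this hunk swaps `FIXED_DIMENSION` for the runtime `max_dimension`; the geometry is unchanged: a `2 * max_dimension`-wide canvas with the furniture panel on the left and the room (plus the user's drawn mask) on the right, after which the combined mask is inverted and blurred. A compact sketch of that layout with solid-color stand-ins for the real inputs (the real code pastes the editor's RGBA mask with itself as the paste mask; the stand-in here is plain RGB):

```python
from PIL import Image, ImageOps

max_dimension = 720  # already a multiple of 16
furniture = Image.new("RGB", (max_dimension, max_dimension), (200, 60, 60))
room = Image.new("RGB", (max_dimension, max_dimension), (60, 60, 200))
room_mask = Image.new("RGB", (max_dimension, max_dimension), (255, 255, 255))

# Two-panel canvas: [LEFT] furniture reference, [RIGHT] room to inpaint
image = Image.new("RGB", (max_dimension * 2, max_dimension), (255, 255, 255))
image.paste(furniture, (0, 0))
image.paste(room, (max_dimension, 0))

# Matching mask canvas; only the right half carries the drawn room mask
mask = Image.new("RGB", (max_dimension * 2, max_dimension), (255, 255, 255))
mask.paste(room_mask, (max_dimension, 0))
mask = ImageOps.invert(mask)  # flip mask polarity, as infer() does before blurring

print(image.size, mask.size)  # (1440, 720) (1440, 720)
```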
@@ -131,8 +174,8 @@ def infer(
         prompt=prompt,
         image=image,
         mask_image=mask,
-        height=FIXED_DIMENSION,
-        width=FIXED_DIMENSION * 2,
+        height=max_dimension,
+        width=max_dimension * 2,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
         num_images_per_prompt=2,
@@ -140,106 +183,196 @@ def infer(
     )["images"]
 
     cropped_images = [
-        image.crop((FIXED_DIMENSION, 0, FIXED_DIMENSION * 2, FIXED_DIMENSION))
+        image.crop((max_dimension, 0, max_dimension * 2, max_dimension))
         for image in results_images
     ]
 
     return cropped_images, seed
 
 
-intro_markdown = """
-# AnyFurnish
-
-AnyFurnish is a tool that allows you to generate furniture images using Flux.1 Fill Dev.
+intro_markdown = r"""
+<div>
+    <div>
+        <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 40px;">
+            <b>AnyFurnish</b>
+        </div>
+        <br>
+        <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+            <a href="https://github.com/julien-blanchon/"><img src="https://img.shields.io/static/v1?label=Github Report&message=Github&color=green"></a> &ensp;
+        </div>
+        <br>
+        <div style="display: flex; text-align: center; font-size: 14px; padding-right: 300px; padding-left: 300px;">
+            AnyFurnish is a tool that allows you to generate furniture images using Flux.1 Fill Dev.
+            You can upload a furniture image and a room image, and the tool will generate a new image with the furniture in the room.
+        </div>
+    </div>
+</div>
 """
 
-css = """
-#col-container {
+css = r"""
+#col-left {
     margin: 0 auto;
-    max-width: 1000px;
+    max-width: 430px;
+}
+#col-mid {
+    margin: 0 auto;
+    max-width: 430px;
+}
+#col-right {
+    margin: 0 auto;
+    max-width: 430px;
+}
+#col-showcase {
+    margin: 0 auto;
+    max-width: 1100px;
 }
 """
 
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(intro_markdown)
-        with gr.Row():
-            with gr.Column():
-                with gr.Column():
-                    furniture_image = gr.Image(
-                        label="Furniture Image",
-                        type="pil",
-                        sources=["upload"],
-                        image_mode="RGB",
-                        height=400,
-                    )
-                    room_image = gr.ImageEditor(
-                        label="Room Image - Draw mask for inpainting",
-                        type="pil",
-                        sources=["upload"],
-                        image_mode="RGBA",
-                        layers=False,
-                        crop_size="1:1",
-                        brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
-                        height=400,
-                    )
-                    prompt = gr.Text(
-                        label="Prompt",
-                        show_label=False,
-                        max_lines=1,
-                        placeholder="Enter a custom furniture description (optional)",
-                        container=False,
-                    )
-                    run_button = gr.Button("Run")
-
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(intro_markdown)
+    with gr.Row():
+        with gr.Column(elem_id="col-left"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 1. Upload a furniture image ⬇️
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            furniture_image = gr.Image(
+                label="Furniture Image",
+                type="pil",
+                sources=["upload"],
+                image_mode="RGB",
+                height=500,
+            )
+            furniture_prompt = gr.Text(
+                label="Prompt",
+                max_lines=1,
+                placeholder="Enter a custom furniture description (optional)",
+                container=False,
+            )
+        with gr.Column(elem_id="col-mid"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 2. Upload a room image ⬇️
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            room_image = gr.ImageEditor(
+                label="Room Image - Draw mask for inpainting",
+                type="pil",
+                sources=["upload"],
+                image_mode="RGBA",
+                layers=False,
+                crop_size="1:1",
+                brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
+                height=500,
+            )
+        with gr.Column(elem_id="col-right"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 3. Press Run to launch
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
             results = gr.Gallery(
                 label="Results",
                 format="png",
                 show_label=False,
                 columns=2,
-                height=700,
-            )
-
-            with gr.Accordion("Advanced Settings", open=False):
-                seed = gr.Slider(
-                    label="Seed",
-                    minimum=0,
-                    maximum=MAX_SEED,
-                    step=1,
-                    value=0,
+                height=500,
             )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance Scale",
-                    minimum=1,
-                    maximum=30,
-                    step=0.5,
-                    # value=50,
-                    value=3.5,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
+            run_button = gr.Button("Run")
+            with gr.Accordion("Advanced Settings", open=False):
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
                     step=1,
-                    value=20,
+                    value=0,
                 )
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                with gr.Column():
+                    max_dimension = gr.Slider(
+                        label="Max Dimension",
+                        minimum=512,
+                        maximum=1024,
+                        step=128,
+                        value=720,
+                    )
+
+                    guidance_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=1,
+                        maximum=30,
+                        step=0.5,
+                        # value=50,  # noqa: ERA001
+                        value=30,
+                    )
+
+                    num_inference_steps = gr.Slider(
+                        label="Number of inference steps",
+                        minimum=1,
+                        maximum=50,
+                        step=1,
+                        value=20,
+                    )
 
+    with gr.Column(elem_id="col-showcase"):
+        gr.HTML("""
+        <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+            <div> </div>
+            <br>
+            <div>
+                AnyFurnish examples in pairs of furniture and room images
+            </div>
+        </div>
+        """)
+        show_case = gr.Examples(
+            examples=[
+                [
+                    EXAMPLES_DIR / "1" / "furniture_image.png",
+                    make_example(
+                        EXAMPLES_DIR / "1" / "room_image.png",
+                        EXAMPLES_DIR / "1" / "room_mask.png",
+                    ),
+                ],
+                [
+                    EXAMPLES_DIR / "2" / "furniture_image.png",
+                    make_example(
+                        EXAMPLES_DIR / "2" / "room_image.png",
+                        EXAMPLES_DIR / "2" / "room_mask.png",
+                    ),
+                ],
+            ],
+            inputs=[furniture_image, room_image],
+            label=None,
+        )
     gr.on(
-        triggers=[run_button.click, prompt.submit],
+        triggers=[run_button.click, furniture_prompt.submit],
         fn=infer,
         inputs=[
             furniture_image,
             room_image,
-            prompt,
+            furniture_prompt,
             seed,
             randomize_seed,
             guidance_scale,
             num_inference_steps,
+            max_dimension,
         ],
        outputs=[results, seed],
    )
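`gr.Examples` drives both inputs at once: a plain path for the `gr.Image` and a precomputed `EditorValue` dict for the `gr.ImageEditor`, which is why `make_example` runs at UI-build time rather than on click. A trimmed-down, self-contained sketch of that wiring, with an inline stand-in for `make_example` instead of the repo's PNGs:

```python
import numpy as np
import gradio as gr


def tiny_editor_value() -> dict:
    # Stand-in for make_example(): gray background, empty brush layer.
    background = np.full((64, 64, 3), 128, dtype=np.uint8)
    layer = np.zeros((64, 64, 4), dtype=np.uint8)
    composite = np.zeros((64, 64, 4), dtype=np.uint8)
    composite[:, :, :3] = background
    composite[:, :, 3] = 255
    return {"background": background, "layers": [layer], "composite": composite}


with gr.Blocks() as sketch:
    furniture = gr.Image(label="Furniture Image", type="pil", image_mode="RGB")
    room = gr.ImageEditor(label="Room Image", type="pil", image_mode="RGBA", layers=False)
    gr.Examples(
        examples=[[np.full((64, 64, 3), 200, dtype=np.uint8), tiny_editor_value()]],
        inputs=[furniture, room],
    )

# sketch.launch()  # uncomment to verify the example row renders
```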
 
examples/1/furniture_image.png ADDED
examples/1/room_image.png ADDED
examples/1/room_mask.png ADDED
examples/2/furniture_image.png ADDED
examples/2/room_image.png ADDED
examples/2/room_mask.png ADDED
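The six PNGs above pair each furniture shot with a room image and a hand-drawn mask. Since `make_example` raises a `ValueError` when a mask's shape differs from its room image, a quick offline check over the tree catches bad assets before the Space builds (a sketch, assuming it runs from the repo root):

```python
from pathlib import Path

from PIL import Image

EXAMPLES_DIR = Path("examples")  # repo-relative, as in app.py

for example in sorted(EXAMPLES_DIR.iterdir()):
    room = Image.open(example / "room_image.png")
    mask = Image.open(example / "room_mask.png")
    status = "ok" if room.size == mask.size else "SIZE MISMATCH"
    print(f"{example.name}: room={room.size}, mask={mask.size} -> {status}")
```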