Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -159,6 +159,7 @@ noise_scheduler = DDIMScheduler(
     steps_offset=1,
 )
 
+
 vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
 unet = UNet2DConditionModel.from_pretrained(base_model_path, subfolder="unet", in_channels=13, low_cpu_mem_usage=False, ignore_mismatched_sizes=True).to(dtype=torch.float16)
 
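The added blank line is cosmetic, but the surrounding context is worth a note: the UNet is loaded with in_channels=13, so the checkpoint's original conv_in weights no longer match and diffusers must re-initialize them. A minimal sketch of that loading pattern, assuming an arbitrary Stable Diffusion checkpoint (the model id below is an illustrative stand-in, not the Space's base_model_path):

import torch
from diffusers import UNet2DConditionModel

# Widening conv_in to 13 input channels: ignore_mismatched_sizes tells diffusers
# to re-initialize layers whose shapes changed, and that code path requires
# low_cpu_mem_usage=False.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative stand-in for base_model_path
    subfolder="unet",
    in_channels=13,
    low_cpu_mem_usage=False,
    ignore_mismatched_sizes=True,
).to(dtype=torch.float16)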
@@ -177,13 +178,14 @@ referencenet = ReferenceNet.from_pretrained(ref_model_path, subfolder="unet").to
 mimicbrush_model = MimicBrush_RefNet(pipe, image_encoder_path, mimicbrush_ckpt, depth_anything_model, depth_guider, referencenet, device)
 mask_processor = VaeImageProcessor(vae_scale_factor=1, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
 
-
+
 def infer_single(ref_image, target_image, target_mask, seed = -1, num_inference_steps=50, guidance_scale = 5, enable_shape_control = False):
     #return ref_image
     """
     mask: 0/1 1-channel np.array
     image: rgb np.array
     """
+
     ref_image = ref_image.astype(np.uint8)
     target_image = target_image.astype(np.uint8)
     target_mask = target_mask .astype(np.uint8)
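The docstring above pins down infer_single's input contract: RGB uint8 arrays for the images and a single-channel 0/1 array for the mask. A hedged sketch of preparing such inputs from disk (file names are illustrative; the >128 threshold mirrors run_local below):

import numpy as np
from PIL import Image

ref_image = np.asarray(Image.open("ref.png").convert("RGB")).astype(np.uint8)
target_image = np.asarray(Image.open("target.png").convert("RGB")).astype(np.uint8)

# Binarize any grayscale mask to {0, 1}, as run_local does with np.where.
gray = np.asarray(Image.open("mask.png").convert("L"))
target_mask = np.where(gray > 128, 1, 0).astype(np.uint8)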
@@ -228,6 +230,7 @@ def infer_single(ref_image, target_image, target_mask, seed = -1, num_inference_
     return pred, depth_pred.astype(np.uint8)
 
 
+
 def inference_single_image(ref_image,
                            tar_image,
                            tar_mask,
@@ -246,10 +249,12 @@ def inference_single_image(ref_image,
 def run_local(base,
               ref,
               *args):
-    image = base["image"].convert("RGB")
-    mask = base["mask"].convert("L")
+    image = base["background"].convert("RGB") #base["image"].convert("RGB")
+    mask = base["layers"][0] #base["mask"].convert("L")
+
     image = np.asarray(image)
-    mask = np.asarray(mask)
+    mask = np.asarray(mask)[:,:,-1]
+    #print(image.shape, mask.shape, mask.max(), mask.min())
     mask = np.where(mask > 128, 1, 0).astype(np.uint8)
 
 
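This hunk is the heart of the update: Gradio 4's gr.ImageEditor hands the callback a dict with "background" and "layers" keys instead of the old sketch tool's {"image", "mask"}, and the painted strokes live in a layer's alpha channel. A standalone sketch of the same extraction, assuming value comes from an ImageEditor configured with type="pil" and layers=False:

import numpy as np

def editor_to_image_and_mask(value):
    # The uploaded picture is the background; strokes sit in the first layer.
    image = np.asarray(value["background"].convert("RGB"))
    # Each layer is RGBA; alpha is opaque exactly where the user painted.
    alpha = np.asarray(value["layers"][0])[:, :, -1]
    mask = np.where(alpha > 128, 1, 0).astype(np.uint8)
    return image, mask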
@@ -294,18 +299,22 @@ with gr.Blocks() as demo:
 
     gr.Markdown("### Tutorial")
     gr.Markdown("1. Upload the source image and the reference image")
-    gr.Markdown("2. 
+    gr.Markdown("2. Select the \"draw button\" to mask the to-edit region on the source image ")
     gr.Markdown("3. Click generate ")
     gr.Markdown("#### You shoud click \"keep the original shape\" to conduct texture transfer ")
-
 
     gr.Markdown("# Upload the source image and reference image")
-    gr.Markdown("### Tips: you could adjust the brush size
+    gr.Markdown("### Tips: you could adjust the brush size")
 
     with gr.Row():
-        base = gr.
-
-
+        base = gr.ImageEditor( label="Source",
+                type="pil",
+                brush=gr.Brush(colors=["#000000"],default_size = 50,color_mode = "fixed"),
+                layers = False,
+                interactive=True
+                )
+        ref = gr.Image(label="Reference", sources="upload", type="pil", height=512)
+        run_local_button = gr.Button(value="Run")
 
 
 
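The old sketch-enabled gr.Image is replaced here by gr.ImageEditor with a fixed black brush and layers disabled, which is what makes the base["layers"][0] access in run_local safe. The button wiring itself is outside this hunk; a hedged sketch of how such a layout is typically connected (the stub callback and the output component are assumptions, not the Space's actual signature):

import gradio as gr

def run_local(base, ref, *args):
    # Stub standing in for the app's run_local; echoes the reference image.
    return ref

with gr.Blocks() as demo:
    with gr.Row():
        base = gr.ImageEditor(
            label="Source",
            type="pil",
            brush=gr.Brush(colors=["#000000"], default_size=50, color_mode="fixed"),
            layers=False,
            interactive=True,
        )
        ref = gr.Image(label="Reference", sources="upload", type="pil", height=512)
        run_local_button = gr.Button(value="Run")
    out = gr.Image(label="Output")  # hypothetical output slot
    run_local_button.click(fn=run_local, inputs=[base, ref], outputs=[out])

demo.launch()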
@@ -316,6 +325,7 @@ with gr.Blocks() as demo:
                 './demo_example/005_source.png',
                 './demo_example/005_reference.png',
                 ],
+
                 [
                 './demo_example/000_source.png',
                 './demo_example/000_reference.png',
@@ -344,6 +354,7 @@ with gr.Blocks() as demo:
                 './demo_example/007_source.png',
                 './demo_example/007_reference.png',
                 ],
+
             ],
 
             inputs=[