Update handler.py
handler.py (+17 -21)
@@ -42,7 +42,7 @@ class EndpointHandler:
         self.app = FaceAnalysis(name='antelopev2', root=model_dir, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
 
         self.app.prepare(ctx_id=0, det_size=(640, 640))
-        openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+        self.openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
 
         # Path to InstantID models
         controlnet_path = os.path.join(model_dir, "checkpoints", "ControlNetModel")
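
For context, the pose preprocessor now stored on self.openpose comes from the controlnet_aux package and can be exercised on its own. A minimal sketch, assuming controlnet_aux and Pillow are installed; "face.jpg" is a placeholder input, not a file from this repo:

    from PIL import Image
    from controlnet_aux import OpenposeDetector

    # Same annotator checkpoint as in __init__ above; weights download on first use.
    openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

    # Returns a PIL image of the rendered keypoints, usable as a
    # ControlNet conditioning image.
    pose_map = openpose(Image.open("face.jpg"))
    pose_map.save("pose_map.png")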
@@ -63,19 +63,14 @@ class EndpointHandler:
             controlnet_canny_model, torch_dtype=dtype
         ).to(device)
 
-        def get_canny_image(image, t1=100, t2=200):
-            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-            edges = cv2.Canny(image, t1, t2)
-            return Image.fromarray(edges, "L")
-
         self.controlnet_map = {
             "pose": controlnet_pose,
             "canny": controlnet_canny
         }
 
         self.controlnet_map_fn = {
-            "pose": openpose,
-            "canny": get_canny_image
+            "pose": self.openpose,
+            "canny": self.get_canny_image
         }
 
         pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"
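
The two dicts keep each ControlNet and its matching preprocessor under the same key, so one selection list can drive both. A hypothetical sketch of that lookup; select_controlnets, selection, and scales are illustrative names, not part of the handler:

    def select_controlnets(handler, face_image, selection, scales):
        """Pair each selected ControlNet with its preprocessed conditioning image."""
        # Preprocess the input once per selected conditioner ("pose", "canny", ...).
        images = [handler.controlnet_map_fn[name](face_image) for name in selection]
        # Fetch the matching ControlNet models and per-conditioner strengths.
        nets = [handler.controlnet_map[name] for name in selection]
        weights = [scales[name] for name in selection]
        return nets, images, weights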
@@ -92,15 +87,14 @@ class EndpointHandler:
             self.pipe.scheduler.config
         )
 
-        #
+        # Load LoRA weights for LCM
         self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
-        self.pipe.disable_lora()
-
-        self.pipe.cuda()
-        self.pipe.load_ip_adapter_instantid(face_adapter)
-        self.pipe.image_proj_model.to("cuda")
-        self.pipe.unet.to("cuda")
 
+    def get_canny_image(self, image, t1=100, t2=200):
+        image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+        edges = cv2.Canny(image, t1, t2)
+        return Image.fromarray(edges, "L")
+
     def __call__(self, data):
 
         def convert_from_cv2_to_image(img: np.ndarray) -> Image:
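
Since get_canny_image is now an ordinary method, its logic is easy to check in isolation. A standalone sketch with the same steps, assuming opencv-python, numpy, and Pillow:

    import cv2
    import numpy as np
    from PIL import Image

    def get_canny_image(image, t1=100, t2=200):
        # PIL delivers RGB; OpenCV expects BGR channel order.
        bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        # Canny edge map: uint8 array holding 0 or 255.
        edges = cv2.Canny(bgr, t1, t2)
        # Single-channel ("L") PIL image for ControlNet conditioning.
        return Image.fromarray(edges, "L")

    # Smoke test: a white square on black yields a rectangular edge outline.
    arr = np.zeros((64, 64, 3), dtype=np.uint8)
    arr[16:48, 16:48] = 255
    print(get_canny_image(Image.fromarray(arr)).size)  # (64, 64)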
@@ -152,7 +146,7 @@ class EndpointHandler:
             return image
 
         # Debug: Print incoming data
-        print("Incoming data:",
+        print("Incoming data:", data)
 
         face_image_base64 = data.pop("face_image_base64", None)
         if face_image_base64 is not None:
@@ -182,22 +176,24 @@ class EndpointHandler:
         enhance_non_face_region = data.pop("enhance_non_face_region", False)
         seed = data.pop("seed", 42)
 
-
-        print(f"[Debug] Prompt: {prompt}")
-        print(f"[Debug] Negative Prompt: {negative_prompt}")
+        # Debug: Print additional settings
         print(f"[Debug] IdentityNet Strength Ratio: {identitynet_strength_ratio}")
         print(f"[Debug] Adapter Strength Ratio: {adapter_strength_ratio}")
         print(f"[Debug] Pose Strength: {pose_strength}")
         print(f"[Debug] Canny Strength: {canny_strength}")
         print(f"[Debug] Num Steps: {num_steps}")
         print(f"[Debug] Guidance Scale: {guidance_scale}")
-        print(f"[Debug] ControlNet Selection: {controlnet_selection}")
         print(f"[Debug] Scheduler: {scheduler}")
         print(f"[Debug] Enable Fast Inference: {enable_fast_inference}")
         print(f"[Debug] Enhance Non-Face Region: {enhance_non_face_region}")
         print(f"[Debug] Seed: {seed}")
 
-
+        # Enable LCM if fast inference is enabled
+        if enable_fast_inference:
+            self.pipe.enable_lora()
+        else:
+            self.pipe.disable_lora()
+
         scheduler_class_name = scheduler.split("-")[0]
 
         add_kwargs = {}
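
Because the lcm-lora-sdxl weights are loaded once in __init__, the per-request toggle above only flips the adapter on or off. When the LCM LoRA is active it is normally paired with diffusers' LCMScheduler, few steps, and low guidance; a hedged sketch of that pairing (the helper name and clamp values are illustrative, not taken from this handler):

    from diffusers import LCMScheduler

    def configure_fast_inference(pipe, enable_fast_inference, num_steps, guidance_scale):
        # `pipe` is assumed to be an SDXL pipeline that already ran
        # pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl").
        if enable_fast_inference:
            pipe.enable_lora()
            # LCM sampling uses its own scheduler.
            pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
            num_steps = min(num_steps, 8)              # LCM works well at 4-8 steps
            guidance_scale = min(guidance_scale, 2.0)  # and low CFG
        else:
            pipe.disable_lora()
        return num_steps, guidance_scale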