"""ComfyUI custom node that sends image/mask pairs to the WEFA-DOOR API and returns the generated image."""
import aiohttp
from PIL import Image
import io
import torch
import numpy as np
import base64
import json
import asyncio
import ssl

# Default TLS context; aiohttp only applies it when the endpoint URL is https
ssl_context = ssl.create_default_context()

class WefaDoorNode:
    """Sends reference/target image-mask pairs to the remote API and returns the result image."""

    CATEGORY = "WEFA-DOOR"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "ref_image": ("IMAGE", {}),
                "ref_mask": ("IMAGE", {}),
                "tar_image": ("IMAGE", {}),
                "tar_mask": ("IMAGE", {}),
            },
            "optional": {
                "model_index": ("INT", {}),  # Integer model selection; overrides the dropdown when set
                "model": (["none", "women", "girls", "male", "boys", "4-boys", "4-girls", "x", "y", "z", "a", "b"],),  # Dropdown model selection
                "steps": ("INT", {"default": 55}),  # Number of steps (default: 55)
                "cfg_scale": ("FLOAT", {"default": 1.5}),  # CFG scale (default: 1.5)
                "seed": ("INT", {"default": 123123}),  # Seed (default: 123123)
                "prompt": ("STRING", {"default": ""}),  # Text prompt forwarded to the API
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("result_image",)
    FUNCTION = "send_to_api_sync"

    @staticmethod
    async def image_to_base64(image_tensor):
        """Encode an image tensor as a base64 PNG string."""
        image_np = image_tensor.cpu().detach().numpy()
        # Drop a singleton batch dimension: [1, H, W, C] -> [H, W, C]
        if image_np.ndim == 4 and image_np.shape[0] == 1:
            image_np = image_np.squeeze(0)
        # Convert channel-first layouts ([C, H, W]) to channel-last ([H, W, C])
        if image_np.ndim == 3 and image_np.shape[0] in {1, 3}:
            image_np = image_np.transpose(1, 2, 0)
        # Scale floats in [0, 1] to 8-bit, clipping to avoid uint8 wrap-around
        image_np = (np.clip(image_np, 0.0, 1.0) * 255).astype(np.uint8)
        # Collapse a single-channel image to 2-D grayscale for PIL
        if image_np.ndim == 3 and image_np.shape[2] == 1:
            image_np = image_np.squeeze(2)
        image_pil = Image.fromarray(image_np)
        buffer = io.BytesIO()
        image_pil.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode()

    async def send_to_api(self, tar_image, tar_mask, ref_image, ref_mask, model_index=None,
                          model=None, steps=55, cfg_scale=1.5, seed=123123, prompt=""):
        url = 'http://63.141.33.9:22037'
        headers = {
            'X-API-KEY': 'xiCQTaoQKXUNATzuFLWRgtoJKiFXiDGvnk',
            'Content-Type': 'application/json'
        }
        # Determine the model to use: an in-range model_index overrides the dropdown,
        # and the default is "none" when neither input is provided
        models = ["none", "women", "girls", "male", "boys", "4-boys", "4-girls", "x", "y", "z", "a", "b"]
        if model_index is not None and 0 <= model_index < len(models):
            model = models[model_index]
        elif model is None:
            model = models[0]  # Default to "none"
        json_payload = json.dumps({
            "ref_image": await WefaDoorNode.image_to_base64(ref_image),
            "ref_mask": await WefaDoorNode.image_to_base64(ref_mask),
            "tar_image": await WefaDoorNode.image_to_base64(tar_image),
            "tar_mask": await WefaDoorNode.image_to_base64(tar_mask),
            "model": model,
            "seed": seed,
            "steps": steps,
            "guidance_scale": cfg_scale,
            "prompt": prompt
        })
        # ssl_context is ignored for this plain-http URL; it only applies over https
        async with aiohttp.ClientSession() as session:
            async with session.post(url, data=json_payload, headers=headers, ssl=ssl_context) as response:
                if response.status != 200:
                    response_text = await response.text()
                    # Raise instead of returning None so the failure surfaces in ComfyUI
                    raise RuntimeError(f"API request failed with status {response.status}: {response_text}")
                image_data = await response.read()
                result_image = Image.open(io.BytesIO(image_data)).convert('RGB')
                # [H, W, C] uint8 -> [1, H, W, C] float tensor in [0, 1], as ComfyUI expects
                result_tensor = torch.from_numpy(np.array(result_image)).unsqueeze(0).float() / 255.0
                return (result_tensor,)

    def send_to_api_sync(self, tar_image, tar_mask, ref_image, ref_mask, model_index=None,
                         model=None, steps=55, cfg_scale=1.5, seed=123123, prompt=""):
        # Synchronous wrapper that ComfyUI invokes via FUNCTION
        return asyncio.run(self.send_to_api(tar_image, tar_mask, ref_image, ref_mask,
                                            model_index, model, steps, cfg_scale, seed, prompt))
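
# Minimal registration sketch, assuming this file is loaded directly as a ComfyUI
# custom node module; the display name below is an assumption, not defined elsewhere
# in this source.
NODE_CLASS_MAPPINGS = {
    "WefaDoorNode": WefaDoorNode,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "WefaDoorNode": "WEFA-DOOR Node",  # hypothetical display label
}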