import subprocess
import os
import random

import torch

import folder_paths
from comfy import model_management

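# Module-level training defaults shared by both nodes. They are forwarded to the
# kohya-ss sd-scripts trainer as command-line flags; settings that are not exposed
# as node inputs keep these values.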
is_v2_model = 0
parameterization = 0
reg_data_dir = ""

network_module = "networks.lora"
network_weights = ""
network_dim = 32
network_alpha = 32

resolution = "512,512"

train_unet_only = 0
train_text_encoder_only = 0
stop_text_encoder_training = 0

noise_offset = 0
keep_tokens = 0
min_snr_gamma = 0

lr = "1e-4"
unet_lr = "1e-4"
text_encoder_lr = "1e-5"
lr_scheduler = "cosine_with_restarts"
lr_warmup_steps = 0
lr_restart_cycles = 1

optimizer_type = "AdamW8bit"

save_model_as = "safetensors"

save_state = 0
resume = ""

min_bucket_reso = 256
max_bucket_reso = 1584
persistent_data_loader_workers = 1

multi_gpu = 0
lowram = 0

algo = "lora"
conv_dim = 4
conv_alpha = 4
dropout = "0"

use_wandb = 0
wandb_api_key = ""
log_tracker_name = ""

logging_dir = './logs'
log_prefix = ''
mixed_precision = 'fp16'
caption_extension = '.txt'

os.environ['HF_HOME'] = "huggingface"
os.environ['XFORMERS_FORCE_DISABLE_TRITON'] = "1"

ext_args = []
launch_args = []

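
# Both nodes below unload whatever models ComfyUI currently holds, then shell out to
# the kohya-ss train_network.py script through `accelerate launch`. As a rough
# illustration (the paths and values shown here are hypothetical, not produced by
# this code), the assembled command looks like:
#
#   python -m accelerate.commands.launch --num_cpu_threads_per_process=8 \
#       "custom_nodes/Lora-Training-in-Comfy/sd-scripts/train_network.py" \
#       --pretrained_model_name_or_path="models/checkpoints/example.safetensors" \
#       --train_data_dir="path/to/images" --output_dir="models/loras" \
#       --network_module=networks.lora --network_dim=32 --network_alpha=32 ...
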
class LoraTraininginComfy:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                "data_path": ("STRING", {"default": "Insert path of image folders"}),
                "batch_size": ("INT", {"default": 1, "min": 1}),
                "max_train_epoches": ("INT", {"default": 10, "min": 1}),
                "save_every_n_epochs": ("INT", {"default": 10, "min": 1}),
                "output_name": ("STRING", {"default": 'Desired name for LoRA.'}),
                "clip_skip": ("INT", {"default": 2, "min": 1}),
                "output_dir": ("STRING", {"default": 'models/loras'}),
            },
        }

    RETURN_TYPES = ()
    RETURN_NAMES = ()
    FUNCTION = "loratraining"
    OUTPUT_NODE = True
    CATEGORY = "LJRE/LORA"

    def loratraining(self, ckpt_name, data_path, batch_size, max_train_epoches, save_every_n_epochs, output_name, clip_skip, output_dir):
        # Free any models/VRAM ComfyUI is holding before the external trainer starts.
        loadedmodels = model_management.current_loaded_models
        unloaded_model = False
        for i in range(len(loadedmodels) - 1, -1, -1):
            m = loadedmodels.pop(i)
            m.model_unload()
            del m
            unloaded_model = True
        if unloaded_model:
            model_management.soft_empty_cache()

        print(model_management.current_loaded_models)

        # Per-run flag lists for accelerate and the training script.
        launch_args = []
        ext_args = []

        train_data_dir = data_path.replace("\\", "/")

        # Largest unsigned 32-bit value; seeds the trainer for reproducibility.
        theseed = random.randint(0, 2**32 - 1)

        if multi_gpu:
            launch_args.append("--multi_gpu")

        if lowram:
            ext_args.append("--lowram")

        if is_v2_model:
            ext_args.append("--v2")
        else:
            ext_args.append(f"--clip_skip={clip_skip}")

        if parameterization:
            ext_args.append("--v_parameterization")

        if train_unet_only:
            ext_args.append("--network_train_unet_only")

        if train_text_encoder_only:
            ext_args.append("--network_train_text_encoder_only")

        if network_weights:
            ext_args.append(f"--network_weights={network_weights}")

        if reg_data_dir:
            ext_args.append(f"--reg_data_dir={reg_data_dir}")

        if optimizer_type:
            ext_args.append(f"--optimizer_type={optimizer_type}")

        if optimizer_type == "DAdaptation":
            ext_args.append("--optimizer_args")
            ext_args.append("decouple=True")

        if network_module == "lycoris.kohya":
            ext_args.extend([
                "--network_args",
                f"conv_dim={conv_dim}",
                f"conv_alpha={conv_alpha}",
                f"algo={algo}",
                f"dropout={dropout}"
            ])

        if noise_offset != 0:
            ext_args.append(f"--noise_offset={noise_offset}")

        if stop_text_encoder_training != 0:
            ext_args.append(f"--stop_text_encoder_training={stop_text_encoder_training}")

        if save_state == 1:
            ext_args.append("--save_state")

        if resume:
            ext_args.append(f"--resume={resume}")

        if min_snr_gamma != 0:
            ext_args.append(f"--min_snr_gamma={min_snr_gamma}")

        if persistent_data_loader_workers:
            ext_args.append("--persistent_data_loader_workers")

        if use_wandb == 1:
            ext_args.append("--log_with=all")
            if wandb_api_key:
                ext_args.append(f"--wandb_api_key={wandb_api_key}")
            if log_tracker_name:
                ext_args.append(f"--log_tracker_name={log_tracker_name}")
        else:
            ext_args.append("--log_with=tensorboard")
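
        # Flatten the flag lists into strings and append them to a single shell command;
        # the training script is located by walking the working directory for the
        # bundled 'sd-scripts' checkout.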
        launchargs = ' '.join(launch_args)
        extargs = ' '.join(ext_args)

        pretrained_model = folder_paths.get_full_path("checkpoints", ckpt_name)

        progpath = os.getcwd()
        nodespath = ''
        for dirpath, dirnames, filenames in os.walk(progpath):
            if 'sd-scripts' in dirnames:
                nodespath = dirpath + '/sd-scripts/train_network.py'
                print(nodespath)

        nodespath = nodespath.replace("\\", "/")

        command = "python -m accelerate.commands.launch " + launchargs + f' --num_cpu_threads_per_process=8 "{nodespath}" --enable_bucket --pretrained_model_name_or_path="{pretrained_model}" --train_data_dir="{train_data_dir}" --output_dir="{output_dir}" --logging_dir="{logging_dir}" --log_prefix={output_name} --resolution={resolution} --network_module={network_module} --max_train_epochs={max_train_epoches} --learning_rate={lr} --unet_lr={unet_lr} --text_encoder_lr={text_encoder_lr} --lr_scheduler={lr_scheduler} --lr_warmup_steps={lr_warmup_steps} --lr_scheduler_num_cycles={lr_restart_cycles} --network_dim={network_dim} --network_alpha={network_alpha} --output_name={output_name} --train_batch_size={batch_size} --save_every_n_epochs={save_every_n_epochs} --mixed_precision="{mixed_precision}" --save_precision="fp16" --seed={theseed} --cache_latents --prior_loss_weight=1 --max_token_length=225 --caption_extension="{caption_extension}" --save_model_as={save_model_as} --min_bucket_reso={min_bucket_reso} --max_bucket_reso={max_bucket_reso} --keep_tokens={keep_tokens} --xformers --shuffle_caption ' + extargs

        subprocess.run(command, shell=True)
        print("Train finished")

        return ()


class LoraTraininginComfyAdvanced:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                "v2": (["No", "Yes"], ),
                "networkmodule": (["networks.lora", "lycoris.kohya"], ),
                "networkdimension": ("INT", {"default": 32, "min": 0}),
                "networkalpha": ("INT", {"default": 32, "min": 0}),
                "trainingresolution": ("INT", {"default": 512, "step": 8}),
                "data_path": ("STRING", {"default": "Insert path of image folders"}),
                "batch_size": ("INT", {"default": 1, "min": 1}),
                "max_train_epoches": ("INT", {"default": 10, "min": 1}),
                "save_every_n_epochs": ("INT", {"default": 10, "min": 1}),
                "keeptokens": ("INT", {"default": 0, "min": 0}),
                "minSNRgamma": ("FLOAT", {"default": 0, "min": 0, "step": 0.1}),
                "learningrateText": ("FLOAT", {"default": 0.0001, "min": 0, "step": 0.00001}),
                "learningrateUnet": ("FLOAT", {"default": 0.0001, "min": 0, "step": 0.00001}),
                "learningRateScheduler": (["cosine_with_restarts", "linear", "cosine", "polynomial", "constant", "constant_with_warmup"], ),
                "lrRestartCycles": ("INT", {"default": 1, "min": 1}),
                "optimizerType": (["AdamW8bit", "Lion8bit", "SGDNesterov8bit", "AdaFactor", "prodigy"], ),
                "output_name": ("STRING", {"default": 'Desired name for LoRA.'}),
                "algorithm": (["lora", "loha", "lokr", "ia3", "dylora", "locon"], ),
                "networkDropout": ("FLOAT", {"default": 0, "step": 0.1}),
                "clip_skip": ("INT", {"default": 2, "min": 1}),
                "output_dir": ("STRING", {"default": 'models/loras'}),
            },
        }

    RETURN_TYPES = ()
    RETURN_NAMES = ()
    FUNCTION = "loratraining"
    OUTPUT_NODE = True
    CATEGORY = "LJRE/LORA"

    def loratraining(self, ckpt_name, v2, networkmodule, networkdimension, networkalpha, trainingresolution, data_path, batch_size, max_train_epoches, save_every_n_epochs, keeptokens, minSNRgamma, learningrateText, learningrateUnet, learningRateScheduler, lrRestartCycles, optimizerType, output_name, algorithm, networkDropout, clip_skip, output_dir):
        # Free any models/VRAM ComfyUI is holding before the external trainer starts.
        loadedmodels = model_management.current_loaded_models
        unloaded_model = False
        for i in range(len(loadedmodels) - 1, -1, -1):
            m = loadedmodels.pop(i)
            m.model_unload()
            del m
            unloaded_model = True
        if unloaded_model:
            model_management.soft_empty_cache()

        # Per-run flag lists for accelerate and the training script.
        launch_args = []
        ext_args = []

        train_data_dir = data_path.replace("\\", "/")
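
        # Defaults for this run; the node inputs below override them, while anything
        # not exposed as an input keeps the module-level value.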
        is_v2_model = 0
        network_module = "networks.lora"
        network_dim = 32
        network_alpha = 32
        resolution = "512,512"
        keep_tokens = 0
        min_snr_gamma = 0
        unet_lr = "1e-4"
        text_encoder_lr = "1e-5"
        lr_scheduler = "cosine_with_restarts"
        lr_restart_cycles = 1
        optimizer_type = "AdamW8bit"
        algo = "lora"
        dropout = 0.0

        if v2 == "Yes":
            is_v2_model = 1

        network_module = networkmodule
        network_dim = networkdimension
        network_alpha = networkalpha
        resolution = f"{trainingresolution},{trainingresolution}"

        # Render the learning-rate floats compactly (e.g. 0.0001 -> "0.0001",
        # 0.00001 -> "1e-05") so they pass through the command line unchanged.
        text_encoder_lr = f"{learningrateText:g}"
        unet_lr = f"{learningrateUnet:g}"

        keep_tokens = keeptokens
        min_snr_gamma = minSNRgamma
        lr_scheduler = learningRateScheduler
        lr_restart_cycles = lrRestartCycles
        optimizer_type = optimizerType
        algo = algorithm
        dropout = f"{networkDropout}"

        # Largest unsigned 32-bit value; seeds the trainer for reproducibility.
        theseed = random.randint(0, 2**32 - 1)

        if multi_gpu:
            launch_args.append("--multi_gpu")

        if lowram:
            ext_args.append("--lowram")

        if is_v2_model:
            ext_args.append("--v2")
        else:
            ext_args.append(f"--clip_skip={clip_skip}")

        if parameterization:
            ext_args.append("--v_parameterization")

        if train_unet_only:
            ext_args.append("--network_train_unet_only")

        if train_text_encoder_only:
            ext_args.append("--network_train_text_encoder_only")

        if network_weights:
            ext_args.append(f"--network_weights={network_weights}")

        if reg_data_dir:
            ext_args.append(f"--reg_data_dir={reg_data_dir}")

        if optimizer_type:
            ext_args.append(f"--optimizer_type={optimizer_type}")

        if optimizer_type == "DAdaptation":
            ext_args.append("--optimizer_args")
            ext_args.append("decouple=True")

        if network_module == "lycoris.kohya":
            ext_args.extend([
                "--network_args",
                f"conv_dim={conv_dim}",
                f"conv_alpha={conv_alpha}",
                f"algo={algo}",
                f"dropout={dropout}"
            ])

        if noise_offset != 0:
            ext_args.append(f"--noise_offset={noise_offset}")

        if stop_text_encoder_training != 0:
            ext_args.append(f"--stop_text_encoder_training={stop_text_encoder_training}")

        if save_state == 1:
            ext_args.append("--save_state")

        if resume:
            ext_args.append(f"--resume={resume}")

        if min_snr_gamma != 0:
            ext_args.append(f"--min_snr_gamma={min_snr_gamma}")

        if persistent_data_loader_workers:
            ext_args.append("--persistent_data_loader_workers")

        if use_wandb == 1:
            ext_args.append("--log_with=all")
            if wandb_api_key:
                ext_args.append(f"--wandb_api_key={wandb_api_key}")
            if log_tracker_name:
                ext_args.append(f"--log_tracker_name={log_tracker_name}")
        else:
            ext_args.append("--log_with=tensorboard")

        launchargs = ' '.join(launch_args)
        extargs = ' '.join(ext_args)

        pretrained_model = folder_paths.get_full_path("checkpoints", ckpt_name)

        progpath = os.getcwd()
        nodespath = ''
        for dirpath, dirnames, filenames in os.walk(progpath):
            if 'sd-scripts' in dirnames:
                nodespath = dirpath + '/sd-scripts/train_network.py'
                print(nodespath)

        nodespath = nodespath.replace("\\", "/")

        command = "python -m accelerate.commands.launch " + launchargs + f' --num_cpu_threads_per_process=8 "{nodespath}" --enable_bucket --pretrained_model_name_or_path="{pretrained_model}" --train_data_dir="{train_data_dir}" --output_dir="{output_dir}" --logging_dir="{logging_dir}" --log_prefix={output_name} --resolution={resolution} --network_module={network_module} --max_train_epochs={max_train_epoches} --learning_rate={lr} --unet_lr={unet_lr} --text_encoder_lr={text_encoder_lr} --lr_scheduler={lr_scheduler} --lr_warmup_steps={lr_warmup_steps} --lr_scheduler_num_cycles={lr_restart_cycles} --network_dim={network_dim} --network_alpha={network_alpha} --output_name={output_name} --train_batch_size={batch_size} --save_every_n_epochs={save_every_n_epochs} --mixed_precision="{mixed_precision}" --save_precision="fp16" --seed={theseed} --cache_latents --prior_loss_weight=1 --max_token_length=225 --caption_extension="{caption_extension}" --save_model_as={save_model_as} --min_bucket_reso={min_bucket_reso} --max_bucket_reso={max_bucket_reso} --keep_tokens={keep_tokens} --xformers --shuffle_caption ' + extargs

        subprocess.run(command, shell=True)
        print("Train finished")

        return ()
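

# Small helper node: starts a TensorBoard server pointed at the "logs" directory
# that training writes to (TensorBoard serves on http://localhost:6006 by default).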
class TensorboardAccess:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
            },
        }

    RETURN_TYPES = ()
    RETURN_NAMES = ()
    FUNCTION = "opentensorboard"
    OUTPUT_NODE = True
    CATEGORY = "LJRE/LORA"

    def opentensorboard(self):
        command = 'tensorboard --logdir="logs"'
        subprocess.Popen(command, shell=True)
        return ()