adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
cache_dir: null
caption_column: text
center_crop: true
checkpointing_steps: 500
checkpoints_total_limit: null
dataloader_num_workers: 4
dataset_config_name: null
dataset_name: lambdalabs/pokemon-blip-captions
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 4
gradient_checkpointing: false
hub_model_id: pokemon-lora
hub_token: null
image_column: image
learning_rate: 0.0001
local_rank: -1
logging_dir: logs
lr_scheduler: cosine
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_samples: null
max_train_steps: 15000
mixed_precision: null
noise_offset: 0
num_train_epochs: 72
num_validation_images: 4
output_dir: D:/Git/sddata/finetune/lora/pokemon
pretrained_model_name_or_path: runwayml/stable-diffusion-v1-5
push_to_hub: true
random_flip: true
report_to: tensorboard
resolution: 512
resume_from_checkpoint: null
revision: null
scale_lr: false
seed: 1337
snr_gamma: null
train_batch_size: 1
train_data_dir: null
use_8bit_adam: false
validation_epochs: 1
validation_prompt: Totoro
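These hyperparameters match the `diffusers` `train_text_to_image_lora.py` example script. As a minimal usage sketch (assuming the trained weights were pushed to a Hub repo named `root1101/pokemon-lora`, inferred from `hub_model_id` and the account name, and that a CUDA GPU is available), the LoRA can be loaded on top of the base model like this:

```python
import torch
from diffusers import StableDiffusionPipeline

# Base model the LoRA was trained against (pretrained_model_name_or_path above).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Assumed Hub repo id: hub_model_id ("pokemon-lora") under this account.
pipe.load_lora_weights("root1101/pokemon-lora")
pipe.to("cuda")

# The validation_prompt used during training.
image = pipe("Totoro", num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("totoro.png")
```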