output_dir = '/workspace/training_output'

# Dataset config file.
dataset = 'dataset.toml'

# Training settings
epochs = 50
micro_batch_size_per_gpu = 1
pipeline_stages = 1
gradient_accumulation_steps = 4
gradient_clipping = 1.0
warmup_steps = 100

# Eval settings
eval_every_n_epochs = 5
eval_before_first_step = true
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1

# Misc settings
save_every_n_epochs = 5
checkpoint_every_n_minutes = 30
activation_checkpointing = true
partition_method = 'parameters'
save_dtype = 'bfloat16'
caching_batch_size = 1
steps_per_print = 1
video_clip_mode = 'single_middle'

[model]
type = 'hunyuan-video'
transformer_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
vae_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
llm_path = '/workspace/diffusion-pipe/models/llm'
clip_path = '/workspace/diffusion-pipe/models/clip'
dtype = 'bfloat16'
transformer_dtype = 'float8'
timestep_sample_method = 'logit_normal'

[adapter]
type = 'lora'
rank = 64
dtype = 'bfloat16'

[optimizer]
type = 'adamw_optimi'
lr = 5e-5
betas = [0.9, 0.99]
weight_decay = 0.02
eps = 1e-8
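
# For reference, a minimal sketch of the dataset.toml referenced by `dataset` above.
# This is an assumption based on diffusion-pipe's example dataset config, not part of
# this file; keep it commented out here and put the real contents in dataset.toml.
# The dataset path is hypothetical; point it at your images/videos and caption files,
# and adjust resolutions / frame buckets to your data and VRAM budget.
#
# resolutions = [512]
# enable_ar_bucket = true
# min_ar = 0.5
# max_ar = 2.0
# num_ar_buckets = 7
# frame_buckets = [1, 33]
#
# [[directory]]
# path = '/workspace/dataset'   # hypothetical path to your training data
# num_repeats = 1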