# Set training environment and hyperparameters
unset LD_PRELOAD  # drop any preloaded allocator (set by default on TPU VMs) that can conflict with JAX
export HF_DATASETS_CACHE="/researchdisk/datasets_cache"  # keep the datasets cache on the large research disk
export USE_TORCH=0  # tell transformers to skip the PyTorch backend; training runs on Flax
python3 run_clm_flax.py \
    --output_dir="./" \
    --model_type="gpt2" \
    --config_name="./" \
    --tokenizer_name="./" \
    --dataset_filepath="/researchdisk/training_dataset_full_deduplicated" \
    --do_train --do_eval \
    --block_size="512" \
    --per_device_train_batch_size="8" \
    --per_device_eval_batch_size="8" \
    --preprocessing_num_workers="96" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --learning_rate="4e-5" \
    --weight_decay="0.01" \
    --warmup_steps="4000" \
    --cosine_decay \
    --overwrite_output_dir \
    --logging_steps="500" \
    --eval_steps="10000" \
    --save_steps="10000" \
    --num_train_epochs="5" \
    --dtype="bfloat16" \
    --push_to_hub \
    --hub_model_id="Finnish-NLP/gpt2-large-finnish"