aapot committed
Commit: e0ba7eb
Parent(s): b221124

Try fix git
Files changed:
- README.md +0 -0
- added_tokens.json +0 -0
- config.json +1 -1
- flax_model_to_pytorch.py +0 -0
- merges.txt +0 -0
- replace_token_script.py +0 -0
- run_clm_flax.py +1 -1
- special_tokens_map.json +0 -0
- start_train.sh +6 -6
- tokenizer.json +0 -0
- tokenizer_config.json +0 -0
- train_tokenizer.py +0 -0
- vocab.json +0 -0
README.md
CHANGED
File without changes
added_tokens.json
CHANGED
File without changes
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "./",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
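With `_name_or_path` set to `"./"`, the config points at the repository root rather than a Hub model id. A minimal sketch of loading this checkpoint locally, assuming the repo is the current working directory:

from transformers import AutoConfig, AutoModelForCausalLM

# "./" matches the updated _name_or_path: read config.json and the
# weights from the local repo directory instead of downloading by id.
config = AutoConfig.from_pretrained("./")
model = AutoModelForCausalLM.from_pretrained("./")  # resolves to GPT2LMHeadModel via "architectures"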
flax_model_to_pytorch.py
CHANGED
File without changes
merges.txt
CHANGED
File without changes
replace_token_script.py
CHANGED
File without changes
run_clm_flax.py
CHANGED
@@ -61,7 +61,7 @@ from transformers import (
 from transformers.file_utils import get_full_repo_name
 from transformers.testing_utils import CaptureLogger

-from distributed_shampoo import distributed_shampoo, GraftingType
+# from distributed_shampoo import distributed_shampoo, GraftingType


 logger = logging.getLogger(__name__)
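The commit disables Distributed Shampoo by commenting out its import, so the script presumably falls back to its other optimizer path. An alternative sketch, not what this commit does, would guard the import so the module loads whether or not the package is installed:

# Optional-import guard: keep the names defined whether or not the
# distributed_shampoo package is available in the environment.
try:
    from distributed_shampoo import distributed_shampoo, GraftingType
    HAS_SHAMPOO = True
except ImportError:
    distributed_shampoo, GraftingType = None, None
    HAS_SHAMPOO = False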
special_tokens_map.json
CHANGED
File without changes
start_train.sh
CHANGED
@@ -10,19 +10,19 @@ python3 run_clm_flax.py \
     --dataset_filepath="/researchdisk/training_dataset_full_deduplicated" \
     --do_train --do_eval \
     --block_size="512" \
-    --per_device_train_batch_size="
-    --per_device_eval_batch_size="
-    --preprocessing_num_workers="
+    --per_device_train_batch_size="8" \
+    --per_device_eval_batch_size="8" \
+    --preprocessing_num_workers="96" \
     --adam_beta1="0.9" \
     --adam_beta2="0.98" \
-    --learning_rate="
+    --learning_rate="4e-5" \
     --weight_decay="0.01" \
     --warmup_steps="4000" \
     --cosine_decay \
     --overwrite_output_dir \
     --logging_steps="500" \
-    --eval_steps="
-    --save_steps="
+    --eval_steps="100" \
+    --save_steps="100" \
     --num_train_epochs="5" \
     --dtype="bfloat16" \
     --push_to_hub \
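The new per-device batch size of 8 is multiplied across accelerator cores inside run_clm_flax.py to get the global batch size; on an 8-core TPU (core count is an assumption, it is not stated in the diff) that works out as below:

import jax

# Global batch = per-device batch x device count, as the Flax CLM
# script computes it. A device count of 8 (e.g. TPU v3-8) is an assumption.
per_device_train_batch_size = 8
train_batch_size = per_device_train_batch_size * jax.device_count()
print(train_batch_size)  # 64 on an 8-core TPU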
tokenizer.json
CHANGED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
CHANGED
File without changes
train_tokenizer.py
CHANGED
File without changes
vocab.json
CHANGED
File without changes