Upload 22 files
- .gitattributes +1 -0
- all_results.json +17 -0
- checkpoint-236688/config.json +29 -0
- checkpoint-236688/optimizer.pt +3 -0
- checkpoint-236688/pytorch_model.bin +3 -0
- checkpoint-236688/rng_state.pth +3 -0
- checkpoint-236688/scheduler.pt +3 -0
- checkpoint-236688/special_tokens_map.json +1 -0
- checkpoint-236688/spiece.model +3 -0
- checkpoint-236688/tokenizer_config.json +1 -0
- checkpoint-236688/trainer_state.json +144 -0
- checkpoint-236688/training_args.bin +3 -0
- config.json +29 -0
- eval_results.json +10 -0
- generated_predictions.txt +3 -0
- predict_results.json +9 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- tokenizer_config.json +1 -0
- train_results.json +8 -0
- trainer_state.json +153 -0
- training_args.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+generated_predictions.txt filter=lfs diff=lfs merge=lfs -text
all_results.json
ADDED
@@ -0,0 +1,17 @@
{
    "eval_loss": 2.7799501419067383,
    "eval_sacrebleu": 8.1449,
    "eval_gen_len": 47.0545,
    "eval_runtime": 1481.1488,
    "eval_samples_per_second": 1.537,
    "eval_steps_per_second": 0.769,
    "epoch": 8.0,
    "eval_samples": 2277,
    "predict_loss": 2.773041248321533,
    "predict_sacrebleu": 8.4276,
    "predict_gen_len": 47.2727,
    "predict_runtime": 154422.7036,
    "predict_samples_per_second": 1.518,
    "predict_steps_per_second": 0.759,
    "predict_samples": 234392
}
checkpoint-236688/config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_name_or_path": "csebuetnlp/banglat5",
  "architectures": [
    "T5ForConditionalGeneration"
  ],
  "d_ff": 2048,
  "d_kv": 64,
  "d_model": 768,
  "decoder_start_token_id": 0,
  "dropout_rate": 0.1,
  "eos_token_id": 1,
  "feed_forward_proj": "gated-gelu",
  "gradient_checkpointing": false,
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
  "layer_norm_epsilon": 1e-06,
  "model_type": "t5",
  "num_decoder_layers": 12,
  "num_heads": 12,
  "num_layers": 12,
  "output_past": true,
  "pad_token_id": 0,
  "relative_attention_num_buckets": 32,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.11.0.dev0",
  "use_cache": true,
  "vocab_size": 32100
}
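This checkpoint keeps the configuration of the base csebuetnlp/banglat5 model: a T5 encoder-decoder with 12 layers on each side, d_model 768, gated-GELU feed-forward blocks, and a 32,100-token vocabulary. A minimal sketch of inspecting it with transformers, assuming the checkpoint directory has been downloaded locally (the path is illustrative):

```python
from transformers import T5Config

# Hypothetical local path to the uploaded checkpoint directory.
config = T5Config.from_pretrained("checkpoint-236688")
print(config.num_layers, config.num_decoder_layers, config.d_model)  # 12 12 768
```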
checkpoint-236688/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:090882263d999594bab27f09c78c107c5d07fc6362a9b6838b92c6c42e74553f
size 1980458179
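optimizer.pt, like the other large binaries in this commit, is stored as a Git LFS pointer: a three-line text stub carrying the LFS spec version, the SHA-256 of the real object, and its size in bytes (here about 1.98 GB of optimizer state). If a clone has not run `git lfs pull`, the file on disk is still this stub; a small illustrative sketch of parsing it:

```python
# Parse a Git LFS pointer file into a dict such as
# {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:0908...", "size": "1980458179"}.
def read_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("checkpoint-236688/optimizer.pt")  # hypothetical local path
print(pointer["oid"], int(pointer["size"]))
```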
checkpoint-236688/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd9bcd2521c9e6080d5f97c0fa99c5ef00c22273b7bc74c87161e8237c829558
size 990273369
checkpoint-236688/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02471ae2a5bf632e5b42fde6317e60023d59cb6b345b90b8fa6fa3d7390f7ac9
size 14657
checkpoint-236688/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94c54b41671c92a8abfbe0513c59f51e87fe5f9020cfec985e4e7ffd21e4e89c
size 623
checkpoint-236688/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
checkpoint-236688/spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dcab96935a2a51b1461c84e44c952ea8a3640c8bc3e2c6ae7a21d855454ae27
size 1111492
checkpoint-236688/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/rifat/.cache/huggingface/transformers/ff0ed476d41a6f336fa52bd906c6c8f0a8684fe67bec634b201ed2d24331c915.c94798918c92ded6aeef2d2f0e666d2cc4145eca1aa6e1336fde07f2e13e2f46", "tokenizer_file": null, "name_or_path": "csebuetnlp/banglat5", "tokenizer_class": "T5Tokenizer"}
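The tokenizer files describe the stock T5Tokenizer setup: the SentencePiece model in spiece.model, 100 <extra_id_*> sentinel tokens, and model_max_length 512. A sketch of loading it, assuming the checkpoint directory is available locally:

```python
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("checkpoint-236688")  # hypothetical local path
print(tokenizer.model_max_length)                # 512
print(len(tokenizer.additional_special_tokens))  # 100 sentinel tokens
```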
checkpoint-236688/trainer_state.json
ADDED
@@ -0,0 +1,144 @@
{
  "best_metric": 0.5348,
  "best_model_checkpoint": "outputs/checkpoint-29586",
  "epoch": 8.0,
  "global_step": 236688,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 0.0004469415766030179,
      "loss": 3.6106,
      "step": 29586
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 15.296,
      "eval_loss": 2.7799501419067383,
      "eval_runtime": 368.7401,
      "eval_sacrebleu": 0.5348,
      "eval_samples_per_second": 6.175,
      "eval_steps_per_second": 3.089,
      "step": 29586
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00038309277994544387,
      "loss": 2.7653,
      "step": 59172
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 14.7356,
      "eval_loss": 2.742591619491577,
      "eval_runtime": 368.9941,
      "eval_sacrebleu": 0.4271,
      "eval_samples_per_second": 6.171,
      "eval_steps_per_second": 3.087,
      "step": 59172
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0003192439832878699,
      "loss": 2.7302,
      "step": 88758
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 14.7444,
      "eval_loss": 2.7364578247070312,
      "eval_runtime": 357.3278,
      "eval_sacrebleu": 0.4211,
      "eval_samples_per_second": 6.372,
      "eval_steps_per_second": 3.188,
      "step": 88758
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0002553951866302959,
      "loss": 2.705,
      "step": 118344
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 14.8669,
      "eval_loss": 2.7070209980010986,
      "eval_runtime": 365.3611,
      "eval_sacrebleu": 0.4635,
      "eval_samples_per_second": 6.232,
      "eval_steps_per_second": 3.117,
      "step": 118344
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.00019154638997272193,
      "loss": 2.6613,
      "step": 147930
    },
    {
      "epoch": 5.0,
      "eval_gen_len": 13.9183,
      "eval_loss": 2.6642842292785645,
      "eval_runtime": 363.1965,
      "eval_sacrebleu": 0.3438,
      "eval_samples_per_second": 6.269,
      "eval_steps_per_second": 3.136,
      "step": 147930
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.00012769759331514796,
      "loss": 2.6045,
      "step": 177516
    },
    {
      "epoch": 6.0,
      "eval_gen_len": 14.9087,
      "eval_loss": 2.614426374435425,
      "eval_runtime": 359.5255,
      "eval_sacrebleu": 0.4651,
      "eval_samples_per_second": 6.333,
      "eval_steps_per_second": 3.168,
      "step": 177516
    },
    {
      "epoch": 7.0,
      "learning_rate": 6.384879665757398e-05,
      "loss": 2.5271,
      "step": 207102
    },
    {
      "epoch": 7.0,
      "eval_gen_len": 14.6271,
      "eval_loss": 2.5494837760925293,
      "eval_runtime": 358.1093,
      "eval_sacrebleu": 0.468,
      "eval_samples_per_second": 6.358,
      "eval_steps_per_second": 3.181,
      "step": 207102
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.0,
      "loss": 2.4325,
      "step": 236688
    },
    {
      "epoch": 8.0,
      "eval_gen_len": 14.7071,
      "eval_loss": 2.4964401721954346,
      "eval_runtime": 362.2324,
      "eval_sacrebleu": 0.484,
      "eval_samples_per_second": 6.286,
      "eval_steps_per_second": 3.144,
      "step": 236688
    }
  ],
  "max_steps": 236688,
  "num_train_epochs": 8,
  "total_flos": 3.211579566884874e+18,
  "trial_name": null,
  "trial_params": null
}
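trainer_state.json logs one training entry and one evaluation entry per epoch; best_model_checkpoint points at outputs/checkpoint-29586 because the in-training eval_sacrebleu peaked at epoch 1 (0.5348). A sketch of pulling the per-epoch curve out of log_history, assuming the file is available locally:

```python
import json

# Hypothetical local path to the checkpoint's trainer state.
with open("checkpoint-236688/trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_sacrebleu" in e]

for tr, ev in zip(train_logs, eval_logs):
    print(f"epoch {int(tr['epoch'])}: loss={tr['loss']:.4f}  "
          f"eval_loss={ev['eval_loss']:.4f}  eval_sacrebleu={ev['eval_sacrebleu']:.4f}")
```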
checkpoint-236688/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c63fd50675c2ea4c0750ddab1d9445c41255e3a9d8b20cb8ac31eb23d682f50a
size 2735
config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_name_or_path": "csebuetnlp/banglat5",
  "architectures": [
    "T5ForConditionalGeneration"
  ],
  "d_ff": 2048,
  "d_kv": 64,
  "d_model": 768,
  "decoder_start_token_id": 0,
  "dropout_rate": 0.1,
  "eos_token_id": 1,
  "feed_forward_proj": "gated-gelu",
  "gradient_checkpointing": false,
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
  "layer_norm_epsilon": 1e-06,
  "model_type": "t5",
  "num_decoder_layers": 12,
  "num_heads": 12,
  "num_layers": 12,
  "output_past": true,
  "pad_token_id": 0,
  "relative_attention_num_buckets": 32,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.11.0.dev0",
  "use_cache": true,
  "vocab_size": 32100
}
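The top-level config.json, pytorch_model.bin, spiece.model, and tokenizer files form a complete seq2seq checkpoint fine-tuned from csebuetnlp/banglat5. A minimal inference sketch, assuming the repository has been cloned and the LFS weights pulled; the path and generation settings are illustrative, not the ones used to produce generated_predictions.txt:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_dir = "."  # hypothetical: root of this repository after `git lfs pull`
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)

inputs = tokenizer("example source sentence", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```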
eval_results.json
ADDED
@@ -0,0 +1,10 @@
{
    "epoch": 8.0,
    "eval_gen_len": 47.0545,
    "eval_loss": 2.7799501419067383,
    "eval_runtime": 1481.1488,
    "eval_sacrebleu": 8.1449,
    "eval_samples": 2277,
    "eval_samples_per_second": 1.537,
    "eval_steps_per_second": 0.769
}
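eval_sacrebleu is a corpus-level SacreBLEU score over the 2,277 evaluation samples (2277 / 1481.1 s ≈ 1.54, matching eval_samples_per_second). A sketch of how such a score can be recomputed from decoded outputs with the sacrebleu package; the data below is placeholder, and the exact metric settings of the training script are not recorded in this commit:

```python
import sacrebleu

# Placeholder decoded outputs and references, one string per example.
predictions = ["this is a prediction", "another output"]
references = [["this is a reference", "another reference"]]  # one reference stream, aligned to predictions

score = sacrebleu.corpus_bleu(predictions, references)
print(round(score.score, 4))  # analogous to "eval_sacrebleu": 8.1449
```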
generated_predictions.txt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bac2ba5767d25920a264dbdbfeca03065bffa054175091133e13083680344073
size 124518156
predict_results.json
ADDED
@@ -0,0 +1,9 @@
{
    "predict_gen_len": 47.2727,
    "predict_loss": 2.773041248321533,
    "predict_runtime": 154422.7036,
    "predict_sacrebleu": 8.4276,
    "predict_samples": 234392,
    "predict_samples_per_second": 1.518,
    "predict_steps_per_second": 0.759
}
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:247b9355f1787c3c32aea79d28cdb11c0bab1ed556b2a3201025deffed2d6872
size 990273369
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dcab96935a2a51b1461c84e44c952ea8a3640c8bc3e2c6ae7a21d855454ae27
size 1111492
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/rifat/.cache/huggingface/transformers/ff0ed476d41a6f336fa52bd906c6c8f0a8684fe67bec634b201ed2d24331c915.c94798918c92ded6aeef2d2f0e666d2cc4145eca1aa6e1336fde07f2e13e2f46", "tokenizer_file": null, "name_or_path": "csebuetnlp/banglat5", "tokenizer_class": "T5Tokenizer"}
train_results.json
ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 8.0,
    "train_loss": 2.7545470263067835,
    "train_runtime": 376582.9978,
    "train_samples": 946752,
    "train_samples_per_second": 20.112,
    "train_steps_per_second": 0.629
}
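The throughput numbers are internally consistent: 946,752 training samples over 8 epochs in roughly 376,583 seconds gives about 20.1 samples per second, and 236,688 optimizer steps over the same time gives about 0.63 steps per second, which suggests train_samples_per_second counts samples processed across all epochs. A quick check:

```python
train_samples = 946_752
num_train_epochs = 8
train_runtime = 376_582.9978  # seconds
max_steps = 236_688

print(round(train_samples * num_train_epochs / train_runtime, 3))  # ≈ 20.112 samples/s
print(round(max_steps / train_runtime, 3))                         # ≈ 0.629 steps/s
```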
trainer_state.json
ADDED
@@ -0,0 +1,153 @@
{
  "best_metric": 0.5348,
  "best_model_checkpoint": "outputs/checkpoint-29586",
  "epoch": 8.0,
  "global_step": 236688,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 0.0004469415766030179,
      "loss": 3.6106,
      "step": 29586
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 15.296,
      "eval_loss": 2.7799501419067383,
      "eval_runtime": 368.7401,
      "eval_sacrebleu": 0.5348,
      "eval_samples_per_second": 6.175,
      "eval_steps_per_second": 3.089,
      "step": 29586
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00038309277994544387,
      "loss": 2.7653,
      "step": 59172
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 14.7356,
      "eval_loss": 2.742591619491577,
      "eval_runtime": 368.9941,
      "eval_sacrebleu": 0.4271,
      "eval_samples_per_second": 6.171,
      "eval_steps_per_second": 3.087,
      "step": 59172
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0003192439832878699,
      "loss": 2.7302,
      "step": 88758
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 14.7444,
      "eval_loss": 2.7364578247070312,
      "eval_runtime": 357.3278,
      "eval_sacrebleu": 0.4211,
      "eval_samples_per_second": 6.372,
      "eval_steps_per_second": 3.188,
      "step": 88758
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0002553951866302959,
      "loss": 2.705,
      "step": 118344
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 14.8669,
      "eval_loss": 2.7070209980010986,
      "eval_runtime": 365.3611,
      "eval_sacrebleu": 0.4635,
      "eval_samples_per_second": 6.232,
      "eval_steps_per_second": 3.117,
      "step": 118344
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.00019154638997272193,
      "loss": 2.6613,
      "step": 147930
    },
    {
      "epoch": 5.0,
      "eval_gen_len": 13.9183,
      "eval_loss": 2.6642842292785645,
      "eval_runtime": 363.1965,
      "eval_sacrebleu": 0.3438,
      "eval_samples_per_second": 6.269,
      "eval_steps_per_second": 3.136,
      "step": 147930
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.00012769759331514796,
      "loss": 2.6045,
      "step": 177516
    },
    {
      "epoch": 6.0,
      "eval_gen_len": 14.9087,
      "eval_loss": 2.614426374435425,
      "eval_runtime": 359.5255,
      "eval_sacrebleu": 0.4651,
      "eval_samples_per_second": 6.333,
      "eval_steps_per_second": 3.168,
      "step": 177516
    },
    {
      "epoch": 7.0,
      "learning_rate": 6.384879665757398e-05,
      "loss": 2.5271,
      "step": 207102
    },
    {
      "epoch": 7.0,
      "eval_gen_len": 14.6271,
      "eval_loss": 2.5494837760925293,
      "eval_runtime": 358.1093,
      "eval_sacrebleu": 0.468,
      "eval_samples_per_second": 6.358,
      "eval_steps_per_second": 3.181,
      "step": 207102
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.0,
      "loss": 2.4325,
      "step": 236688
    },
    {
      "epoch": 8.0,
      "eval_gen_len": 14.7071,
      "eval_loss": 2.4964401721954346,
      "eval_runtime": 362.2324,
      "eval_sacrebleu": 0.484,
      "eval_samples_per_second": 6.286,
      "eval_steps_per_second": 3.144,
      "step": 236688
    },
    {
      "epoch": 8.0,
      "step": 236688,
      "total_flos": 3.211579566884874e+18,
      "train_loss": 2.7545470263067835,
      "train_runtime": 376582.9978,
      "train_samples_per_second": 20.112,
      "train_steps_per_second": 0.629
    }
  ],
  "max_steps": 236688,
  "num_train_epochs": 8,
  "total_flos": 3.211579566884874e+18,
  "trial_name": null,
  "trial_params": null
}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c63fd50675c2ea4c0750ddab1d9445c41255e3a9d8b20cb8ac31eb23d682f50a
size 2735