End of training
- README.md +5 -4
- config.json +1 -1
- generation_config.json +1 -1
- model.safetensors +1 -1
- tokenizer.json +6 -1
- tokenizer_config.json +1 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -29,17 +29,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
+
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.
-- Transformers: 4.
+- TRL: 0.13.0
+- Transformers: 4.48.0
 - Pytorch: 2.5.1
-- Datasets: 3.
-- Tokenizers: 0.
+- Datasets: 3.2.0
+- Tokenizers: 0.21.0
 
 ## Citations
 
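The hunk context ends with `print(output["generated_text"])`, presumably the tail of the card's quick-start snippet. A minimal sketch of that usage, assuming a transformers text-generation pipeline and a placeholder repository id:

```python
from transformers import pipeline

# "your-username/your-sft-model" is a placeholder; substitute the actual repo id.
generator = pipeline("text-generation", model="your-username/your-sft-model")
output = generator(
    [{"role": "user", "content": "What is supervised fine-tuning?"}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```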
config.json
CHANGED
@@ -27,7 +27,7 @@
   "rope_theta": 100000,
   "tie_word_embeddings": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.48.0",
   "use_cache": true,
   "vocab_size": 49152
 }
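Only `transformers_version` changes here, pinned to the 4.48.0 release that saved the checkpoint. As a minimal sketch, assuming a placeholder repository id, the surrounding fields can be read back through `AutoConfig`:

```python
from transformers import AutoConfig

# Placeholder repo id; the field values are the ones shown in the diff above.
config = AutoConfig.from_pretrained("your-username/your-sft-model")
print(config.rope_theta)            # 100000
print(config.vocab_size)            # 49152
print(config.tie_word_embeddings)   # True
```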
generation_config.json
CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 2,
-  "transformers_version": "4.
+  "transformers_version": "4.48.0"
 }
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5869564fc98e8c817bb3259acee67d509c4161b594a2b57c61ca83f7aab07109
 size 538090408
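The weights file is a Git LFS pointer: only the `oid sha256:` digest and byte size live in the repository, while the blob itself is fetched from LFS storage. A minimal sketch of checking a downloaded copy against the pointer, using only the standard library (the local path is an assumption):

```python
import hashlib

# Expected values come from the pointer above; the local file location is an
# assumption about where the weights were downloaded.
path = "model.safetensors"
expected_oid = "5869564fc98e8c817bb3259acee67d509c4161b594a2b57c61ca83f7aab07109"
expected_size = 538090408

digest = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

print("size ok:", size == expected_size)
print("oid ok:", digest.hexdigest() == expected_oid)
```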
tokenizer.json
CHANGED
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation":
+  "truncation": {
+    "direction": "Right",
+    "max_length": 1024,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": null,
   "added_tokens": [
     {
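The change replaces the previous truncation entry with an explicit block: right-side truncation at 1024 tokens, longest-first strategy, no stride. As a rough sketch of how the same state is produced with the `tokenizers` library (the local file path is an assumption):

```python
from tokenizers import Tokenizer

# Load the serialized fast tokenizer, enable the same truncation settings
# that appear in the diff, and write the file back out.
tok = Tokenizer.from_file("tokenizer.json")
tok.enable_truncation(max_length=1024, stride=0, strategy="longest_first", direction="right")
tok.save("tokenizer.json")
```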
tokenizer_config.json
CHANGED
@@ -146,6 +146,7 @@
   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
   "model_max_length": 8192,
   "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
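The `chat_template` shown above is a ChatML-style Jinja template (`<|im_start|>role ... <|im_end|>`), and the added `extra_special_tokens` entry is just an empty map. A minimal sketch of rendering the template, assuming a placeholder repository id:

```python
from transformers import AutoTokenizer

# Placeholder repo id; the template is the one shown in the diff above.
tokenizer = AutoTokenizer.from_pretrained("your-username/your-sft-model")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```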
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:210c7ede970dcf902a0bd65ba35e79047a790bff78b36a9b3be02b3d40bf78af
+size 5624