Upload tokenizer
Files changed:
- tokenizer.json (+2, -2)
- tokenizer_config.json (+0, -1)
tokenizer.json CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:21a68e564414e6bcedce28fdd835307053ec0080f2bcd0993f95753889dce3b3
+size 34356481
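Both versions of tokenizer.json are Git LFS pointer files (spec v1): the "oid sha256:" line records the SHA-256 of the actual file contents and "size" records its length in bytes, so this commit swaps which ~34 MB blob the pointer resolves to. As a minimal sketch (the local file path is an assumption), a downloaded copy can be checked against the new pointer like this:

```python
# Sketch: verify a locally materialized tokenizer.json against the Git LFS
# pointer committed above. The local path is an assumption; the expected oid
# and size come straight from the new pointer lines.
import hashlib
import os

EXPECTED_OID = "21a68e564414e6bcedce28fdd835307053ec0080f2bcd0993f95753889dce3b3"
EXPECTED_SIZE = 34356481  # bytes, from the pointer's "size" line

def matches_lfs_pointer(path: str) -> bool:
    """Return True if the file at path has the pointer's byte size and SHA-256 oid."""
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_OID

print(matches_lfs_pointer("tokenizer.json"))  # hypothetical local copy
```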
tokenizer_config.json CHANGED

@@ -1744,7 +1744,6 @@
     "<end_of_turn>"
   ],
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
   "model_max_length": 1000000000000000019884624838656,
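The removed "chat_template" entry is a Jinja template that wraps each message in Gemma-style <start_of_turn>role ... <end_of_turn> markers, maps the assistant role to "model", rejects system messages, and enforces alternating user/model turns. As a sketch of the prompt format it produced (rendered here with plain jinja2 rather than the tokenizer's apply_chat_template machinery, and with a made-up conversation):

```python
# Sketch: render the removed chat_template with plain Jinja2 to show the prompt
# format it produced. The sample conversation is invented for illustration;
# transformers would normally apply this template via tokenizer.apply_chat_template.
from jinja2 import Environment

CHAT_TEMPLATE = (
    "{{ bos_token }}"
    "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}"
    "{% for message in messages %}"
    "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
    "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
    "{% endif %}"
    "{% if (message['role'] == 'assistant') %}{% set role = 'model' %}"
    "{% else %}{% set role = message['role'] %}{% endif %}"
    "{{ '<start_of_turn>' + role + '\\n' + message['content'] | trim + '<end_of_turn>\\n' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{'<start_of_turn>model\\n'}}{% endif %}"
)

def raise_exception(message):
    """Stand-in for the helper that transformers exposes to chat templates."""
    raise ValueError(message)

template = Environment().from_string(CHAT_TEMPLATE, globals={"raise_exception": raise_exception})

messages = [
    {"role": "user", "content": "What is Git LFS?"},
    {"role": "assistant", "content": "A way to store large files outside the Git history."},
    {"role": "user", "content": "Thanks!"},
]
print(template.render(messages=messages, bos_token="<bos>", add_generation_prompt=True))
# <bos><start_of_turn>user
# What is Git LFS?<end_of_turn>
# <start_of_turn>model
# A way to store large files outside the Git history.<end_of_turn>
# <start_of_turn>user
# Thanks!<end_of_turn>
# <start_of_turn>model
```

With the template removed from tokenizer_config.json, downstream code can no longer rely on the config to supply this chat format and would have to pass a template (or a fully formatted prompt) explicitly.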