Update tokenizer_config.json

#1
by datnguyen - opened
Files changed (1)
  1. tokenizer_config.json +12 -8
tokenizer_config.json CHANGED
@@ -24,6 +24,14 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "151646": {
+      "content": "<|PAD_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -31,18 +39,14 @@
     "<|im_end|>"
   ],
   "bos_token": null,
-  "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
+  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a talented biomedical assistant named MedVi<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
-  "max_length": 512,
-  "model_max_length": 32768,
-  "pad_token": "<|endoftext|>",
-  "padding_side": "right",
+  "model_max_length": 131072,
+  "pad_token": "<|PAD_TOKEN|>",
+  "padding_side": "left",
   "split_special_tokens": false,
-  "stride": 0,
   "tokenizer_class": "Qwen2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": null
 }
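
A minimal sketch to sanity-check the updated config with transformers (the local path "./" and the sample user message are assumptions for illustration, not part of this PR; substitute the actual repo id):

from transformers import AutoTokenizer

# Load the tokenizer from a checkout containing the updated tokenizer_config.json.
tok = AutoTokenizer.from_pretrained("./")

# The new chat_template injects the MedVi system prompt when no system turn is given.
messages = [{"role": "user", "content": "What is metformin prescribed for?"}]  # made-up example turn
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are a talented biomedical assistant named MedVi<|im_end|>
# <|im_start|>user
# What is metformin prescribed for?<|im_end|>
# <|im_start|>assistant

# Padding now uses the dedicated <|PAD_TOKEN|> (id 151646) on the left,
# and model_max_length is 131072.
print(tok.pad_token, tok.pad_token_id, tok.padding_side, tok.model_max_length)
batch = tok(["short", "a somewhat longer input"], padding=True)
# With padding_side "left", pad ids (151646) are prepended to the shorter sequence.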