pacozaa committed
Commit 68921ac
Parent: 7511575

Upload model trained with Unsloth

Upload model trained with Unsloth 2x faster

Files changed (4):

1. README.md (+3 -3)
2. special_tokens_map.json (+3 -21)
3. tokenizer.json (+1 -0)
4. tokenizer_config.json (+2 -0)
README.md CHANGED

@@ -1,4 +1,7 @@
 ---
+base_model: unsloth/mistral-7b-bnb-4bit
+datasets:
+- liyucheng/ShareGPT90K
 language:
 - en
 license: apache-2.0
@@ -12,9 +15,6 @@ tags:
 - LoRA Adapter
 - PEFT
 - ollama
-base_model: unsloth/mistral-7b-bnb-4bit
-datasets:
-- liyucheng/ShareGPT90K
 ---

 # Uploaded model
special_tokens_map.json CHANGED

@@ -1,24 +1,6 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|im_end|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<s>",
+  "eos_token": "<|im_end|>",
   "pad_token": "<unk>",
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<unk>"
 }
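Note: the expanded token dicts and the plain string form load to the same special tokens; this change only drops the per-token flags, which were all false anyway. A minimal sanity check, using a placeholder local path in place of this repo:

```python
from transformers import AutoTokenizer

# Placeholder path for illustration; point it at a local clone of this repo.
tokenizer = AutoTokenizer.from_pretrained("./model-dir")

# The simplified string form resolves to the same tokens the dict form did.
print(tokenizer.bos_token)  # <s>
print(tokenizer.eos_token)  # <|im_end|>
print(tokenizer.pad_token)  # <unk>
print(tokenizer.unk_token)  # <unk>
```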
tokenizer.json CHANGED

@@ -134,6 +134,7 @@
     "end_of_word_suffix": null,
     "fuse_unk": true,
     "byte_fallback": true,
+    "ignore_merges": false,
     "vocab": {
       "<unk>": 0,
       "<s>": 1,
tokenizer_config.json CHANGED

@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -31,6 +32,7 @@
   "chat_template": "{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
+  "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<unk>",
   "tokenizer_class": "LlamaTokenizer",