morriszms commited on
Commit
1638652
·
verified ·
1 Parent(s): d1f0c62

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ magnum-v3-27b-kto-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ magnum-v3-27b-kto-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ magnum-v3-27b-kto-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ magnum-v3-27b-kto-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ magnum-v3-27b-kto-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ magnum-v3-27b-kto-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ magnum-v3-27b-kto-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ magnum-v3-27b-kto-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ magnum-v3-27b-kto-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ magnum-v3-27b-kto-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ magnum-v3-27b-kto-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ magnum-v3-27b-kto-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: gemma
3
+ library_name: transformers
4
+ tags:
5
+ - gemma-2
6
+ - TensorBlock
7
+ - GGUF
8
+ base_model: anthracite-org/magnum-v3-27b-kto
9
+ pipeline_tag: text-generation
10
+ model-index:
11
+ - name: magnum-v3-27b-kto
12
+ results:
13
+ - task:
14
+ type: text-generation
15
+ name: Text Generation
16
+ dataset:
17
+ name: IFEval (0-Shot)
18
+ type: HuggingFaceH4/ifeval
19
+ args:
20
+ num_few_shot: 0
21
+ metrics:
22
+ - type: inst_level_strict_acc and prompt_level_strict_acc
23
+ value: 56.75
24
+ name: strict accuracy
25
+ source:
26
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
27
+ name: Open LLM Leaderboard
28
+ - task:
29
+ type: text-generation
30
+ name: Text Generation
31
+ dataset:
32
+ name: BBH (3-Shot)
33
+ type: BBH
34
+ args:
35
+ num_few_shot: 3
36
+ metrics:
37
+ - type: acc_norm
38
+ value: 41.16
39
+ name: normalized accuracy
40
+ source:
41
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
42
+ name: Open LLM Leaderboard
43
+ - task:
44
+ type: text-generation
45
+ name: Text Generation
46
+ dataset:
47
+ name: MATH Lvl 5 (4-Shot)
48
+ type: hendrycks/competition_math
49
+ args:
50
+ num_few_shot: 4
51
+ metrics:
52
+ - type: exact_match
53
+ value: 15.48
54
+ name: exact match
55
+ source:
56
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
57
+ name: Open LLM Leaderboard
58
+ - task:
59
+ type: text-generation
60
+ name: Text Generation
61
+ dataset:
62
+ name: GPQA (0-shot)
63
+ type: Idavidrein/gpqa
64
+ args:
65
+ num_few_shot: 0
66
+ metrics:
67
+ - type: acc_norm
68
+ value: 14.09
69
+ name: acc_norm
70
+ source:
71
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
72
+ name: Open LLM Leaderboard
73
+ - task:
74
+ type: text-generation
75
+ name: Text Generation
76
+ dataset:
77
+ name: MuSR (0-shot)
78
+ type: TAUR-Lab/MuSR
79
+ args:
80
+ num_few_shot: 0
81
+ metrics:
82
+ - type: acc_norm
83
+ value: 9.92
84
+ name: acc_norm
85
+ source:
86
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
87
+ name: Open LLM Leaderboard
88
+ - task:
89
+ type: text-generation
90
+ name: Text Generation
91
+ dataset:
92
+ name: MMLU-PRO (5-shot)
93
+ type: TIGER-Lab/MMLU-Pro
94
+ config: main
95
+ split: test
96
+ args:
97
+ num_few_shot: 5
98
+ metrics:
99
+ - type: acc
100
+ value: 35.98
101
+ name: accuracy
102
+ source:
103
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v3-27b-kto
104
+ name: Open LLM Leaderboard
105
+ ---
106
+
107
+ <div style="width: auto; margin-left: auto; margin-right: auto">
108
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
109
+ </div>
110
+ <div style="display: flex; justify-content: space-between; width: 100%;">
111
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
112
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
113
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
114
+ </p>
115
+ </div>
116
+ </div>
117
+
118
+ ## anthracite-org/magnum-v3-27b-kto - GGUF
119
+
120
+ This repo contains GGUF format model files for [anthracite-org/magnum-v3-27b-kto](https://huggingface.co/anthracite-org/magnum-v3-27b-kto).
121
+
122
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
123
+
124
+ ## Prompt template
125
+
126
+ ```
127
+ <|im_start|>system
128
+ {system_prompt}<|im_end|>
129
+ <|im_start|>user
130
+ {prompt}<|im_end|>
131
+ <|im_start|>assistant
132
+ ```
133
+
134
+ ## Model file specification
135
+
136
+ | Filename | Quant type | File Size | Description |
137
+ | -------- | ---------- | --------- | ----------- |
138
+ | [magnum-v3-27b-kto-Q2_K.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q2_K.gguf) | Q2_K | 9.732 GB | smallest, significant quality loss - not recommended for most purposes |
139
+ | [magnum-v3-27b-kto-Q3_K_S.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q3_K_S.gguf) | Q3_K_S | 11.333 GB | very small, high quality loss |
140
+ | [magnum-v3-27b-kto-Q3_K_M.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q3_K_M.gguf) | Q3_K_M | 12.503 GB | very small, high quality loss |
141
+ | [magnum-v3-27b-kto-Q3_K_L.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q3_K_L.gguf) | Q3_K_L | 13.522 GB | small, substantial quality loss |
142
+ | [magnum-v3-27b-kto-Q4_0.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q4_0.gguf) | Q4_0 | 14.555 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
143
+ | [magnum-v3-27b-kto-Q4_K_S.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q4_K_S.gguf) | Q4_K_S | 14.658 GB | small, greater quality loss |
144
+ | [magnum-v3-27b-kto-Q4_K_M.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q4_K_M.gguf) | Q4_K_M | 15.502 GB | medium, balanced quality - recommended |
145
+ | [magnum-v3-27b-kto-Q5_0.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q5_0.gguf) | Q5_0 | 17.587 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
146
+ | [magnum-v3-27b-kto-Q5_K_S.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q5_K_S.gguf) | Q5_K_S | 17.587 GB | large, low quality loss - recommended |
147
+ | [magnum-v3-27b-kto-Q5_K_M.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q5_K_M.gguf) | Q5_K_M | 18.075 GB | large, very low quality loss - recommended |
148
+ | [magnum-v3-27b-kto-Q6_K.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q6_K.gguf) | Q6_K | 20.809 GB | very large, extremely low quality loss |
149
+ | [magnum-v3-27b-kto-Q8_0.gguf](https://huggingface.co/tensorblock/magnum-v3-27b-kto-GGUF/tree/main/magnum-v3-27b-kto-Q8_0.gguf) | Q8_0 | 26.950 GB | very large, extremely low quality loss - not recommended |
150
+
151
+
152
+ ## Downloading instruction
153
+
154
+ ### Command line
155
+
156
+ First, install the Hugging Face CLI:
157
+
158
+ ```shell
159
+ pip install -U "huggingface_hub[cli]"
160
+ ```
161
+
162
+ Then, download the individual model file to a local directory:
163
+
164
+ ```shell
165
+ huggingface-cli download tensorblock/magnum-v3-27b-kto-GGUF --include "magnum-v3-27b-kto-Q2_K.gguf" --local-dir MY_LOCAL_DIR
166
+ ```
167
+
168
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
169
+
170
+ ```shell
171
+ huggingface-cli download tensorblock/magnum-v3-27b-kto-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
172
+ ```
magnum-v3-27b-kto-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46664de7287a37d2aa702661c4f08d3a2acdb936a3e08998eac0443a3726202f
3
+ size 10449576192
magnum-v3-27b-kto-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7620b187809cdd8b231fbb049ae57647292b8808a5fe35efaf4ddfde0211fff4
3
+ size 14519361792
magnum-v3-27b-kto-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08e8b85976107db51b7b6498c0ce5cfdd188c99d4d2ce9d31596f01243fef043
3
+ size 13424648448
magnum-v3-27b-kto-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:676d6d89ca02099756c61205f21ec51fd3cbcf2b511ae92430cb885e68a562fa
3
+ size 12169060608
magnum-v3-27b-kto-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfb849ff1c3c7e60cdf687c4a88c29fdc1cdd8823ed6862d42708198aceaadea
3
+ size 15628378368
magnum-v3-27b-kto-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77d63cab4841f973c2b0133e4c6fde19aabd1e976998f04eae8cf9854d8af3fe
3
+ size 16645382400
magnum-v3-27b-kto-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d2ca2a92d99e7626736dd7674859b5a94fc1ff4c8a0e7ac9da07ec0baf9f439
3
+ size 15739265280
magnum-v3-27b-kto-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be06c92a5441617374bc6c52b1f9b237f7ac453e809d8c9aa6ff489edf869276
3
+ size 18884206848
magnum-v3-27b-kto-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac0252bcd9df0f4a033fb0e895da9925c64218ea4115a3aa645d6b827cb7e060
3
+ size 19408118016
magnum-v3-27b-kto-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab71dbc811f3ee5edabdcbf27c0a5a58e0d9bcc7fc64a5d491702bc2e5f63c58
3
+ size 18884206848
magnum-v3-27b-kto-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b6e0dacc5b2a5e74f72c3071d2b13de7b5f35aafb24470eae59d3f327f0e92b
3
+ size 22343524608
magnum-v3-27b-kto-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:096d9819ada2db2137cc98914ab47cf83e76de281d89e7e83b9fc044db9973d0
3
+ size 28937388288