Upload folder using huggingface_hub
- .gitattributes +4 -0
- MixTAO-7Bx2-MoE-v8.1.f16.gguf +3 -0
- MixTAO-7Bx2-MoE-v8.1.q5_k.gguf +3 -0
- MixTAO-7Bx2-MoE-v8.1.q6_k.gguf +3 -0
- MixTAO-7Bx2-MoE-v8.1.q8_0.gguf +3 -0
- README.md +14 -0
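Per the commit title, these files were pushed with huggingface_hub. A minimal sketch of that upload flow, assuming the standard HfApi.upload_folder call; the repo_id and local folder path are placeholders, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_folder(
    folder_path="./MixTAO-7Bx2-MoE-v8.1-GGUF",     # placeholder: local folder holding the .gguf files
    repo_id="your-username/MixTAO-7Bx2-MoE-v8.1",  # placeholder repo id
    commit_message="Upload folder using huggingface_hub",
)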
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+MixTAO-7Bx2-MoE-v8.1.f16.gguf filter=lfs diff=lfs merge=lfs -text
+MixTAO-7Bx2-MoE-v8.1.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+MixTAO-7Bx2-MoE-v8.1.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+MixTAO-7Bx2-MoE-v8.1.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
MixTAO-7Bx2-MoE-v8.1.f16.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3d1fb572f786afd67dbb59f8a7741156f587560bfe4d4ab75aa0681442660ba
+size 25760072928
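Each ADDED .gguf entry here is a Git LFS pointer recording only the blob's sha256 and byte size. A minimal sketch of verifying a downloaded copy against those two fields, using only the Python standard library (hash and size copied from the pointer above):

import hashlib

EXPECTED_OID = "b3d1fb572f786afd67dbb59f8a7741156f587560bfe4d4ab75aa0681442660ba"  # from the pointer above
EXPECTED_SIZE = 25760072928  # bytes, from the pointer above

def verify(path: str) -> bool:
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify("MixTAO-7Bx2-MoE-v8.1.f16.gguf"))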
MixTAO-7Bx2-MoE-v8.1.q5_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d02d6debf23e0372dee1244797eec60dd65f0dc524fabd30cfc9c368044edecc
+size 9459434720
MixTAO-7Bx2-MoE-v8.1.q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b865a5b4cfe5a5e9caafb15c669cb68830f75f437ac18b1a7df8d8849acc028b
+size 10876585184
MixTAO-7Bx2-MoE-v8.1.q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cde692a6e35bc4ed5249d550f947cc040543577071adf341ed7a24161dcc041
+size 13932135648
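The four pointers above are the downloadable artifacts. A small sketch of fetching one with huggingface_hub's hf_hub_download; the filename comes from this commit, the repo_id is a placeholder:

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="your-username/MixTAO-7Bx2-MoE-v8.1",  # placeholder repo id
    filename="MixTAO-7Bx2-MoE-v8.1.q5_k.gguf",     # one of the files added in this commit
)
print(path)  # local cache path of the downloaded GGUF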
README.md
ADDED
@@ -0,0 +1,14 @@
+
+---
+license: mit
+language:
+- en
+---
+
+My own (ZeroWw) quantizations.
+Output and embed tensors are quantized to f16;
+all other tensors are quantized to q5_k or q6_k.
+
+Result:
+both f16.q6 and f16.q5 are smaller than the standard q8_0 quantization,
+and they perform as well as the pure f16.
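The README describes the recipe: keep the output and token-embedding tensors at f16 and quantize everything else to q5_k or q6_k. A hedged sketch of how such a mixed quantization is typically produced with llama.cpp's llama-quantize tool; the --token-embedding-type and --output-tensor-type flags exist in recent llama.cpp builds but are an assumption here, not something this commit states:

import subprocess

# Assumed llama-quantize invocation (verify locally with `llama-quantize --help`):
# embeddings and the output tensor stay at f16, everything else becomes Q5_K.
subprocess.run(
    [
        "./llama-quantize",
        "--token-embedding-type", "f16",   # keep token embeddings at f16
        "--output-tensor-type", "f16",     # keep the output tensor at f16
        "MixTAO-7Bx2-MoE-v8.1.f16.gguf",   # f16 source added in this commit
        "MixTAO-7Bx2-MoE-v8.1.q5_k.gguf",  # destination file
        "Q5_K",                            # base type for all remaining tensors
    ],
    check=True,
)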