Duplicate from cortexso/llama3
Browse files

Co-authored-by: Jan Team <[email protected]>
- .gitattributes +36 -0
- README.md +38 -0
- model.gguf +3 -0
- model.yml +23 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
model.gguf filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: llama3
|
3 |
+
---
|
4 |
+
|
5 |
+
## Overview
|
6 |
+
|
7 |
+
Meta developed and released the [Meta Llama 3](https://huggingface.co/meta-llama/Meta-Llama-3-8B) family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.
|
8 |
+
|
9 |
+
## Variants
|
10 |
+
|
11 |
+
| No | Variant | Cortex CLI command |
|
12 |
+
| --- | --- | --- |
|
13 |
+
| 1 | [onnx](https://huggingface.co/cortexso/llama3/tree/onnx) | `cortex run llama3:onnx` |
|
14 |
+
| 2 | [gguf](https://huggingface.co/cortexso/llama3/tree/gguf) | `cortex run llama3:gguf` |
|
15 |
+
| 3 | [default](https://huggingface.co/cortexso/llama3/tree/default) | `cortex run llama3` |
|
16 |
+
|
17 |
+
## Use it with Jan (UI)
|
18 |
+
|
19 |
+
1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)
|
20 |
+
2. Use in Jan model Hub:
|
21 |
+
```
|
22 |
+
cortexso/llama3
|
23 |
+
```
|
24 |
+
|
25 |
+
## Use it with Cortex (CLI)
|
26 |
+
|
27 |
+
1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)
|
28 |
+
2. Run the model with command:
|
29 |
+
```
|
30 |
+
cortex run llama3
|
31 |
+
```
|
32 |
+
|
33 |
+
## Credits
|
34 |
+
|
35 |
+
- **Author:** Meta
|
36 |
+
- **Converter:** [Homebrew](https://www.homebrew.ltd/)
|
37 |
+
- **Original License:** [License](https://llama.meta.com/llama3/license/)
|
38 |
+
- **Papers:** [Llama-3 Blog](https://llama.meta.com/llama3/)
|
model.gguf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:848b442759fa041b5b41c7666282c05999817d2340b4d49e5b63005f7912bede
|
3 |
+
size 4920733952
|
model.yml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Llama 3
|
2 |
+
model: llama3:8B
|
3 |
+
version: 1
|
4 |
+
|
5 |
+
# Results Preferences
|
6 |
+
stop:
|
7 |
+
- <|end_of_text|>
|
8 |
+
- <|eot_id|>
|
9 |
+
top_p: 0.95
|
10 |
+
temperature: 0.7
|
11 |
+
frequency_penalty: 0
|
12 |
+
presence_penalty: 0
|
13 |
+
max_tokens: 8192 # Infer from base config.json -> max_position_embeddings
|
14 |
+
stream: true # true | false
|
15 |
+
|
16 |
+
# Engine / Model Settings
|
17 |
+
ngl: 33 # GPU layers to offload: base config.json -> num_hidden_layers (32) + 1 output layer
|
18 |
+
ctx_len: 8192 # Infer from base config.json -> max_position_embeddings
|
19 |
+
engine: cortex.llamacpp
|
20 |
+
prompt_template: "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
|
21 |
+
# Prompt template: Can only be retrieved from instruct model
|
22 |
+
# - https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json#L2053
|
23 |
+
# - Requires jinja format parser
|