Update base_model formatting
README.md CHANGED
@@ -1,12 +1,18 @@
 ---
-arxiv: 2307.09288
-base_model: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
-inference: false
 language:
 - en
 license: llama2
-model_creator: Meta Llama 2
+tags:
+- facebook
+- meta
+- pytorch
+- llama
+- llama-2
 model_name: Llama 2 7B Chat
+arxiv: 2307.09288
+base_model: meta-llama/Llama-2-7b-chat-hf
+inference: false
+model_creator: Meta Llama 2
 model_type: llama
 pipeline_tag: text-generation
 prompt_template: '[INST] <<SYS>>
@@ -24,12 +30,6 @@ prompt_template: '[INST] <<SYS>>
 
 '
 quantized_by: TheBloke
-tags:
-- facebook
-- meta
-- pytorch
-- llama
-- llama-2
 ---
 
 <!-- header start -->
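With `base_model` now written as a plain repo id instead of a full URL, the value matches the form Hub tooling and libraries resolve directly. A minimal sketch of loading that base model by its repo id (assuming `transformers` is installed and you have been granted access to the gated meta-llama repository; the variable name is illustrative):

```python
# Illustrative only: resolve the card's base_model repo id with transformers.
# Assumes access to the gated meta-llama/Llama-2-7b-chat-hf repository.
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_id = "meta-llama/Llama-2-7b-chat-hf"  # repo id, not a URL

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(base_model_id)
```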