picocreator
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -5,6 +5,7 @@
|
|
5 |
**! Important Note !**
|
6 |
|
7 |
The following is the HF transformers implementation of the RWKV-5 Eagle 7B model, and is meant to be used **only with huggingface transformers**
|
|
|
8 |
For the full model weights on its own, for use with other RWKV libraries, refer to [here](https://huggingface.co/RWKV/v5-Eagle-7B)
|
9 |
|
10 |
#### Running on CPU via HF transformers
|
@@ -32,8 +33,8 @@ User: {instruction}
|
|
32 |
Assistant:"""
|
33 |
|
34 |
|
35 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True).to(torch.float32)
|
36 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True)
|
37 |
|
38 |
text = "请介绍北京的旅游景点"
|
39 |
prompt = generate_prompt(text)
|
@@ -88,8 +89,8 @@ User: {instruction}
|
|
88 |
Assistant:"""
|
89 |
|
90 |
|
91 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True, torch_dtype=torch.float16).to(0)
|
92 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True)
|
93 |
|
94 |
text = "介绍一下大熊猫"
|
95 |
prompt = generate_prompt(text)
|
@@ -135,8 +136,8 @@ User: {instruction}
|
|
135 |
|
136 |
Assistant:"""
|
137 |
|
138 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True).to(torch.float32)
|
139 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/v5-Eagle-7B", trust_remote_code=True)
|
140 |
|
141 |
texts = ["请介绍北京的旅游景点", "介绍一下大熊猫", "乌兰察布"]
|
142 |
prompts = [generate_prompt(text) for text in texts]
|
|
|
5 |
**! Important Note !**
|
6 |
|
7 |
The following is the HF transformers implementation of the RWKV-5 Eagle 7B model, and is meant to be used **only with huggingface transformers**
|
8 |
+
|
9 |
For the full model weights on its own, for use with other RWKV libraries, refer to [here](https://huggingface.co/RWKV/v5-Eagle-7B)
|
10 |
|
11 |
#### Running on CPU via HF transformers
|
|
|
33 |
Assistant:"""
|
34 |
|
35 |
|
36 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True).to(torch.float32)
|
37 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True)
|
38 |
|
39 |
text = "请介绍北京的旅游景点"
|
40 |
prompt = generate_prompt(text)
|
|
|
89 |
Assistant:"""
|
90 |
|
91 |
|
92 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True, torch_dtype=torch.float16).to(0)
|
93 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True)
|
94 |
|
95 |
text = "介绍一下大熊猫"
|
96 |
prompt = generate_prompt(text)
|
|
|
136 |
|
137 |
Assistant:"""
|
138 |
|
139 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True).to(torch.float32)
|
140 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/HF_v5-Eagle-7B", trust_remote_code=True)
|
141 |
|
142 |
texts = ["请介绍北京的旅游景点", "介绍一下大熊猫", "乌兰察布"]
|
143 |
prompts = [generate_prompt(text) for text in texts]
|