Update README.md
README.md (CHANGED)
@@ -42,22 +42,31 @@ dtype: bfloat16
Removed:

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
# … (the removed pipeline setup lines are not recoverable from this diff view)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
Added:

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "shenzhi-wang/Llama3-8B-Chinese-Chat"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

messages = [
    {"role": "user", "content": "Tell me about yourself"},
]

input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=8192,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```
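
For quick experiments, the same generation can also be run through the Transformers text-generation `pipeline`, which bundles tokenization, chat templating, and decoding into one call. The snippet below is a minimal sketch rather than part of the README above: it assumes a recent `transformers` release whose text-generation pipeline accepts chat-style message lists, and the 512-token cap is an arbitrary illustrative choice; the sampling settings mirror the example above.

```python
from transformers import pipeline

# Minimal sketch (assumes a recent transformers release with a chat-aware
# text-generation pipeline); accelerate handles device placement.
pipe = pipeline(
    "text-generation",
    model="shenzhi-wang/Llama3-8B-Chinese-Chat",
    torch_dtype="auto",
    device_map="auto",
)

messages = [
    {"role": "user", "content": "Tell me about yourself"},
]

outputs = pipe(messages, max_new_tokens=512, do_sample=True, temperature=0.6, top_p=0.9)

# With chat-style input, generated_text holds the whole conversation;
# the last entry is the assistant's reply.
print(outputs[0]["generated_text"][-1]["content"])
```

Unlike the pipeline call, the `model.generate()` example above slices `outputs[0][input_ids.shape[-1]:]` before decoding, so only the newly generated tokens are printed and the prompt is not echoed back.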