Update README.md
Browse files
README.md
CHANGED
@@ -8,21 +8,7 @@ datasets:
  8   ```python
  9   from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 10
 11 -
 12 -    json.dump({
 13 -        "pad_token_id": 0,
 14 -        "bos_token_id": 1,
 15 -        "eos_token_id": 2,
 16 -        "temperature": 0.3,
 17 -        "top_p": 0.9,
 18 -        "top_k": 50,
 19 -        "do_sample": True,
 20 -        "max_new_tokens": 1536,
 21 -        "repetition_penalty": 1.1,
 22 -        "no_repeat_ngram_size": 15,
 23 -    }, fp, indent=4)
 24 -
 25 - MODEL_NAME = "Vikhrmodels/Vikhr_instruct"
 26   TEMPLATE = "<s>{role}\n{content}</s>\n"
 27   SYSTEM_PROMPT = "Ты – полезный помощник по имени Вихрь. Ты разговариваешь с людьми и помогаешь им."
 28
@@ -31,7 +17,7 @@ model.to('cuda')
 31   model.eval()
 32
 33   tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
 34 - generation_config = GenerationConfig.from_pretrained(
 35
 36   class Conversation:
 37       def __init__(
|
  8   ```python
  9   from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 10
 11 + MODEL_NAME = "Vikhrmodels/Vikhr-7B-instruct"
 12   TEMPLATE = "<s>{role}\n{content}</s>\n"
 13   SYSTEM_PROMPT = "Ты – полезный помощник по имени Вихрь. Ты разговариваешь с людьми и помогаешь им."
 14
 17   model.eval()
 18
 19   tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
 20 + generation_config = GenerationConfig.from_pretrained(MODEL_NAME)
 21
 22   class Conversation:
 23       def __init__(