Update model.py
model.py
CHANGED
@@ -3,7 +3,7 @@ from typing import Iterator
 
 
 
-model_id = '
+model_id = 'shaowenchen/baichuan2-7b-base-gguf'
 
 from huggingface_hub import snapshot_download,hf_hub_download
 # old
@@ -11,7 +11,7 @@ from huggingface_hub import snapshot_download,hf_hub_download
 snapshot_download(model_id, local_dir="./",revision="b2414a0ceee68fe09c99ace44446cfc9a1c52b08")
 hf_hub_download(repo_id="baichuan-inc/Baichuan-13B-Chat",local_dir="./", filename="tokenizer.model")
 from llama_cpp import Llama
-llm = Llama(model_path="./
+llm = Llama(model_path="./baichuan2-7b-base.Q3_K_L.gguf", n_ctx=4096,seed=-1)
 
 def run(message: str,
         chat_history: list[tuple[str, str]],
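For reference, a minimal sketch of how the updated loading code could be exercised end to end, assuming the same shaowenchen/baichuan2-7b-base-gguf repo and baichuan2-7b-base.Q3_K_L.gguf file named in the added lines; the prompt text and sampling parameters below are illustrative assumptions, not part of this commit.

# Sketch (not part of this commit): download the quantized GGUF weights and
# stream a completion with llama-cpp-python.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch only the quantized GGUF file instead of snapshotting the whole repo.
model_file = hf_hub_download(
    repo_id="shaowenchen/baichuan2-7b-base-gguf",
    filename="baichuan2-7b-base.Q3_K_L.gguf",
    local_dir="./",
)

# Same constructor arguments as in the updated model.py: 4096-token context, random seed.
llm = Llama(model_path=model_file, n_ctx=4096, seed=-1)

# Stream a completion token by token (prompt and max_tokens are example values).
for chunk in llm("List three famous landmarks in Beijing.", max_tokens=128, stream=True):
    print(chunk["choices"][0]["text"], end="", flush=True)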