Update app.py
app.py
CHANGED
@@ -1,23 +1,22 @@
-from transformers import AutoModelForCausalLM,
+from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
 from tokenization_yi import YiTokenizer
 import torch
 import os
 import gradio as gr
 import sentencepiece
 
-model_id = "
-
-
-
-
-
-
-
-
-
-
-
-
+model_id = "TheBloke/Yi-34B-200K-Llamafied-GPTQ"
+
+gptq_config = GPTQConfig(
+    bits=4,
+    exllama_config={"version": 2}
+)
+tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yi-34B-200K-Llamafied-GPTQ")
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",
+    quantization_config=gptq_config
+)
 def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
     prompt = get_prompt(message, chat_history)
     input_ids = tokenizer.encode(prompt, return_tensors='pt')
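The hunk ends inside run(), so everything after the tokenizer.encode call, as well as the get_prompt helper and the Gradio wiring, sits outside this diff. As a rough sketch only, assuming get_prompt(message, chat_history) flattens the conversation into a single prompt string and reusing the module-level tokenizer and model loaded above, the rest of such a handler could look like the following; none of it is part of the commit itself.

# Hypothetical continuation of run() -- not part of this commit.
# Assumes get_prompt(message, chat_history) returns a single prompt string
# and that tokenizer/model are the module-level objects created above.
def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
    prompt = get_prompt(message, chat_history)
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    # Move the prompt tokens onto the device the sharded model expects.
    input_ids = input_ids.to(model.device)
    # Sample a completion with the parameters exposed to the UI.
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Return only the newly generated text, without the prompt or special tokens.
    return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)

Note that the default temperature of 3.5 in run's signature is far above the 0.6 to 1.0 range typically used for chat sampling, so callers would normally override it.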