Update README.md

<!-- header start -->
<!-- header end -->

# Taiwan-LLaMa-v1.0-GPTQ

- Model creator: [Yen-Ting Lin](https://huggingface.co/yentinglin)
- Original model: [Language Models for Taiwanese Culture v1.0](https://huggingface.co/yentinglin/Taiwan-LLaMa-v1.0)

## Description

This repo contains GPTQ format model files for [Yen-Ting Lin's Language Models for Taiwanese Culture v1.0](https://huggingface.co/yentinglin/Taiwan-LLaMa-v1.0).
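
If you prefer to have the quantized files on disk first (for example for offline loading), a minimal sketch using `huggingface_hub` is shown below; the local directory name is only an illustration.

```python
# Minimal sketch (assumes huggingface_hub is installed: `pip install huggingface_hub`).
# Downloads this repo's files to a local directory; the path is only an example.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="weiren119/Taiwan-LLaMa-v1.0-4bits-GPTQ",
    local_dir="./Taiwan-LLaMa-v1.0-4bits-GPTQ",
)
```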

## Intro

- The 4-bit GPTQ model was converted from [Taiwan-LLaMa-v1.0 13B](https://huggingface.co/yentinglin/Taiwan-LLaMa-v1.0) with the [auto-gptq](https://github.com/PanQiWei/AutoGPTQ) package (a rough conversion sketch is shown below).
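
For reference, such a conversion typically follows the standard auto-gptq quantization workflow. The sketch below is an illustration under assumptions, not the exact script used for this repo: the calibration text, `group_size`, and the other `BaseQuantizeConfig` settings are placeholders.

```python
# Rough sketch of a 4-bit GPTQ conversion with auto-gptq.
# The calibration example and quantization parameters are illustrative assumptions,
# not the settings actually used to produce the files in this repo.
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

base_model = "yentinglin/Taiwan-LLaMa-v1.0"
output_dir = "Taiwan-LLaMa-v1.0-4bits-GPTQ"

tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=True)

# A real conversion would use a larger, representative set of calibration samples.
examples = [tokenizer("台灣最高的山是玉山,位於南投縣與高雄市交界。")]

quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)
model = AutoGPTQForCausalLM.from_pretrained(base_model, quantize_config)

model.quantize(examples)
model.save_quantized(output_dir, use_safetensors=True)
tokenizer.save_pretrained(output_dir)
```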

## How to use the GPTQ model from Python

- Install the GPTQ package: `pip install auto-gptq`
- Example code:

```python
from threading import Thread

from transformers import AutoTokenizer, TextStreamer, TextIteratorStreamer
from auto_gptq import AutoGPTQForCausalLM


class TaiwanLLaMaGPTQ:
    def __init__(self, model_dir):
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=True)
        self.model = AutoGPTQForCausalLM.from_quantized(model_dir,
                                                        trust_remote_code=True,
                                                        use_safetensors=True,
                                                        device_map="auto",
                                                        use_triton=False,
                                                        strict=False)
        self.chat_history = []
        self.system_prompt = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""

        # Streamer for blocking generation (prints tokens as they arrive) and
        # iterator streamer for threaded, generator-style streaming.
        self.streamer = TextStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
        self.thread_streamer = TextIteratorStreamer(self.tokenizer, skip_special_tokens=True)

    def get_prompt(self, message: str, chat_history: list[tuple[str, str]]) -> str:
        # Build a Llama-2-style chat prompt from the system prompt and history.
        texts = [f'[INST] <<SYS>>\n{self.system_prompt}\n<</SYS>>\n\n']
        for user_input, response in chat_history:
            texts.append(f'{user_input.strip()} [/INST] {response.strip()} </s><s> [INST] ')
        texts.append(f'{message.strip()} [/INST]')
        return ''.join(texts)

    def generate(self, message: str):
        prompt = self.get_prompt(message, self.chat_history)
        tokens = self.tokenizer(prompt, return_tensors='pt').input_ids
        generate_ids = self.model.generate(input_ids=tokens.cuda(), max_new_tokens=4096, streamer=self.streamer)
        # Decode only the newly generated tokens (drop the prompt and the final token).
        output = self.tokenizer.decode(generate_ids[0, len(tokens[0]):-1]).strip()
        self.chat_history.append([message, output])
        return output

    def thread_generate(self, message: str):
        prompt = self.get_prompt(message, self.chat_history)
        inputs = self.tokenizer(prompt, return_tensors="pt")

        generation_kwargs = dict(
            inputs=inputs.input_ids.cuda(),
            attention_mask=inputs.attention_mask.cuda(),
            temperature=0.1,
            max_new_tokens=1024,
            streamer=self.thread_streamer,
        )

        # Run generation on a separate thread to enable response streaming.
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
        thread.start()
        for new_text in self.thread_streamer:
            yield new_text

        thread.join()


inferencer = TaiwanLLaMaGPTQ("weiren119/Taiwan-LLaMa-v1.0-4bits-GPTQ")

s = ''
while True:
    s = input("User: ")
    if s != '':
        print('Answer:')
        print(inferencer.generate(s))
        print('-' * 80)
```
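
The interactive loop above uses the blocking `generate` method. `thread_generate` instead yields text chunks as they are produced; a small usage sketch follows. Note that `TextIteratorStreamer` is constructed without `skip_prompt=True`, so the streamed text includes the prompt, and `thread_generate` does not append to `chat_history`.

```python
# Minimal sketch: consume thread_generate as a stream of text chunks.
for chunk in inferencer.thread_generate("台灣最高的山是哪一座?"):
    print(chunk, end="", flush=True)
print()
```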

<!-- footer start -->

## Acknowledgements

We thank the [Meta LLaMA team](https://github.com/facebookresearch/llama) and the [Vicuna team](https://github.com/lm-sys/FastChat) for their open-source efforts in democratizing large language models.