Spaces · Runtime error
Cran-May committed · Commit 3cc267d
Parent(s):
Duplicate from Cran-May/yugang-Baichuan-13B-Int4
Files changed:
- .gitattributes +35 -0
- README.md +15 -0
- app.py +250 -0
- llama_cpp_python-0.1.73-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl +0 -0
- model.py +34 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,15 @@
+---
+title: Baichuan 13B Chat
+emoji: 💻
+colorFrom: red
+colorTo: blue
+sdk: gradio
+sdk_version: 3.38.0
+app_file: app.py
+pinned: false
+models:
+- baichuan-inc/Baichuan-13B-Chat
+duplicated_from: Cran-May/yugang-Baichuan-13B-Int4
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,250 @@
+from typing import Iterator
+
+import gradio as gr
+
+from model import run
+
+DEFAULT_SYSTEM_PROMPT = ""
+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = 4000
+
+DESCRIPTION = """
+# Baichuan-13B-Chat
+"""
+LICENSE = ""
+
+
+def clear_and_save_textbox(message: str) -> tuple[str, str]:
+    # Clear the textbox and stash the submitted message in saved_input.
+    return '', message
+
+
+def display_input(message: str,
+                  history: list[tuple[str, str]]) -> list[tuple[str, str]]:
+    # Show the user's message in the chatbot with an empty reply slot.
+    history.append((message, ''))
+    return history
+
+
+def delete_prev_fn(
+        history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
+    # Drop the last turn and return its user message (used by retry/undo).
+    try:
+        message, _ = history.pop()
+    except IndexError:
+        message = ''
+    return history, message or ''
+
+
+def generate(
+    message: str,
+    history_with_input: list[tuple[str, str]],
+    system_prompt: str,
+    max_new_tokens: int,
+    temperature: float,
+    top_p: float,
+    top_k: int,
+) -> Iterator[list[tuple[str, str]]]:
+    history = history_with_input[:-1]
+    generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
+    for response in generator:
+        yield history + [(message, response)]
+
+
+def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
+    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 8192, 1, 0.95, 50)
+    x: list[tuple[str, str]] = []
+    for x in generator:
+        pass
+    return '', x
+
+
+def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
+    # Placeholder: no input-length check is actually performed.
+    pass
+
+
+with gr.Blocks(css='style.css') as demo:
+    gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(value='Duplicate Space for private use',
+                       elem_id='duplicate-button')
+
+    with gr.Group():
+        chatbot = gr.Chatbot(label='Chatbot')
+        with gr.Row():
+            textbox = gr.Textbox(
+                container=False,
+                show_label=False,
+                placeholder='Type a message...',
+                scale=10,
+            )
+            submit_button = gr.Button('Submit',
+                                      variant='primary',
+                                      scale=1,
+                                      min_width=0)
+    with gr.Row():
+        retry_button = gr.Button('🔄 Retry', variant='secondary')
+        undo_button = gr.Button('↩️ Undo', variant='secondary')
+        clear_button = gr.Button('🗑️ Clear', variant='secondary')
+
+    saved_input = gr.State()
+
+    with gr.Accordion(label='Advanced options', open=False):
+        system_prompt = gr.Textbox(label='System prompt',
+                                   value=DEFAULT_SYSTEM_PROMPT,
+                                   lines=6)
+        max_new_tokens = gr.Slider(
+            label='Max new tokens',
+            minimum=1,
+            maximum=MAX_MAX_NEW_TOKENS,
+            step=1,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        )
+        temperature = gr.Slider(
+            label='Temperature',
+            minimum=0.1,
+            maximum=4.0,
+            step=0.1,
+            value=0.3,
+        )
+        top_p = gr.Slider(
+            label='Top-p (nucleus sampling)',
+            minimum=0.05,
+            maximum=1.0,
+            step=0.05,
+            value=0.85,
+        )
+        top_k = gr.Slider(
+            label='Top-k',
+            minimum=1,
+            maximum=1000,
+            step=1,
+            value=5,
+        )
+
+    gr.Examples(
+        examples=[
+            '世界上第二高的山峰是哪座',  # "Which is the world's second-highest mountain?"
+        ],
+        inputs=textbox,
+        outputs=[textbox, chatbot],
+        fn=process_example,
+        cache_examples=True,
+    )
+
+    gr.Markdown(LICENSE)
+
+    textbox.submit(
+        fn=clear_and_save_textbox,
+        inputs=textbox,
+        outputs=[textbox, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=check_input_token_length,
+        inputs=[saved_input, chatbot, system_prompt],
+        api_name=False,
+        queue=False,
+    ).success(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    button_event_preprocess = submit_button.click(
+        fn=clear_and_save_textbox,
+        inputs=textbox,
+        outputs=[textbox, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=check_input_token_length,
+        inputs=[saved_input, chatbot, system_prompt],
+        api_name=False,
+        queue=False,
+    ).success(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    retry_button.click(
+        fn=delete_prev_fn,
+        inputs=chatbot,
+        outputs=[chatbot, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    undo_button.click(
+        fn=delete_prev_fn,
+        inputs=chatbot,
+        outputs=[chatbot, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=lambda x: x,
+        inputs=[saved_input],
+        outputs=textbox,
+        api_name=False,
+        queue=False,
+    )
+
+    clear_button.click(
+        fn=lambda: ([], ''),
+        outputs=[chatbot, saved_input],
+        queue=False,
+        api_name=False,
+    )
+
+demo.queue(max_size=20).launch()
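
Note: the three event chains above all repeat one pattern: save and clear the textbox, echo the user turn into the chat history, validate, then stream generate into the chatbot. Below is a minimal, self-contained sketch of that Gradio chaining pattern; save_and_clear and echo_stream are hypothetical stand-ins (not part of this Space) for clear_and_save_textbox and generate.

import time

import gradio as gr


def save_and_clear(message: str) -> tuple[str, str]:
    # Clear the textbox and stash the submitted message in State.
    return '', message


def echo_stream(message: str):
    # Hypothetical stand-in for generate(): yields a growing partial
    # string, which Gradio streams into the bound output component.
    out = ''
    for ch in message:
        out += ch
        time.sleep(0.05)
        yield out


with gr.Blocks() as demo:
    box = gr.Textbox(placeholder='Type a message...')
    saved = gr.State()
    reply = gr.Textbox(label='Streamed reply')

    # Each .then() step runs only after the previous step finishes.
    box.submit(
        fn=save_and_clear, inputs=box, outputs=[box, saved], queue=False,
    ).then(
        fn=echo_stream, inputs=saved, outputs=reply,
    )

demo.queue().launch()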
llama_cpp_python-0.1.73-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
ADDED
Binary file (287 kB)
model.py
ADDED
@@ -0,0 +1,34 @@
+from typing import Iterator
+
+model_id = 'xuqinyang/baichuan-13b-chat-ggml-int4'
+
+from huggingface_hub import snapshot_download, hf_hub_download
+
+# old
+#snapshot_download(model_id, local_dir="./", revision="7f71a8abefa7b2eede3f74ce0564abe5fbe6874a")
+snapshot_download(model_id, local_dir="./", revision="b2414a0ceee68fe09c99ace44446cfc9a1c52b08")
+hf_hub_download(repo_id="baichuan-inc/Baichuan-13B-Chat", local_dir="./", filename="tokenizer.model")
+
+from llama_cpp import Llama
+
+# Load the int4-quantized GGML model with a 4096-token context window.
+llm = Llama(model_path="./ggml-model-q4_0.bin", n_ctx=4096, seed=-1)
+
+
+def run(message: str,
+        chat_history: list[tuple[str, str]],
+        system_prompt: str,
+        max_new_tokens: int = 1024,
+        temperature: float = 0.3,
+        top_p: float = 0.85,
+        top_k: int = 5) -> Iterator[str]:
+    history = []
+    print(chat_history)
+    result = ""
+    # Rebuild the chat-completion message list from (user, assistant) tuples.
+    for i in chat_history:
+        history.append({"role": "user", "content": i[0]})
+        history.append({"role": "assistant", "content": i[1]})
+    print(history)
+    history.append({"role": "user", "content": message})
+    # Stream tokens, accumulating and yielding the full reply so far.
+    for response in llm.create_chat_completion(history, stop=["</s>"], stream=True, max_tokens=-1,
+                                               temperature=temperature, top_k=top_k, top_p=top_p,
+                                               repeat_penalty=1.1):
+        if "content" in response["choices"][0]["delta"]:
+            result = result + response["choices"][0]["delta"]["content"]
+            yield result
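
Note: run can also be exercised outside Gradio. A minimal sketch, assuming model.py has already downloaded the weights on import; the prompt string is an arbitrary example:

from model import run

history: list[tuple[str, str]] = []      # no previous turns
reply = ""
for reply in run("Hello", history, system_prompt=""):
    print(reply)                          # each item is the full reply so far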
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+https://github.com/xqy2006/baichuan13b-cpp-python/releases/download/1.0.6/llama_cpp_python-0.1.73-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+huggingface_hub