xuqinyang commited on
Commit
5a175b0
0 Parent(s):

Duplicate from xqy2006/Baichuan-13B-Chat

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Baichuan 13B Chat
3
+ emoji: 💻
4
+ colorFrom: red
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: 3.38.0
8
+ app_file: app.py
9
+ pinned: false
10
+ models:
11
+ - baichuan-inc/Baichuan-13B-Chat
12
+ duplicated_from: xqy2006/Baichuan-13B-Chat
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Iterator
2
+
3
+ import gradio as gr
4
+
5
+
6
+ from model import run
7
+
8
# Generation defaults and UI limits for the Space.
DEFAULT_SYSTEM_PROMPT = ""
MAX_MAX_NEW_TOKENS = 2048        # upper bound of the "Max new tokens" slider
DEFAULT_MAX_NEW_TOKENS = 1024    # slider default
MAX_INPUT_TOKEN_LENGTH = 4000    # NOTE(review): defined but never enforced — check_input_token_length is a no-op

# Markdown rendered at the top of the page.
DESCRIPTION = """
# Baichuan-13B-Chat
"""
# Markdown rendered at the bottom of the page; currently empty.
LICENSE=""
17
+
18
+
19
+
20
def clear_and_save_textbox(message: str) -> tuple[str, str]:
    """Empty the textbox while stashing the submitted message in state."""
    cleared_box, stashed = '', message
    return cleared_box, stashed
22
+
23
+
24
def display_input(message: str,
                  history: list[tuple[str, str]]) -> list[tuple[str, str]]:
    """Echo the user's message into the chat history with an empty bot slot."""
    # In-place augmented assignment mutates the caller's list, like append.
    history += [(message, '')]
    return history
28
+
29
+
30
def delete_prev_fn(
        history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
    """Remove the most recent exchange and hand back its user message.

    Returns the shortened history plus the removed message (empty string
    when the history was already empty or the message was falsy).
    """
    if history:
        last_user_msg, _bot_reply = history.pop()
    else:
        last_user_msg = ''
    return history, last_user_msg or ''
37
+
38
+
39
def generate(
    message: str,
    history_with_input: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
) -> Iterator[list[tuple[str, str]]]:
    """Stream chatbot states: prior history plus the growing model reply.

    ``history_with_input`` ends with the just-echoed user turn, so the
    prior history is everything but the last element.
    """
    prior_history = history_with_input[:-1]
    for partial_reply in run(message, prior_history, system_prompt,
                             max_new_tokens, temperature, top_p, top_k):
        yield prior_history + [(message, partial_reply)]
53
+
54
+
55
def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
    """Run one example prompt to completion (used by gr.Examples caching).

    Returns:
        ``('', final_history)`` — the empty string clears the textbox and
        ``final_history`` is the fully generated chat state.
    """
    # Drain the streaming generator, keeping only the last yielded state.
    # Fix: initialize the result so an empty generator cannot raise
    # NameError on an unbound loop variable.
    final_history: list[tuple[str, str]] = []
    for final_history in generate(message, [], DEFAULT_SYSTEM_PROMPT,
                                  8192, 1, 0.95, 50):
        pass
    return '', final_history
60
+
61
+
62
def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
    """Placeholder hook for input-length validation; intentionally a no-op.

    Presumably the upstream Space rejected over-long prompts here (see
    MAX_INPUT_TOKEN_LENGTH); this duplicate keeps the slot in the event
    chain but performs no check. Always returns None so the downstream
    ``.success()`` handlers fire unconditionally.
    """
    # Fix: dropped the dead `a = 1` placeholder assignment.
    return None
64
+
65
+
66
# Gradio UI: chat window, sampling controls, and the event wiring that
# streams model output into the chatbot.
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value='Duplicate Space for private use',
                       elem_id='duplicate-button')

    with gr.Group():
        chatbot = gr.Chatbot(label='Chatbot')
        with gr.Row():
            textbox = gr.Textbox(
                container=False,
                show_label=False,
                placeholder='Type a message...',
                scale=10,
            )
            submit_button = gr.Button('Submit',
                                      variant='primary',
                                      scale=1,
                                      min_width=0)
    with gr.Row():
        retry_button = gr.Button('🔄 Retry', variant='secondary')
        undo_button = gr.Button('↩️ Undo', variant='secondary')
        clear_button = gr.Button('🗑️ Clear', variant='secondary')

    # Holds the last submitted message so chained callbacks can read it
    # after clear_and_save_textbox has emptied the textbox.
    saved_input = gr.State()

    with gr.Accordion(label='Advanced options', open=False):
        system_prompt = gr.Textbox(label='System prompt',
                                   value=DEFAULT_SYSTEM_PROMPT,
                                   lines=6)
        max_new_tokens = gr.Slider(
            label='Max new tokens',
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        )
        temperature = gr.Slider(
            label='Temperature',
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.3,
        )
        top_p = gr.Slider(
            label='Top-p (nucleus sampling)',
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.85,
        )
        top_k = gr.Slider(
            label='Top-k',
            minimum=1,
            maximum=1000,
            step=1,
            value=5,
        )

    # Cached examples: process_example runs each prompt to completion once.
    gr.Examples(
        examples=[
            '用中文回答,When is the best time to visit Beijing, and do you have any suggestions for me?',
            '用中文回答,特朗普是谁?',

        ],
        inputs=textbox,
        outputs=[textbox, chatbot],
        fn=process_example,
        cache_examples=True,
    )

    gr.Markdown(LICENSE)

    # Enter key: clear textbox -> echo the user turn -> (no-op) length
    # check -> stream the generation into the chatbot.
    textbox.submit(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    # Submit button: identical pipeline to pressing Enter.
    button_event_preprocess = submit_button.click(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    # Retry: drop the last exchange, re-echo the message, regenerate.
    # NOTE(review): uses .then (not .success) before generate, so it runs
    # even without the length-check step.
    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    # Undo: remove the last exchange and restore its message to the textbox.
    undo_button.click(

        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=textbox,
        api_name=False,
        queue=False,
    )

    # Clear: wipe both the chat history and the saved message.
    clear_button.click(
        fn=lambda: ([], ''),
        outputs=[chatbot, saved_input],
        queue=False,
        api_name=False,
    )

demo.queue(max_size=20).launch()
llama_cpp_python-0.1.73-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl ADDED
Binary file (287 kB). View file
 
model.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1

from typing import Iterator



# Hub repo holding the quantized (ggml int4) Baichuan-13B-Chat weights.
model_id = 'xuqinyang/baichuan-13b-chat-ggml-int4'

from huggingface_hub import snapshot_download

# Download the model files into the working directory.
# NOTE(review): runs at import time — the Space blocks here until the
# snapshot is fetched.
snapshot_download(model_id, local_dir="./")
from llama_cpp import Llama
# 4k context window, seed=-1 (random seed per run), 4 CPU threads.
llm = Llama(model_path="./ggml-model-q4_0.bin", n_ctx=4096,seed=-1,n_threads=4)
13
+
14
def run(message: str,
        chat_history: list[tuple[str, str]],
        system_prompt: str,
        max_new_tokens: int = 1024,
        temperature: float = 0.3,
        top_p: float = 0.85,
        top_k: int = 5) -> Iterator[str]:
    """Stream a chat completion from the local ggml model.

    Args:
        message: Current user message.
        chat_history: Prior (user, assistant) exchanges.
        system_prompt: Optional system instruction; sent as a leading
            "system" turn only when non-empty (previously ignored).
        max_new_tokens: Cap on generated tokens (previously ignored).
        temperature, top_p, top_k: Sampling parameters.

    Yields:
        The accumulated response text after each streamed content delta.
    """
    history = []
    # Fix: the original accepted system_prompt but never used it. Passing
    # it only when non-empty keeps the default ("") behavior unchanged.
    if system_prompt:
        history.append({"role": "system", "content": system_prompt})
    for user_turn, assistant_turn in chat_history:
        history.append({"role": "user", "content": user_turn})
        history.append({"role": "assistant", "content": assistant_turn})
    history.append({"role": "user", "content": message})

    # Fix: honor max_new_tokens instead of the hard-coded unlimited (-1);
    # also dropped the leftover debug print() calls.
    result = ""
    for response in llm.create_chat_completion(
            history,
            stream=True,
            max_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            repeat_penalty=1.1):
        delta = response["choices"][0]["delta"]
        if "content" in delta:
            result += delta["content"]
            yield result
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ https://github.com/xqy2006/baichuan13b.cpp/raw/master/python_blinding_release/llama_cpp_python-0.1.73-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
2
+ gradio
3
+ huggingface_hub