shenglongw committed on
Commit
cc47e27
·
verified ·
1 Parent(s): 295865d

Update app.py

Files changed (1)
  1. app.py +37 -159
app.py CHANGED
@@ -1,169 +1,47 @@
- import gradio as gr
- import os
-
- os.system('pip install dashscope -U')
- import tempfile
- from pathlib import Path
- import secrets
- import dashscope
- from dashscope import MultiModalConversation, Generation
- from PIL import Image
-
-
- # Set the API key
- YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
- dashscope.api_key = YOUR_API_TOKEN
- math_messages = []
- def process_image(image, shouldConvert=False):
-     # Get the directory for uploaded files
-     global math_messages
-     math_messages = [] # reset when upload image
-     uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
-         Path(tempfile.gettempdir()) / "gradio"
-     )
-     os.makedirs(uploaded_file_dir, exist_ok=True)
-
-     # Create a temporary file path
-     name = f"tmp{secrets.token_hex(20)}.jpg"
-     filename = os.path.join(uploaded_file_dir, name)
-     # Save the uploaded image
-     if shouldConvert:
-         new_img = Image.new('RGB', size=(image.width, image.height), color=(255, 255, 255))
-         new_img.paste(image, (0, 0), mask=image)
-         image = new_img
-     image.save(filename)
-
-     # Call the qwen-vl-max-0809 model to process the image
-     messages = [{
-         'role': 'system',
-         'content': [{'text': 'You are a helpful assistant.'}]
-     }, {
-         'role': 'user',
-         'content': [
-             {'image': f'file://{filename}'},
-             {'text': 'Please describe the math-related content in this image, ensuring that any LaTeX formulas are correctly transcribed. Non-mathematical details do not need to be described.'}
-         ]
-     }]
-
-     response = MultiModalConversation.call(model='qwen-vl-max-0809', messages=messages)
-
-     # Clean up the temporary file
-     os.remove(filename)
-
-     return response.output.choices[0]["message"]["content"]
-
- def get_math_response(image_description, user_question):
-     global math_messages
-     if not math_messages:
-         math_messages.append({'role': 'system', 'content': 'You are a helpful math assistant.'})
-     math_messages = math_messages[:1]
-     if image_description is not None:
-         content = f'Image description: {image_description}\n\n'
-     else:
-         content = ''
-     query = f"{content}User question: {user_question}"
-     math_messages.append({'role': 'user', 'content': query})
-     response = Generation.call(
-         model="qwen2-math-72b-instruct",
-         messages=math_messages,
-         result_format='message',
-         stream=True
-     )
-     answer = None
-     for resp in response:
-         if resp.output is None:
-             continue
-         answer = resp.output.choices[0].message.content
-         yield answer.replace("\\", "\\\\")
-     print(f'query: {query}\nanswer: {answer}')
-     if answer is None:
-         math_messages.pop()
-     else:
-         math_messages.append({'role': 'assistant', 'content': answer})
-
-
- def math_chat_bot(image, sketchpad, question, state):
-     current_tab_index = state["tab_index"]
-     image_description = None
-     # Upload
-     if current_tab_index == 0:
-         if image is not None:
-             image_description = process_image(image)
-     # Sketch
-     elif current_tab_index == 1:
-         print(sketchpad)
-         if sketchpad and sketchpad["composite"]:
-             image_description = process_image(sketchpad["composite"], True)
-     yield from get_math_response(image_description, question)
-
- css = """
- #qwen-md .katex-display { display: inline; }
- #qwen-md .katex-display>.katex { display: inline; }
- #qwen-md .katex-display>.katex>.katex-html { display: inline; }
- """
-
- def tabs_select(e: gr.SelectData, _state):
-     _state["tab_index"] = e.index
-
-
- # Create the Gradio interface
- with gr.Blocks(css=css) as demo:
-     gr.HTML("""\
- <p align="center"><img src="https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png" style="height: 60px"/><p>"""
-         """<center><font size=8>📖 Qwen2-Math Demo</center>"""
-         """\
- <center><font size=3>This WebUI is based on Qwen2-VL for OCR and Qwen2-Math for mathematical reasoning. You can input either images or texts of mathematical or arithmetic problems.</center>"""
-     )
-     state = gr.State({"tab_index": 0})
-     with gr.Row():
-         with gr.Column():
-             with gr.Tabs() as input_tabs:
-                 with gr.Tab("Upload"):
-                     input_image = gr.Image(type="pil", label="Upload"),
-                 with gr.Tab("Sketch"):
-                     input_sketchpad = gr.Sketchpad(type="pil", label="Sketch", layers=False)
-             input_tabs.select(fn=tabs_select, inputs=[state])
-             input_text = gr.Textbox(label="input your question")
-             with gr.Row():
-                 with gr.Column():
-                     clear_btn = gr.ClearButton(
-                         [*input_image, input_sketchpad, input_text])
-                 with gr.Column():
-                     submit_btn = gr.Button("Submit", variant="primary")
-         with gr.Column():
-             output_md = gr.Markdown(label="answer",
-                 latex_delimiters=[{
-                     "left": "\\(",
-                     "right": "\\)",
-                     "display": True
-                 }, {
-                     "left": "\\begin\{equation\}",
-                     "right": "\\end\{equation\}",
-                     "display": True
-                 }, {
-                     "left": "\\begin\{align\}",
-                     "right": "\\end\{align\}",
-                     "display": True
-                 }, {
-                     "left": "\\begin\{alignat\}",
-                     "right": "\\end\{alignat\}",
-                     "display": True
-                 }, {
-                     "left": "\\begin\{gather\}",
-                     "right": "\\end\{gather\}",
-                     "display": True
-                 }, {
-                     "left": "\\begin\{CD\}",
-                     "right": "\\end\{CD\}",
-                     "display": True
-                 }, {
-                     "left": "\\[",
-                     "right": "\\]",
-                     "display": True
-                 }],
-                 elem_id="qwen-md")
-     submit_btn.click(
-         fn=math_chat_bot,
-         inputs=[*input_image, input_sketchpad, input_text, state],
-         outputs=output_md)
- demo.launch()
+ from transformers import AutoTokenizer, AutoModel
+
+ def get_dialogue_history(dialogue_history_list: list):
+
+     dialogue_history_tmp = []
+     for item in dialogue_history_list:
+         if item['role'] == 'counselor':
+             text = '咨询师:'+ item['content']
+         else:
+             text = '来访者:'+ item['content']
+         dialogue_history_tmp.append(text)
+
+     dialogue_history = '\n'.join(dialogue_history_tmp)
+
+     return dialogue_history + '\n' + '咨询师:'
+
+ def get_instruction(dialogue_history):
+     instruction = f'''现在你扮演一位专业的心理咨询师,你具备丰富的心理学和心理健康知识。你擅长运用多种心理咨询技巧,例如认知行为疗法原则、动机访谈技巧和解决问题导向的短期疗法。以温暖亲切的语气,展现出共情和对来访者感受的深刻理解。以自然的方式与来访者进行对话,避免过长或过短的回应,确保回应流畅且类似人类的对话。提供深层次的指导和洞察,使用具体的心理概念和例子帮助来访者更深入地探索思想和感受。避免教导式的回应,更注重共情和尊重来访者的感受。根据来访者的反馈调整回应,确保回应贴合来访者的情境和需求。请为以下的对话生成一个回复。
+
+ 对话:
+ {dialogue_history}'''
+
+     return instruction
+
+
+ tokenizer = AutoTokenizer.from_pretrained('qiuhuachuan/MeChat', trust_remote_code=True)
+ model = AutoModel.from_pretrained('qiuhuachuan/MeChat', trust_remote_code=True).half().cuda()
+ model = model.eval()
+
+ dialogue_history_list = []
+ while True:
+     usr_msg = input('来访者:')
+     if usr_msg == '0':
+         exit()
+     else:
+         dialogue_history_list.append({
+             'role': 'client',
+             'content': usr_msg
+         })
+         dialogue_history = get_dialogue_history(dialogue_history_list=dialogue_history_list)
+         instruction = get_instruction(dialogue_history=dialogue_history)
+         response, history = model.chat(tokenizer, instruction, history=[], temperature=0.8, top_p=0.8)
+         print(f'咨询师:{response}')
+         dialogue_history_list.append({
+             'role': 'counselor',
+             'content': response
+         })
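
Note on the new app.py: it replaces the Qwen2-Math Gradio demo with a console loop around the qiuhuachuan/MeChat model. Each turn, get_dialogue_history renders the history with the role labels '来访者' (client) and '咨询师' (counselor) and leaves a trailing '咨询师:' for the model to complete, and get_instruction wraps that transcript in a Chinese system prompt asking the model to act as a warm, empathetic professional counselor and reply to the dialogue. A minimal sketch of how these two helpers behave without loading the model; the sample client message is made up for illustration:

    # Hypothetical smoke test for the prompt-building helpers defined in the new app.py;
    # it skips loading the MeChat weights and only exercises the string formatting.
    dialogue_history_list = [
        {'role': 'client', 'content': '最近我总是睡不好。'},  # made-up client turn
    ]

    dialogue_history = get_dialogue_history(dialogue_history_list=dialogue_history_list)
    # -> '来访者:最近我总是睡不好。\n咨询师:'  (history plus an open counselor turn)

    instruction = get_instruction(dialogue_history=dialogue_history)
    print(instruction)  # the counseling system prompt followed by the transcript above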