Dibiddo committed on
Commit
08863c9
·
verified ·
1 Parent(s): cd9dd4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -7
app.py CHANGED
@@ -5,7 +5,7 @@ import gradio as gr
5
  # 加载指令模型
6
  model = AutoModelForCausalLM.from_pretrained(
7
  "MediaTek-Research/Breeze-7B-Instruct-v1_0",
8
- device_map="auto", # 保留这一行以使用设备映射
9
  torch_dtype=torch.bfloat16,
10
  )
11
 
@@ -15,14 +15,16 @@ tokenizer = AutoTokenizer.from_pretrained("MediaTek-Research/Breeze-7B-Instruct-
15
  # 定义SYS_PROMPT
16
  SYS_PROMPT = "You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan."
17
 
 
 
 
18
  def generate_response(user_input):
19
- # 定义聊天内容
20
- chat = [
21
- {"role": "user", "content": user_input},
22
- ]
23
 
24
  # 应用聊天模板
25
- prompt = tokenizer.apply_chat_template(chat, tokenize=False)
26
  full_prompt = f"<s>{SYS_PROMPT} [INST] {prompt} [/INST]"
27
 
28
  # 生成文本
@@ -38,10 +40,22 @@ def generate_response(user_input):
38
 
39
  # 解码输出
40
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
41
  return generated_text
42
 
43
  # 创建Gradio界面
44
- iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
 
 
 
 
 
 
 
 
45
 
46
  # 启动Gradio界面并共享链接
47
  iface.launch(share=True)
 
# Load the Breeze-7B instruction-tuned causal LM.
# device_map="auto" lets accelerate place the weights on whatever
# devices are available; bfloat16 halves the memory footprint.
model = AutoModelForCausalLM.from_pretrained(
    "MediaTek-Research/Breeze-7B-Instruct-v1_0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
 
 
# System prompt prepended to every request (text taken from the model card).
SYS_PROMPT = "You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan."

# Running conversation history, appended to by generate_response.
# NOTE(review): module-level state — shared by every caller of the app.
chat_history = []
21
  def generate_response(user_input):
22
+ global chat_history
23
+ # 将用户输入添加到对话历史记录
24
+ chat_history.append({"role": "user", "content": user_input})
 
25
 
26
  # 应用聊天模板
27
+ prompt = tokenizer.apply_chat_template(chat_history, tokenize=False)
28
  full_prompt = f"<s>{SYS_PROMPT} [INST] {prompt} [/INST]"
29
 
30
  # 生成文本
 
40
 
41
  # 解码输出
42
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
43
+
44
+ # 将生成的文本添加到对话历史记录
45
+ chat_history.append({"role": "assistant", "content": generated_text})
46
+
47
  return generated_text
48
 
# Build the Gradio UI.
# Fix: dropped `live=True` — with live mode the callback fires on every
# keystroke, which would launch a multi-second LLM generation per character
# typed. The user now submits explicitly with the button instead.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, placeholder="請輸入你的問題..."),
    outputs=gr.Textbox(lines=10),
    title="醫療問答助手",
    description="這是一個基於 MediaTek-Research/Breeze-7B-Instruct-v1_0 模型的醫療問答助手。",
    theme="default",  # other themes are available, e.g. "huggingface"
)

# Launch the interface with a temporary public share link.
iface.launch(share=True)