import gradio as gr
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer
# Load the model and tokenizer.
model_id = "hsuwill000/Qwen2.5-3B-Instruct-openvino"
# OVModelForCausalLM selects its OpenVINO device via a `device` string
# ("CPU", "GPU", ...); transformers' `device_map` does not apply here.
model = OVModelForCausalLM.from_pretrained(model_id, device="CPU")
tokenizer = AutoTokenizer.from_pretrained(model_id)
def respond(prompt, history):
    # Answer the current prompt; `history` is required by the
    # ChatInterface contract but unused, so each turn is independent.
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    # Render the messages into a single prompt string, ending with the
    # assistant header so the model continues as the assistant.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
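    # For Qwen2.5 this yields a ChatML-style string, roughly:
    #   <|im_start|>system\n...<|im_end|>\n<|im_start|>user\n...<|im_end|>\n
    #   <|im_start|>assistant\n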
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )
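    # Note: `generate` returns the prompt tokens followed by the newly
    # generated tokens, so the prompt portion must be sliced off below
    # before decoding.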
    # Keep only the completion tokens and decode them to text.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # Log the exchange for debugging.
    print(f"Messages: {messages}")
    print(f"Reply: {response}")
    return response
# Set up the Gradio chat interface.
demo = gr.ChatInterface(
    fn=respond,
    title="Qwen2.5-3B-Instruct-openvino",
    description="Chat with Qwen2.5-3B-Instruct running on OpenVINO.",
    type="messages",
)
if __name__ == "__main__":
    demo.launch()
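
# Usage sketch (not part of the original app): once the server is running,
# the chat endpoint can also be queried programmatically. "/chat" is
# ChatInterface's default api_name, and the URL assumes Gradio's default
# local host and port:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("Hello!", api_name="/chat"))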