import spaces
import json
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
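
# Gradio Space that serves the Sakura-14B Japanese-to-Chinese light-novel
# translation model through llama.cpp (llama-cpp-python + llama-cpp-agent).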
repoId = "SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF"
filename = "sakura-14b-qwen2beta-v0.9.2-q4km.gguf"

# System prompt (in Chinese): instructs the model to translate Japanese light
# novels into fluent Simplified Chinese in light-novel style, using a supplied
# glossary, resolving pronouns from context, not confusing the subjects and
# objects of causative vs. passive voice, and neither adding pronouns absent
# from the source nor changing its line breaks.
systemMessage = "你是一个轻小说翻译模型,可以流畅通顺地使用给定的术语表以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,注意不要混淆使役态和被动态的主语和宾语,不要擅自添加原文中没有的代词,也不要擅自增加或减少换行。"
# Download the Sakura-14B GGUF weights into ./models
hf_hub_download(
    repo_id=repoId,
    filename=filename,
    local_dir="./models",
)
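
# Module-level cache: llm holds the loaded Llama instance and llm_model the
# filename it was loaded from, so later requests can reuse the same weights.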
llm = None
llm_model = None
@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    model=filename,
    system_message="",
    max_tokens=4096,
    temperature=0.1,
    top_p=0.3,
    top_k=40,
    repeat_penalty=1.1,
):
    # NOTE: Qwen2-based models such as Sakura-14B normally use a ChatML-style
    # template; the GEMMA_2 formatter is kept here as in the original code.
    chat_template = MessagesFormatterType.GEMMA_2

    global llm
    global llm_model

    # (Re)load the model only if none is loaded or a different file is requested
    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=True,
            n_gpu_layers=81,
            n_batch=1024,
            n_ctx=8192,
        )
        llm_model = model
    provider = LlamaCppPythonProvider(llm)
    agent = LlamaCppAgent(
        provider,
        system_prompt=systemMessage,
        predefined_messages_formatter_type=chat_template,
        debug_output=True,
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True
    # Rebuild the chat history; each past user turn is wrapped in the Chinese
    # glossary-translation instruction the Sakura model expects ("According to
    # the following glossary (may be empty): ... translate the Japanese text
    # below into Chinese and list which glossary entries were used").
    messages = BasicChatHistory()
    for msn in history:
        user = {
            'role': Roles.user,
            'content': "根据以下术语表(可以为空):\n" + "将下面的日文文本根据上述术语表的对应关系和备注翻译成中文,并且列印出使用哪些术语表:" + msn[0]
        }
        assistant = {
            'role': Roles.assistant,
            'content': msn[1]
        }
        messages.add_message(user)
        messages.add_message(assistant)
    # Stream the response token by token so Gradio can render partial output
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False,
    )

    outputs = ""
    for output in stream:
        outputs += output
        yield outputs
description = """<p align="center">Defaults to Sakura-14B-Qwen2beta</p>
<p><center>
<a href="https://huggingface.co/SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF" target="_blank">[Sakura-14B-Qwen2beta Model]</a>
</center></p>
"""
demo = gr.ChatInterface(
    respond,
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    title="Chat with Sakura 14B using llama.cpp",
    description=description,
    chatbot=gr.Chatbot(
        scale=1,
        likeable=False,
        show_copy_button=True,
    ),
)
if __name__ == "__main__":
    demo.launch()