import spaces
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles

# Download the available GGUF quantizations of Dorna-Llama3-8B-Instruct
# into the working directory so the UI can switch between them.
for filename in [
    "dorna-llama3-8b-instruct.Q4_0.gguf",
    "dorna-llama3-8b-instruct.Q8_0.gguf",
    "dorna-llama3-8b-instruct.Q5_0.gguf",
    "dorna-llama3-8b-instruct.bf16.gguf",
]:
    hf_hub_download(
        repo_id="PartAI/Dorna-Llama3-8B-Instruct-GGUF",
        filename=filename,
        local_dir=".",
    )

# Custom styling: Vazirmatn font for Persian text, dark-mode chat bubbles,
# and left-to-right rendering for code blocks inside RTL messages.
# The @import must come before all other rules, or browsers ignore it.
css = """
@import url('https://fonts.googleapis.com/css2?family=Vazirmatn&display=swap');

.message-row {
    justify-content: space-evenly !important;
}
.message-bubble-border {
    border-radius: 6px !important;
}
.dark.message-bubble-border {
    border-color: #343140 !important;
}
.dark.user {
    background: #1e1c26 !important;
}
.dark.assistant.dark, .dark.pending.dark {
    background: #16141c !important;
}
body, .gradio-container, .gr-button, .gr-input, .gr-slider, .gr-dropdown, .gr-markdown {
    font-family: 'Vazirmatn', sans-serif !important;
}
._button {
    font-size: 20px;
}
pre, code {
    direction: ltr !important;
    unicode-bidi: plaintext !important;
}
"""


def get_messages_formatter_type(model_name):
    # Every Dorna GGUF variant is served with the ChatML message format.
    return MessagesFormatterType.CHATML


@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    # max_tokens,
    temperature,
    # top_p,
    # top_k,
    # repeat_penalty,
    model,
):
    chat_template = get_messages_formatter_type(model)

    # The selected model file is loaded on every request, with all layers
    # offloaded to the GPU (n_gpu_layers=-1).
    llm = Llama(
        model_path=f"./{model}",
        n_gpu_layers=-1,
        n_ctx=2048,
    )
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=system_message,
        predefined_messages_formatter_type=chat_template,
        debug_output=True,
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    # settings.top_k = top_k
    # settings.top_p = top_p
    # settings.max_tokens = max_tokens
    # settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Rebuild the chat history from Gradio's (user, assistant) tuples.
    messages = BasicChatHistory()
    for user_msg, assistant_msg in history:
        messages.add_message({"role": Roles.user, "content": user_msg})
        messages.add_message({"role": Roles.assistant, "content": assistant_msg})

    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False,
    )

    # Stream partial outputs back to the UI as they are generated.
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs


PLACEHOLDER = """