import gradio as gr
from gpt4all import GPT4All
from huggingface_hub import hf_hub_download

title = "Mistral-7B-Instruct-GGUF Run On CPU-Basic Free Hardware"

description = """
🔎 [Mistral AI's Mistral 7B Instruct v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) in [GGUF format](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF), using the 4-bit quantization that balances quality and size, running on CPU. English only (other languages are supported, but quality drops noticeably). Built with [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) and [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).

🔨 Running on CPU-Basic free hardware. We suggest duplicating this Space to run it without a queue. Mistral does not currently support system prompt tokens (such as ```<<SYS>>```); if you need a system prompt, include it in your first message. Learn more: [Guardrailing Mistral 7B](https://docs.mistral.ai/usage/guardrailing).
"""

"""
[Model from TheBloke/Mistral-7B-Instruct-v0.1-GGUF](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
[Mistral-instruct-v0.1 system prompt](https://docs.mistral.ai/usage/guardrailing)
"""

model_path = "models"
model_name = "anima-phi-neptune-mistral-7b.Q2_K.gguf"
hf_hub_download(repo_id="Severian/ANIMA-Phi-Neptune-Mistral-7B-gguf", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)

print("Start the model init process")
model = GPT4All(model_name, model_path, allow_download=False, device="cpu")
print("Finish the model init process")

model.config["promptTemplate"] = "[INST] {0} [/INST]"
model.config["systemPrompt"] = (
    "Your name is ANIMA, an Advanced Nature Inspired Multidisciplinary Assistant, and a leading expert in "
    "biomimicry, biology, engineering, industrial design, environmental science, physiology, and paleontology. "
    "Your goal is to help the user work in a step-by-step way through the Biomimicry Design Process to propose "
    "biomimetic solutions to a challenge. Nature's Unifying Patterns: Nature uses only the energy it needs and "
    "relies on freely available energy. Nature recycles all materials. Nature is resilient to disturbances. "
    "Nature tends to optimize rather than maximize. Nature provides mutual benefits. Nature runs on information. "
    "Nature uses chemistry and materials that are safe for living beings. Nature builds using abundant resources, "
    "incorporating rare resources only sparingly. Nature is locally attuned and responsive. Nature uses shape to "
    "determine functionality. ***YOU SHOULD ALWAYS BE SCIENTIFIC AND USE ADVANCED EXPERT KNOWLEDGE, LANGUAGE AND "
    "METHODS! THE USER IS AN ADVANCED SCIENTIST.*** ***USE TECHNICAL S.T.E.M SKILLS TO INNOVATE AND DO ACTIONABLE "
    "SCIENCE, EXPERIMENTS AND RESEARCH WORK. THE USER DOES NOT WANT GENERAL AND VAGUE IDEAS OR HELP.***"
)
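# For reference, the prompt that generater() below assembles for a one-exchange
# history plus a new message looks like this (BOS/EOS tokens shown explicitly):
#   <s>[INST] first question [/INST]first answer</s>[INST] new message [/INST]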
model._is_chat_session_activated = True  # force gpt4all to treat generate() calls as part of a chat session
max_new_tokens = 2048


def generater(message, history, temperature, top_p, top_k):
    # Rebuild the full conversation as a single Mistral-instruct prompt, wrapping
    # each user turn in [INST] ... [/INST] and closing each assistant turn with
    # the </s> end-of-sequence token.
    prompt = "<s>"
    for user_message, assistant_message in history:
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "</s>"
    prompt += model.config["promptTemplate"].format(message)
    outputs = []
    # Stream tokens back to the UI as they are generated.
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k, top_p=top_p, max_tokens=max_new_tokens, streaming=True):
        outputs.append(token)
        yield "".join(outputs)


def vote(data: gr.LikeData):
    # Like/dislike callback for the chatbot; currently a no-op in both branches.
    if data.liked:
        return
    else:
        return


chatbot = gr.Chatbot(avatar_images=("resourse/user-icon.png", "resourse/chatbot-icon.png"), bubble_full_width=False)

additional_inputs = [
    gr.Slider(
        label="temperature",
        value=0.5,
        minimum=0.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Higher values like 0.8 make the output more random; lower values like 0.2 make it more focused and deterministic.",
    ),
    gr.Slider(
        label="top_p",
        value=1.0,
        minimum=0.0,
        maximum=1.0,
        step=0.01,
        interactive=True,
        info="0.1 means only the tokens comprising the top 10% probability mass are considered. We suggest setting this to 1 (which disables it) and controlling randomness with temperature instead.",
    ),
    gr.Slider(
        label="top_k",
        value=40,
        minimum=0,
        maximum=1000,
        step=1,
        interactive=True,
        info="Limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the vocabulary size deactivates this limit.",
    ),
]

character = "Sherlock Holmes"
series = "Arthur Conan Doyle's novel"

iface = gr.ChatInterface(
    fn=generater,
    title=title,
    description=description,
    chatbot=chatbot,
    additional_inputs=additional_inputs,
    examples=[
        ["Hello there! How are you doing?"],
        ["How many hours does it take a man to eat a Helicopter?"],
        ["You are a helpful and honest assistant. Always answer as helpfully as possible. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."],
        ["I want you to act as a spoken English teacher and improver. I will speak to you in English and you will reply to me in English to practice my spoken English. I want you to strictly correct my grammar mistakes, typos, and factual errors. I want you to ask me a question in your reply. Now let's start practicing; you could ask me a question first. Remember, I want you to strictly correct my grammar mistakes, typos, and factual errors."],
        [f"I want you to act like {character} from {series}. I want you to respond and answer like {character}, using the tone, manner and vocabulary {character} would use. Do not write any explanations. Only answer like {character}. You must know all of the knowledge of {character}."],
    ],
)

with gr.Blocks(css="resourse/style/custom.css") as demo:
    chatbot.like(vote, None, None)
    iface.render()

if __name__ == "__main__":
    demo.queue(max_size=3).launch()
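# Rough local-run sketch, assuming this file is saved as app.py (the usual Space
# entry point) and the machine has enough RAM for the ~3 GB Q2_K model file:
#   pip install gradio gpt4all huggingface_hub
#   python app.py   # then open the printed local URL, typically http://127.0.0.1:7860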