# TODO: add disclaimers and logging
import gradio as gr
import os

# Build-time setup: install OpenBLAS, rebuild (this assumes a llama.cpp
# Makefile in the working directory), and install llama-cpp-python with BLAS
# enabled. `-y` keeps apt from prompting in this non-interactive environment.
os.system("apt install -y libopenblas-dev")
os.system("make clean && LLAMA_OPENBLAS=1 make")
os.system('CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python transformers')

from llama_cpp import Llama
from transformers import AutoTokenizer

model_id = "Elijahbodden/eliGPTv1.1"

# MODEL
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)

# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better
"""

your_name = "elijah"

# ChatML-style template. The f-string bakes the Python-side check
# `'sys_prompt' in locals()` into the template as a literal True/False, so the
# system block is only rendered when sys_prompt was defined at build time.
custom_template = (
    f"{{% if {'sys_prompt' in locals()} %}}"
    "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% endif %}"
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ '<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
    "{% else %}"
    "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)

tokenizer.chat_template = custom_template


def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens,
    temperature,
    mirostat_tau,
    mirostat_eta,
):
    messages = []

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    convo = tokenizer.apply_chat_template(messages, tokenize=False)
    print(convo)
    # Pass the slider values through rather than hardcoding temperature and
    # max_tokens, so the UI controls actually take effect. The loop variable is
    # named `chunk` to avoid shadowing the `message` parameter.
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        mirostat_mode=1,
        mirostat_tau=mirostat_tau,
        mirostat_eta=mirostat_eta,
        max_tokens=max_tokens,
    ):
        token = chunk["choices"][0]["text"]
        response += token
        yield response


"""
For information on how to customize the ChatInterface, see the Gradio docs:
https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta"),
    ],
)

if __name__ == "__main__":
    demo.launch()
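

# --- Illustrative appendix (not part of the original app) ---
# A minimal sketch showing how to verify that the custom template above
# renders the expected ChatML-style layout. `_preview_prompt` is a
# hypothetical helper name; it only calls tokenizer.apply_chat_template,
# which is a real transformers API. Nothing below is called by the app.
def _preview_prompt():
    sample = [
        {"role": "user", "content": "hey, what are you up to?"},
        {"role": "assistant", "content": "reading about brain-computer interfaces"},
        {"role": "user", "content": "nice, tell me more"},
    ]
    # Expected shape: one <|im_start|>system block, alternating
    # <|im_start|>user / <|im_start|>elijah turns, and a trailing open
    # "<|im_start|>elijah\n" header for the model to complete.
    print(tokenizer.apply_chat_template(sample, tokenize=False))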
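

# A second sketch: because `respond` is a generator that yields the full
# response so far on each token, the same function Gradio streams from can be
# driven manually for a quick CLI check. `_cli_smoke_test` is a hypothetical
# name and the slider values below are just the UI defaults, not tuned settings.
def _cli_smoke_test():
    final = ""
    for partial in respond(
        "hi!", [], max_tokens=64, temperature=0.7, mirostat_tau=3.0, mirostat_eta=0.1
    ):
        final = partial  # each yield replaces the previous partial response
    print(final)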
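

# A hedged sketch of the "add logging" TODO at the top of this file, assuming
# plain stdlib logging is acceptable. The logger name and log path are
# placeholders, not part of the original app; to use it, call
# `_log_exchange(message, response)` at the end of `respond`.
import logging

logging.basicConfig(filename="chat.log", level=logging.INFO, format="%(asctime)s %(message)s")
_chat_logger = logging.getLogger("eligpt")  # hypothetical logger name


def _log_exchange(user_message: str, model_response: str) -> None:
    # Record one user/assistant turn; %r keeps newlines visible in the log.
    _chat_logger.info("user=%r assistant=%r", user_message, model_response)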