# TODO: add disclaimers
import os

# Hugging Face Spaces workaround: install heavyweight deps at startup
os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer
model_id = "Elijahbodden/eliGPTv1.1"
# MODEL
# Quantized GGUF weights, pulled straight from the Hub
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)
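# Optional smoke test (a sketch, not part of the original app; left commented out):
# confirms the model loads and generates before wiring up the UI.
# print(model.create_completion("Hello", max_tokens=8)["choices"][0]["text"])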
# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)
sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""
your_name = "elijah"
# ChatML-style template with the assistant role renamed to your_name; the system
# prompt is spliced in as a Jinja string literal. (The original wrapped it in an
# always-true {% if %} guard and ended with a stray line continuation that broke
# the following assignment.)
custom_template = (
    "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '<|im_start|>user\n' + message['content'] + '\n<|im_end|>\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ '<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
    "{% else %}"
    "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)
tokenizer.chat_template = custom_template
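# Rendered, a one-turn conversation looks roughly like this (sketch):
#   <|im_start|>system
#   SUMMARY - ELIJAH: ...<|im_end|>
#   <|im_start|>user
#   hey
#   <|im_end|>
#   <|im_start|>elijah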
presets = {
    # Make sure assistant responses end with a "\n" because reasons
    "Default": [{"role": "user", "content": "good convo, bye"}, {"role": "assistant", "content": "Haha cool ttyl\n"}],
    "Rizz 😘": [{"role": "user", "content": "omg it's so hot when you flirt with me"}, {"role": "assistant", "content": "haha well you're lucky you can even string a sentence together, the way you take my breath away 😘\n"}, {"role": "user", "content": "alright love you, gn!"}, {"role": "assistant", "content": "ttyl babe 💕\n"}],
    "Thinky": [{"role": "user", "content": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"role": "assistant", "content": "nah our deep convos are always the best, we should talk again soon\nttyl\n"}],
}
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    # Length penalty: once more than lp_start tokens have been generated, scale the
    # EOS logit by lp_decay^(tokens past lp_start) so the model grows likelier to stop.
    # (Note this assumes the EOS logit is positive; a negative logit would be pushed down.)
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
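# Worked example (sketch, using the UI defaults): with lp_start=5 and lp_decay=1.01,
# the EOS logit is multiplied by 1.01**45 ≈ 1.56 after 50 generated tokens and by
# 1.01**100 ≈ 2.70 after 105, so long replies become steadily more likely to end.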
def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    temperature,
    min_p,
    lp_start,
    lp_decay,
    frequency_penalty,
    presence_penalty,
    max_tokens,
):
    # Seed the conversation with the chosen preset, then replay the chat history
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    print(tokenizer.apply_chat_template(messages, tokenize=False))
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        min_p=min_p,
        max_tokens=max_tokens,
        # Effectively disable top-k pruning
        top_k=100000000,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo)),
    ):
        response += chunk["choices"][0]["text"]
        yield response
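# UI — gr.ChatInterface consumes the generator above, re-rendering the growing
# partial response on every yield so tokens stream into the chat bubble.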
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    css=".bubble-gap {gap: 6px !important}",
    theme="shivi/calm_seafoam",
    description="The model may take a while if it hasn't run recently or a lot of people are using it",
    title="EliGPT v1.3",
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Personality preset", info="Very slightly influences the model's personality [WARNING: changing this while there are messages in the chat makes the model very slow]", value="Default"),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
        gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info="When should the model start being more likely to shut up?"),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.01, step=0.001, label="Length penalty decay factor", info="How fast should that stop likelihood increase?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info="\"Don't repeat yourself\""),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info="\"Use lots of diverse words\""),
        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many tokens can the model generate at most?"),
    ],
)
if __name__ == "__main__":
    demo.launch()