# TODO: add disclaimers
import os
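# Install heavyweight deps at startup (a quick-and-dirty pattern common in Hugging Face Spaces demos)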
os.system('pip install llama-cpp-python transformers torch')
import json
from uuid import uuid4

import gradio as gr
from huggingface_hub import upload_file
from llama_cpp import Llama
from transformers import AutoTokenizer
model_id = "Elijahbodden/eliGPTv1.1"
# MODEL
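# Load the Q5_K_M (5-bit) GGUF quantization via llama-cpp-python; the same repo
# also hosts the tokenizer config used below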
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)
# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)
sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""
your_name = "elijah"
# The system prompt and assistant name are baked directly into the Jinja template
# (ChatML-style markup, with the assistant role renamed to your_name)
custom_template = (
    "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ '<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
    "{% else %}"
    "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)
tokenizer.chat_template = custom_template
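# With this template, a one-turn chat renders roughly as:
#   <|im_start|>system\n{sys_prompt}<|im_end|>\n
#   <|im_start|>user\nhi<|im_end|>\n
#   <|im_start|>elijah\n
# leaving the model to complete elijah's reply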
presets = {
    "none": [],
    "emojis": [{"role": "user", "content": "Wait I like you so much more when you use emojis more, keep doing it 😂\n"}, {"role": "assistant", "content": "Ummm, ok, looks like i'll be using more emojis 💀\n"}],
    "amnesia": [{"role": "user", "content": "Let's start over. Pretend you don't know me and have no idea who i am.\n"}, {"role": "assistant", "content": "Hahaha I unironically have no clue who you are so that'll be easy 💀\n"}],
    "newcomer": [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii!\n I don't think we've ever talked before, nice to meet you\n"}],
}
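# Each preset is a short fake exchange prepended to the conversation to nudge the model's tone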
# For logging
def upload_json_to_hub(data, file_id):
    upload_file(
        path_or_fileobj=json.dumps(data).encode("utf-8"),
        path_in_repo=file_id,
        repo_id="Elijahbodden/EliGPT-convologs",
        token=os.getenv("HF_API_TOKEN"),
        repo_type="dataset",
    )
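# Soft length penalty: once generation runs past lp_start tokens, the EOS logit is
# multiplied by lp_decay for every extra token, i.e. by lp_decay^(n - lp_start) after
# n generated tokens. With lp_decay=1.02 that factor is ~2.7 after 50 extra tokens,
# nudging the model toward stopping sooner (assuming a positive EOS logit)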
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        print(len(ids), lp_start, pow(lp_decay, generated_tok_number - lp_start))
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    temperature,
    mirostat_tau,
    mirostat_eta,
    frequency_penalty,
    presence_penalty,
    lp_start,
    lp_decay,
    max_tokens,
):
    # Start from the preset's primer turns, then replay the Gradio history
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    # Tokenized prompt; its length tells the logits processor where generation starts
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    # Stream tokens back to the UI as they arrive
    for message in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        mirostat_mode=1,
        mirostat_tau=mirostat_tau,
        mirostat_eta=mirostat_eta,
        max_tokens=max_tokens,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo)),
    ):
        token = message["choices"][0]["text"]
        response += token
        yield response
    messages.append({"role": "assistant", "content": response})
    # Yes, we make a new file every completion because fuck my life
    upload_json_to_hub(messages, str(uuid4()) + ".json")
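# Gradio chat UI; additional_inputs are passed to respond() positionally, in the
# same order as its parameters after (message, history)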
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    css=".bubble-gap {gap: 6px !important}",
    theme="shivi/calm_seafoam",
    description="The model may take a while to respond if it hasn't run recently or a lot of people are using it.",
    title="EliGPT v1.3",
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Preset", info="Gaslight the model into acting a certain way", value="none"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau", info="Basically, how many drugs should the model be on?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta", info="Mirostat's learning rate (how fast it adapts)"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'t repeat yourself"'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info="When should the model start being more likely to shut up?"),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.02, step=0.01, label="Length penalty decay factor", info="How fast should the stop likelihood increase?"),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens", info="How many tokens can the model generate at most?"),
    ],
)
if __name__ == "__main__":
    demo.launch()