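# EliGPT v1.3 Space: a Gradio chat demo that serves a GGUF Llama 3 8B finetune
# through llama-cpp-python, with personality presets and a custom length-penalty
# logits processor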
import os
os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer
from huggingface_hub import upload_file
import json
from uuid import uuid4

model_id = "Elijahbodden/eliGPTv1.3"
# MODEL
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="model.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)
# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

presets = {
    # Gaslight the model by adding sentence fragments to the start
    # It's weird but it works
    # If you're curious, default makes sure it doesn't hallucinate by showing that the next message is the start of a new convo
    "Default": [{"from": "human", "value": "good convo, bye"}, {"from": "gpt", "value": "Haha cool ttyl"}],
    "Rizz ????": [{"from": "human", "value": "omg it's so hot when you flirt with me"}, {"from": "gpt", "value": "haha well you're lucky can even string a sentence together, the way you take my breath away π"}, {"from": "human", "value": "alright love you, gn!"}, {"from": "gpt", "value": "ttyl babe π"}],
    "Thinky": [{"from": "human", "value": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"from": "gpt", "value": "nah our deep convos are always the best, we should talk again soon\nttyl"}],
}
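# LENGTH PENALTY
# Once more than lp_start tokens have been generated, the EOS logit gets multiplied by
# lp_decay ** (tokens past lp_start), so the model becomes increasingly inclined to stop.
# With the default lp_decay=1.015, 100 tokens past lp_start the factor is 1.015**100 ≈ 4.4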
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        print(generated_tok_number, lp_start, pow(lp_decay, generated_tok_number - lp_start))
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    min_p,
    temperature,
    lp_start,
    lp_decay,
    frequency_penalty,
    presence_penalty,
    max_tokens
):
    print(preset, temperature, min_p, lp_start, lp_decay, frequency_penalty, presence_penalty, max_tokens)
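    # Build the ShareGPT-style ("from"/"value") message list: preset warm-up turns,
    # then the chat history, then the new user message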
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"from": "human", "value": val[0]})
        if val[1]:
            messages.append({"from": "gpt", "value": val[1]})
    messages.append({"from": "human", "value": message})

    response = ""
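    # Render the conversation with the model's chat template (printed for debugging),
    # then tokenize it for llama.cpp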
    print(tokenizer.apply_chat_template(messages, tokenize=False))
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
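    # Stream the completion token by token; len(convo) is the prompt length, which lets
    # the logits processor tell how many tokens have been generated so far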
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        min_p=min_p,
        max_tokens=max_tokens,
        # Disable top-k pruning
        top_k=100000000,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo))
    ):
        token = chunk["choices"][0]["text"]
        response += token
        yield response
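# UI
# gr.ChatInterface streams respond(); the widgets in additional_inputs are passed to
# respond() in order, after the message and history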
ci = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Personality preset", info="Slightly influence the model's personality [WARNING: IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW]", value="Default"),
        # ("The model will become slow" is bc this uncaches the prompt and prompt processing is a big part of the generation time)
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'t repeat yourself"'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many tokens can the model generate at most?"),
    ],
)
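# PAGE LAYOUT
# Title, a short description, a collapsed Q&A accordion, and the chat interface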
with gr.Blocks(css=".bubble-gap {gap: 6px !important}", theme="shivi/calm_seafoam") as demo:
    gr.Markdown("# EliGPT v1.3")
    gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)")
    with gr.Accordion("Q&A:", open=False):
        gr.Markdown("""Q: Why is the model so fucking slow
A: The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single, very slow CPU). You can duplicate the space to get your own (free) instance with no wait times.

Q: Why is the model so dumb
A: Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart.

Q: Either it just made something up or I don't know you at all
A: Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt.
""")
    ci.render()

if __name__ == "__main__":
    demo.launch()