# TODO: add disclaimers
import os

# Quick-and-dirty dependency install at startup (huggingface_hub comes in as a transformers dependency)
os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer
from huggingface_hub import upload_file
import json
from uuid import uuid4

model_id = "Elijahbodden/eliGPTv1.1"

# MODEL
# from_pretrained downloads and caches the quantized GGUF from the Hub on first run
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)


# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)
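# NOTE: this assumes the HF tokenizer's vocab matches the GGUF's, since token ids
# from apply_chat_template are fed straight into llama-cpp in respond() below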

sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""


your_name = "elijah"
# ChatML-style template where assistant turns are tagged with your_name instead
# of "assistant". The f-string on the first line bakes `'sys_prompt' in locals()`
# into the template as a literal True/False, so the system block only renders
# when sys_prompt is defined.
custom_template = (
    f"{{% if {'sys_prompt' in locals()} %}}"
      "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% endif %}"
    "{% for message in messages %}"
        "{% if message['role'] == 'user' %}"
            "{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}"
        "{% elif message['role'] == 'assistant' %}"
            "{{'<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
        "{% else %}"
            "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
        "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)

tokenizer.chat_template = custom_template
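# Rough sanity check (illustrative output, truncated):
# tokenizer.apply_chat_template([{"role": "user", "content": "hi"}], tokenize=False)
#   -> '<|im_start|>system\nSUMMARY - ELIJAH:...<|im_end|>\n<|im_start|>user\nhi<|im_end|>\n<|im_start|>elijah\n'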

# Few-shot primer exchanges that get prepended as fake history to nudge the model's tone
presets = {
    "none" : [],
    "emojis" : [{"role": "user", "content": "Wait I like you so much more when you use emojis more, keep doing it πŸ˜‚\n"}, {"role": "assistant", "content": "Ummm, ok, looks like i'll be using more emojis πŸ’€\n"}],
    "amnesia" : [{"role": "user", "content": "Let's start over. Pretend you don't know me and have no idea who i am.\n"}, {"role": "assistant", "content": "Hahaha I unironically have no clue who you are so that'll be easy πŸ’€\n"}],
    "newcomer" : [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii!\n I don't think we've ever talked before, nice to meet you\n"}],
}

# For logging - push each finished conversation to a dataset repo as its own JSON file
def upload_json_to_hub(data, file_id):
    upload_file(
        path_or_fileobj=json.dumps(data).encode('utf-8'),
        path_in_repo=file_id,
        repo_id="Elijahbodden/EliGPT-convologs",
        token=os.getenv('HF_API_TOKEN'),
        repo_type="dataset",
    )
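# e.g. upload_json_to_hub(messages, f"{uuid4()}.json"), as done at the end of respond()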

def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    # Homemade length penalty: after lp_start generated tokens, scale the EOS logit
    # by lp_decay per extra token so the model grows likelier to stop
    # (with lp_decay > 1 this only raises the stop probability while that logit is positive)
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        print(generated_tok_number, lp_start, pow(lp_decay, generated_tok_number - lp_start))  # debug
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
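# Worked example: with the UI defaults lp_start=32 and lp_decay=1.02, at 40
# generated tokens the EOS logit is multiplied by 1.02**8 ≈ 1.17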

def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    temperature,
    mirostat_tau,
    mirostat_eta,
    frequency_penalty,
    presence_penalty,
    lp_start,
    lp_decay,
    max_tokens
):
    
    # Start from the preset's primer messages, then replay the Gradio history
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    # Token ids from the HF tokenizer are passed directly to llama-cpp as the prompt
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        mirostat_mode=1,
        mirostat_tau=mirostat_tau,
        mirostat_eta=mirostat_eta,
        max_tokens=max_tokens,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo))
    ):
        token = chunk["choices"][0]["text"]

        response += token
        yield response
        
    messages.append({"role": "assistant", "content": response})

    # Yes we make a new file every completion because fuck my life
    upload_json_to_hub(messages, str(uuid4()) + ".json")


demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    css=".bubble-gap {gap: 6px !important}",
    theme="shivi/calm_seafoam",
    description="The model may take a while if it hasn't run recently or a lot of people are using it",
    title="EliGPT v1.3",
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Preset", info="Gaslight the model into acting a certain way - WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW FOR YOU", value="none"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau", info="Basically, how many drugs should the model be on?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta", info="I don't even know man"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'t repeat yourself"'),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
        gr.Slider(minimum=0, maximum=512, value=32, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.02, step=0.01, label="Length penalty decay factor", info='How fast should the stop likelihood increase?'),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens", info="How many tokens can the model generate?"),
    ],
)
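# Gradio passes additional_inputs to respond() positionally after (message, history),
# in the order listed above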


if __name__ == "__main__":
    demo.launch()