Elijahbodden committed on
Commit bc659ca · verified · 1 Parent(s): fd4ffc9

Update app.py

Files changed (1)
  1. app.py +3 -30
app.py CHANGED
@@ -26,37 +26,10 @@ model = Llama.from_pretrained(
  # TOKENIZER AND TEMPLATE
  tokenizer = AutoTokenizer.from_pretrained(model_id)

- sys_prompt = """SUMMARY - ELIJAH:
- Age: 16
- Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
- Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
- Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
- Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""
-
-
- your_name = "elijah"
- custom_template = \
- f"{{% if {'sys_prompt' in locals()} %}}"\
- "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"\
- "{% endif %}"\
- "{% for message in messages %}"\
- "{% if message['role'] == 'user' %}"\
- "{{'<|im_start|>user\n' + message['content'] + '\n<|im_end|>\n'}}"\
- "{% elif message['role'] == 'assistant' %}"\
- "{{'<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"\
- "{% else %}"\
- "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"\
- "{% endif %}"\
- "{% endfor %}"\
- "{{ '<|im_start|>" + your_name + "\n' }}"\
-
- tokenizer.chat_template = custom_template
-
  presets = {
- # Make sure assistant responses end with a "\n" because reasons
- "Default" : [{"role": "user", "content": "good convo, bye"}, {"role": "assistant", "content": "Haha cool ttyl\n"}],
- "Rizz ????" : [{"role": "user", "content": "omg it's so hot when you flirt with me"}, {"role": "assistant", "content": "haha well you're lucky can even string a sentence together, the way you take my breath away 😘\n"}, {"role": "user", "content": "alright love you, gn!"}, {"role": "assistant", "content": "ttyl babe 💕\n"}],
- "Thinky" : [{"role": "user", "content": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"role": "assistant", "content": "nah our deep convos are always the best, we should talk again soon\nttyl\n"}],
+ "Default" : [{"role": "user", "content": "good convo, bye"}, {"role": "assistant", "content": "Haha cool ttyl"}],
+ "Rizz ????" : [{"role": "user", "content": "omg it's so hot when you flirt with me"}, {"role": "assistant", "content": "haha well you're lucky can even string a sentence together, the way you take my breath away 😘"}, {"role": "user", "content": "alright love you, gn!"}, {"role": "assistant", "content": "ttyl babe 💕"}],
+ "Thinky" : [{"role": "user", "content": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"role": "assistant", "content": "nah our deep convos are always the best, we should talk again soon\nttyl"}],
  }

  def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
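For context on the template block removed above, here is a minimal sketch (not part of app.py) of how such a ChatML-style template renders one of the preset conversations. It assumes the `model_id`, `custom_template`, and `presets` definitions shown in the old version of the file.

```python
# Sketch only: rendering one preset with the removed custom template.
# Assumes model_id, custom_template, and presets exist as defined in the old app.py.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.chat_template = custom_template  # the Jinja string deleted in this commit

# Produces a ChatML-style prompt that ends with "<|im_start|>elijah\n",
# leaving the model to continue as the "elijah" persona.
prompt = tokenizer.apply_chat_template(presets["Default"], tokenize=False)
print(prompt)
```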
 
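The unchanged `custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len)` context line suggests generation is shaped by a custom length-penalty processor. Below is a hedged sketch (the values and the wiring are illustrative, not taken from app.py) of how such a callable is typically bound with `functools.partial` and passed to llama-cpp-python, which calls each processor as `(ids, logits) -> logits`.

```python
# Sketch only: wiring a custom logits processor into llama-cpp-python.
# The lp_start, lp_decay, and prompt_tok_len values are hypothetical placeholders.
from functools import partial
from llama_cpp import LogitsProcessorList

processors = LogitsProcessorList([
    partial(
        custom_lp_logits_processor,
        lp_start=1.0,        # hypothetical starting penalty
        lp_decay=0.95,       # hypothetical per-token decay
        prompt_tok_len=128,  # hypothetical prompt length in tokens
    )
])

# model is the Llama.from_pretrained(...) instance referenced in the hunk header.
output = model("<|im_start|>user\nhello\n<|im_end|>\n",
               logits_processor=processors, max_tokens=128)
```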