Elijahbodden committed on
Commit
8c44a86
·
verified ·
1 Parent(s): 00f1945

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -9
app.py CHANGED
@@ -1,22 +1,15 @@
1
  # ADD DISCLAIMERS
2
- # Q: why is this model so fucking slow? A: because i'm not made of money
3
-
4
- import gradio as gr
5
  import os
6
 
7
  os.system('pip install llama-cpp-python transformers torch')
8
 
 
9
  from llama_cpp import Llama
10
  from transformers import AutoTokenizer
11
- import torch
12
  from huggingface_hub import upload_file
13
  import json
14
  from uuid import uuid4
15
 
16
# For logging: serialize a conversation dict to JSON and push it to the
# HF dataset repo so conversations can be reviewed later.
def upload_json_to_hub(dict, file_id):
    """Upload *dict* as a JSON file named *file_id* to the convologs dataset.

    NOTE(review): the parameter name `dict` shadows the builtin; kept
    as-is so existing keyword callers don't break.
    """
    # BUG FIX: token was the literal string "os.getenv['HF_API_TOKEN']",
    # which sent that text as the auth token instead of reading the env var.
    upload_file(
        path_or_fileobj=json.dumps(dict).encode("utf-8"),
        path_in_repo=file_id,
        repo_id="Elijahbodden/EliGPT-convologs",
        token=os.getenv("HF_API_TOKEN"),
        repo_type="dataset",
    )
19
-
20
  model_id = "Elijahbodden/eliGPTv1.1"
21
 
22
  # MODEL
@@ -66,6 +59,9 @@ presets = {
66
  "newcomer" : [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii!\n I don't think we've ever talked before, nice to meet you\n"}],
67
  }
68
 
 
 
 
69
 
70
  def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
71
  generated_tok_number = len(ids) - prompt_tok_len
@@ -100,7 +96,6 @@ def respond(
100
  response = ""
101
 
102
  convo = tokenizer.apply_chat_template(messages, tokenize=True)
103
- # print(convo)
104
  for message in model.create_completion(
105
  convo,
106
  temperature=0.75,
 
1
  # ADD DISCLAIMERS
 
 
 
2
  import os
3
 
4
  os.system('pip install llama-cpp-python transformers torch')
5
 
6
+ import gradio as gr
7
  from llama_cpp import Llama
8
  from transformers import AutoTokenizer
 
9
  from huggingface_hub import upload_file
10
  import json
11
  from uuid import uuid4
12
 
 
 
 
 
13
  model_id = "Elijahbodden/eliGPTv1.1"
14
 
15
  # MODEL
 
59
  "newcomer" : [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii!\n I don't think we've ever talked before, nice to meet you\n"}],
60
  }
61
 
62
# For logging: serialize a conversation dict to JSON and push it to the
# HF dataset repo so conversations can be reviewed later.
def upload_json_to_hub(dict, file_id):
    """Upload *dict* as a JSON file named *file_id* to the convologs dataset.

    NOTE(review): the parameter name `dict` shadows the builtin; kept
    as-is so existing keyword callers don't break.
    """
    # BUG FIX: token was the literal string "os.getenv['HF_API_TOKEN']",
    # which sent that text as the auth token instead of reading the env var.
    upload_file(
        path_or_fileobj=json.dumps(dict).encode("utf-8"),
        path_in_repo=file_id,
        repo_id="Elijahbodden/EliGPT-convologs",
        token=os.getenv("HF_API_TOKEN"),
        repo_type="dataset",
    )
65
 
66
  def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
67
  generated_tok_number = len(ids) - prompt_tok_len
 
96
  response = ""
97
 
98
  convo = tokenizer.apply_chat_template(messages, tokenize=True)
 
99
  for message in model.create_completion(
100
  convo,
101
  temperature=0.75,